F diff --git a/build/cmake/libs/innards.txt b/build/cmake/libs/innards.txt
--- a/build/cmake/libs/innards.txt
+++ b/build/cmake/libs/innards.txt
src/environment/command_arguments/gcc_arguments.c
src/environment/error/gcc_error.c
src/frontend/lex/lexer.c
- src/frontend/lex/preprocessing.c
+ src/frontend/lex/lex_preprocessing_directive.c
src/frontend/parse/parse_declaration.c
src/frontend/parse/parse_expression.c
src/frontend/parse/parse_statement.c
F diff --git a/lkfjas b/lkfjas
deleted file mode 100644
--- a/lkfjas
+++ /dev/null
- .
- ├── build
- │ └── cmake
- │ ├── generator.txt
- │ ├── include_directories.txt
- │ ├── libs
- │ │ ├── automata_inner.txt
- │ │ ├── chonky.txt
- │ │ ├── innards.txt
- │ │ ├── misc.txt
- │ │ ├── wobler_assert.txt
- │ │ └── wonky_assert.txt
- │ ├── libs.txt
- │ ├── prebuild.txt
- │ ├── wobler.txt
- │ └── wonky.txt
- ├── CMakeLists.txt
- ├── doc
- │ ├── arch.txt
- │ ├── build.txt
- │ ├── hier.txt
- │ ├── scribblings
- │ │ └── preprocessor_rewrite.txt
- │ ├── tests.txt
- │ └── todo.txt
- ├── GPATH
- ├── GRTAGS
- ├── GTAGS
- ├── README.txt
- ├── src
- │ ├── backend
- │ │ ├── asm
- │ │ │ └── intel
- │ │ │ ├── intel_asm.c
- │ │ │ ├── intel_asm.h
- │ │ │ ├── intel_asm.hh
- │ │ │ ├── intel_compile.c
- │ │ │ ├── intel_compile.h
- │ │ │ ├── intel_instruction.c
- │ │ │ ├── intel_instruction.h
- │ │ │ ├── intel_instruction.hh
- │ │ │ ├── intel_location.c
- │ │ │ ├── intel_location.h
- │ │ │ └── intel_location.hh
- │ │ ├── compile.c
- │ │ ├── compile.h
- │ │ ├── compile.hh
- │ │ └── text
- │ │ ├── lines.c
- │ │ ├── lines.h
- │ │ ├── lines.hh
- │ │ └── print
- │ │ ├── print.c
- │ │ ├── print.h
- │ │ └── print.hh
- │ ├── common.h
- │ ├── debug
- │ │ ├── debug_ast.c
- │ │ ├── debug_ast.h
- │ │ ├── debug_denoted.c
- │ │ ├── debug_denoted.h
- │ │ ├── debug.h
- │ │ ├── debug_initialiser.c
- │ │ ├── debug_initialiser.h
- │ │ ├── debug_lexer.c
- │ │ ├── debug_lexer.h
- │ │ ├── debug_linkage.c
- │ │ ├── debug_linkage.h
- │ │ ├── debug_scope.c
- │ │ ├── debug_scope.h
- │ │ ├── debug_type.c
- │ │ ├── debug_type.h
- │ │ ├── debug_value.c
- │ │ ├── debug_value.h
- │ │ ├── wobler
- │ │ │ ├── wobler_assert.c
- │ │ │ ├── wobler.c
- │ │ │ ├── wobler_declarations.h
- │ │ │ ├── wobler.h
- │ │ │ └── wobler_tests.h
- │ │ ├── wonky_assert.c
- │ │ └── wonky_assert.h
- │ ├── environment
- │ │ ├── command_arguments
- │ │ │ ├── gcc_arguments.c
- │ │ │ ├── gcc_arguments.h
- │ │ │ └── gcc_arguments.hh
- │ │ └── error
- │ │ ├── gcc_error.c
- │ │ ├── gcc_error.h
- │ │ └── gcc_error.hh
- │ ├── frontend
- │ │ ├── lex
- │ │ │ ├── lexer.c
- │ │ │ ├── lexer.h
- │ │ │ ├── lexer.hh
- │ │ │ ├── preprocessing.c
- │ │ │ ├── preprocessing.h
- │ │ │ └── preprocessing.hh
- │ │ └── parse
- │ │ ├── parse_declaration.c
- │ │ ├── parse_declaration.h
- │ │ ├── parse_expression.c
- │ │ ├── parse_expression.h
- │ │ ├── parse.h
- │ │ ├── parse_statement.c
- │ │ ├── parse_statement.h
- │ │ ├── parse_statement.hh
- │ │ ├── parse_translation_unit.c
- │ │ └── parse_translation_unit.h
- │ ├── misc
- │ │ ├── gcc_string.c
- │ │ ├── gcc_string.h
- │ │ ├── map.c
- │ │ ├── map.h
- │ │ ├── map.hh
- │ │ ├── queue.c
- │ │ ├── queue.h
- │ │ ├── queue.hh
- │ │ ├── stack.c
- │ │ ├── stack.h
- │ │ ├── stack.hh
- │ │ ├── wonky_malloc.c
- │ │ ├── wonky_malloc.h
- │ │ └── wonky_malloc.hh
- │ ├── semantics
- │ │ ├── ast.c
- │ │ ├── ast.h
- │ │ ├── ast.hh
- │ │ ├── constraints
- │ │ │ ├── constraints.h
- │ │ │ ├── expression_constraints.c
- │ │ │ ├── expression_constraints.h
- │ │ │ ├── initialiser_constraints.c
- │ │ │ ├── initialiser_constraints.h
- │ │ │ ├── linkage_constraints.c
- │ │ │ ├── linkage_constraints.h
- │ │ │ ├── statement_constraints.c
- │ │ │ └── statement_constraints.h
- │ │ ├── identifiers
- │ │ │ ├── denoted.c
- │ │ │ ├── denoted.h
- │ │ │ ├── denoted.hh
- │ │ │ ├── linkage.c
- │ │ │ ├── linkage.h
- │ │ │ ├── linkage.hh
- │ │ │ ├── scope.c
- │ │ │ ├── scope.h
- │ │ │ └── scope.hh
- │ │ ├── memory
- │ │ │ ├── memory_location.c
- │ │ │ ├── memory_location.h
- │ │ │ ├── memory_location.hh
- │ │ │ ├── object.c
- │ │ │ ├── object.h
- │ │ │ └── object.hh
- │ │ ├── program
- │ │ │ ├── program.c
- │ │ │ ├── program.h
- │ │ │ ├── program.hh
- │ │ │ ├── translation_unit.c
- │ │ │ ├── translation_unit.h
- │ │ │ └── translation_unit.hh
- │ │ └── value
- │ │ ├── constant.c
- │ │ ├── constant.h
- │ │ ├── constant.hh
- │ │ ├── evaluation.c
- │ │ ├── evaluation.h
- │ │ ├── initialiser.c
- │ │ ├── initialiser.h
- │ │ ├── initialiser.hh
- │ │ ├── type.c
- │ │ ├── type.h
- │ │ ├── type.hh
- │ │ ├── value.c
- │ │ ├── value.h
- │ │ └── value.hh
- │ ├── syntax
- │ │ ├── automatas
- │ │ │ ├── automata.c
- │ │ │ ├── automata.h
- │ │ │ ├── automata.hh
- │ │ │ └── generator
- │ │ │ ├── generator.c
- │ │ │ ├── generator.h
- │ │ │ ├── generator.hh
- │ │ │ ├── keyword_list.c
- │ │ │ ├── keyword_list.h
- │ │ │ └── keyword_list.hh
- │ │ ├── identifier
- │ │ │ ├── identifier.c
- │ │ │ ├── identifier.h
- │ │ │ └── identifier.hh
- │ │ ├── source_file.c
- │ │ ├── source_file.h
- │ │ ├── source_file.hh
- │ │ └── token
- │ │ ├── token.c
- │ │ ├── token.h
- │ │ └── token.hh
- │ ├── wonky.c
- │ └── wonky.h
- ├── tests
- │ ├── test3.c
- │ ├── test5.c
- │ ├── test_bitfield_error2.c
- │ ├── test_bitfield_error3.c
- │ ├── test_bitfield_error.c
- │ ├── test_bitfields.c
- │ ├── test_conditional_expression.c
- │ ├── test_declaration2.c
- │ ├── test_declaration.c
- │ ├── test_declaration_error.c
- │ ├── test_declaration_speed.c
- │ ├── test_digraphs.c
- │ ├── test_for_cycle_declaration.c
- │ ├── test_function_definition.c
- │ ├── test_function_definition_error2.c
- │ ├── test_function_definition_error.c
- │ ├── test_generic.c
- │ ├── test_generic_error.c
- │ ├── test_linkage2.c
- │ ├── test_linkage.c
- │ ├── test_linkage_error2.c
- │ ├── test_linkage_error.c
- │ ├── test_preproc_error.c
- │ ├── test_typedef.c
- │ ├── test_undeclared_error.c
- │ ├── test_variadic_function.c
- │ ├── test_variadic_function_error2.c
- │ └── test_variadic_function_error.c
- └── tools
- └── wsh
-
- 33 directories, 200 files
F diff --git a/src/backend/asm/intel/intel_asm.c b/src/backend/asm/intel/intel_asm.c
--- a/src/backend/asm/intel/intel_asm.c
+++ b/src/backend/asm/intel/intel_asm.c
}
void intel_asm_anotate_function(struct Compile_Data_Intel_Asm *compile_data,struct Denoted_Function *function)
{
- struct token *id;
+ struct identifier *id;
struct Intel_Asm_Memory_Location_By_Label *location;
id=function->id;
location=(struct Intel_Asm_Memory_Location_By_Label*)
get_intel_asm_label_location(
- (struct Intel_Asm_Label*)get_intel_asm_label(gstr_dup(id->data,id->data+id->data_size,1024))
+ (struct Intel_Asm_Label*)get_intel_asm_label(gstr_dup(id->data,id->data+id->size,1024))
);
function->location=(struct Memory_Location*)location;
void export_function_definition(struct Compile_Data_Intel_Asm *compile_data,struct AST_Function_Definition *function)
{
wonky_assert(function!=NULL && function->type==ST_FUNCTION_DEFINITION);
- Queue_Push(compile_data->exports,get_intel_asm_export(gstr_dup(function->function->id->data,function->function->id->data+function->function->id->data_size,1024)));
+ Queue_Push(compile_data->exports,get_intel_asm_export(gstr_dup(function->function->id->data,function->function->id->data+function->function->id->size,1024)));
}
void export_object_definition(struct Compile_Data_Intel_Asm *compile_data,struct AST_Object_Declaration *object)
{
wonky_assert(object!=NULL && object->type==ST_OBJECT_DECLARATION);
- Queue_Push(compile_data->exports,get_intel_asm_export(gstr_dup(object->object->id->data,object->object->id->data+object->object->id->data_size,1024)));
+ Queue_Push(compile_data->exports,get_intel_asm_export(gstr_dup(object->object->id->data,object->object->id->data+object->object->id->size,1024)));
}
void import_function_definition(struct Compile_Data_Intel_Asm *compile_data,struct AST_Function_Declaration *function)
{
wonky_assert(function!=NULL && function->type==ST_FUNCTION_DECLARATION);
- Queue_Push(compile_data->imports,get_intel_asm_import(gstr_dup(function->function->id->data,function->function->id->data+function->function->id->data_size,1024)));
+ Queue_Push(compile_data->imports,get_intel_asm_import(gstr_dup(function->function->id->data,function->function->id->data+function->function->id->size,1024)));
}
void import_object_definition(struct Compile_Data_Intel_Asm *compile_data,struct AST_Object_Declaration *object)
{
wonky_assert(object!=NULL && object->type==ST_OBJECT_DECLARATION);
wonky_assert(object->initializer==NULL);/*we only ask for objects without initialisers*/
- Queue_Push(compile_data->imports,get_intel_asm_import(gstr_dup(object->object->id->data,object->object->id->data+object->object->id->data_size,1024)));
+ Queue_Push(compile_data->imports,get_intel_asm_import(gstr_dup(object->object->id->data,object->object->id->data+object->object->id->size,1024)));
}
#endif
F diff --git a/src/backend/asm/intel/intel_compile.c b/src/backend/asm/intel/intel_compile.c
--- a/src/backend/asm/intel/intel_compile.c
+++ b/src/backend/asm/intel/intel_compile.c
(struct Intel_Asm_Label*)get_intel_asm_label(
gstr_dup(
def->function->id->data,
- def->function->id->data+def->function->id->data_size,
+ def->function->id->data+def->function->id->size,
1024
)
)
F diff --git a/src/backend/text/print/print.c b/src/backend/text/print/print.c
--- a/src/backend/text/print/print.c
+++ b/src/backend/text/print/print.c
void print_token(struct Compile_Data_Print *compile_data,struct token *token)
{
char *token_string;
- token_string=gstr_dup(token->data,token->data+token->data_size,1024);
- append_to_last_line(token_string,compile_data->lines);
+ wonky_assert(SHOULD_NOT_REACH_HERE);
+ //token_string=gstr_dup(token->data,token->data+token->data_size,1024);
+ //append_to_last_line(token_string,compile_data->lines);
}
char print_tokens_of_program(FILE *out,char **base_source_names)
{
- struct Source_File *base_file;
- struct Translation_Data *hold_translation_data;
+ struct Source_Name *name;
+ struct Program *program;
char *this_directory[]={"./",NULL};
- char ret;
+ _Bool ret;
wonky_assert(base_source_names!=NULL);
{
return 0;
}
+
ret=0;
- hold_translation_data=get_translation_data(NULL,get_linkage(),get_linkage());
+ program=get_program();
+
do
{
- base_file=get_source_file(*base_source_names,this_directory);
+ name=get_source_name(*base_source_names,this_directory[0]);
- if(base_file==NULL)
+ if(name==NULL)
{
- /*TODO error*/
continue;
}else
{
- lex(base_file,hold_translation_data);
- if(hold_translation_data->errors->size>0)
+ lex(name,program);
+ if(program->errors->size>0)
{
ret=1;
- /*if we are here then the is_quiet flag has not been set*/
- print_errors(out,hold_translation_data->errors);
- delete_source_file(base_file);
+ print_errors(out,program->errors);
break;
}
- fprintf(out,"\nTOKENS OF %s {\n",base_file->src_name->filename);
- print_tokens(out,hold_translation_data->tokens);
+
+ fprintf(out,"\nTOKENS OF %s {\n",name->normalised_name);
+ print_tokens(out,((struct Preprocessing_Translation_Unit*)program->preprocessing_translation_units_to_be_compiled->first)->tokens);
fprintf(out,"\n} END OF TOKENS\n");
}
}while(*(++base_source_names));
- /*TODO fix memory leak*/
- wonky_free(hold_translation_data);
-
return ret;
}
fprintf(out,"[");
print_keyword_enum(out,token->type);
+ /*
if(token->data==NULL)continue;
for(i=0;i<token->data_size;++i)
{
fprintf(out,"%c",token->data[i]);
}
+ */
fprintf(out,"] ");
}
wonky_assert(SHOULD_NOT_REACH_HERE);
}
append_to_last_line(gstr_to_heap("denoted object "),compile_data->lines);
- print_token(compile_data,((struct Denoted_Object*)denoted)->id);
+ print_id(compile_data,((struct Denoted_Object*)denoted)->id);
print_object(compile_data,((struct Denoted_Object*)denoted)->object);
append_to_last_line(gstr_to_heap(" is a"),compile_data->lines);
print_type(compile_data,((struct Denoted_Object*)denoted)->object->type,1);
return;
case DT_Typedef:
append_to_last_line(gstr_to_heap("typedef "),compile_data->lines);
- print_token(compile_data,((struct Denoted_Type*)denoted)->id);
+ print_id(compile_data,((struct Denoted_Type*)denoted)->id);
append_to_last_line(gstr_to_heap(" to "),compile_data->lines);
print_type(compile_data,((struct Denoted_Type*)denoted)->type,0);
return;
case DT_Function:
- print_token(compile_data,((struct Denoted_Function*)denoted)->id);
+ print_id(compile_data,((struct Denoted_Function*)denoted)->id);
append_to_last_line(gstr_to_heap(" is "),compile_data->lines);
switch(((struct Denoted_Function*)denoted)->linkage)
{
print_type(compile_data,((struct Denoted_Function*)denoted)->type,1);
return;
case DT_Enum:
- print_token(compile_data,((struct Denoted_Enum*)denoted)->enumeration->id);
+ print_id(compile_data,((struct Denoted_Enum*)denoted)->enumeration->id);
append_to_last_line(gstr_to_heap(" is "),compile_data->lines);
print_enumeration(compile_data,((struct Denoted_Enum*)denoted)->enumeration);
return;
return;
}
case DT_Struct_Union_Tag:
- print_token(compile_data,((struct Denoted_Struct_Union*)denoted)->struct_union->id);
+ print_id(compile_data,((struct Denoted_Struct_Union*)denoted)->struct_union->id);
append_to_last_line(gstr_to_heap(" is "),compile_data->lines);
print_struct_union(compile_data,((struct Denoted_Struct_Union*)denoted)->struct_union);
return;
void print_function_definition(struct Compile_Data_Print *compile_data,struct AST_Function_Definition *function)
{
- print_token(compile_data,function->function->id);
+ print_id(compile_data,function->function->id);
append_to_last_line(gstr_to_heap(" is"),compile_data->lines);
switch(function->function->linkage)
case KW_ELSE:
fprintf(out,"KW_ELSE");
break;
- case KW_DEFINED:
+ case PKW_DEFINED:
- fprintf(out,"KW_DEFINED");
+ fprintf(out,"PKW_DEFINED");
break;
case KW_LONG:
{
append_to_last_line(gstr_to_heap("denoted object "),compile_data->lines);
if(denoted->id)
- print_token(compile_data,denoted->id);
+ print_id(compile_data,denoted->id);
print_object(compile_data,denoted->object);
append_to_last_line(gstr_to_heap(" is a"),compile_data->lines);
print_type(compile_data,denoted->object->type,1);
F diff --git a/src/backend/text/print/print.h b/src/backend/text/print/print.h
--- a/src/backend/text/print/print.h
+++ b/src/backend/text/print/print.h
#include <queue.h>
#include <object.h>
#include <value.h>
+ #include <program.h>
+ #include <translation_unit.h>
+ #include <gcc_error.h>
#define ASTPTR(s) ((struct AST*)(s))
void print_type_sign(struct Compile_Data_Print *compile_data,struct Type *type);
void print_expression_value(struct Compile_Data_Print *compile_data,struct Expression_Value *value);
void print_expression_value_type(struct Compile_Data_Print *compile_data,struct Expression_Value *value);
+ void print_id(struct Compile_Data_Print *compile_data,struct identifier *id);
#endif
F diff --git a/src/environment/error/gcc_error.c b/src/environment/error/gcc_error.c
--- a/src/environment/error/gcc_error.c
+++ b/src/environment/error/gcc_error.c
push_translation_message_inner("[Note] ",note_message,translation_data,args);
va_end(args);
}
- void push_lexing_error(const char *error_message,struct Source_File *src,struct Translation_Data *translation_data, ...)
+ void push_lexing_error(char *error_message,struct Lexer_Data *lexer_data)
{
- va_list args;
- va_start(args,translation_data);
- push_translation_message_inner("[Error] ",error_message,translation_data,args);
- va_end(args);
- }
- void push_lexing_note(const char *note_message,struct Source_File *src,struct Translation_Data *translation_data, ...)
- {
-
- va_list args;
- va_start(args,translation_data);
- push_translation_message_inner("[Note] ",note_message,translation_data,args);
- va_end(args);
+ wonky_assert(!"REWORK THE ERROR MESSAGE CODE");
}
void push_raw_translation_error(const char *error_message,size_t line,size_t column,const char *filename,struct Translation_Data *translation_data)
{
F diff --git a/src/environment/error/gcc_error.h b/src/environment/error/gcc_error.h
--- a/src/environment/error/gcc_error.h
+++ b/src/environment/error/gcc_error.h
%D denoted - takes pointer to Denoted
%t token - takes pointer to a token
*/
- struct Translation_Message* get_translation_message(const char *message_format,struct Translation_Data *translation_data,char *filename,size_t line,size_t column,va_list args);
+ struct Translation_Message* get_translation_message(const char *message_format,struct Program *program,struct Source_Location *location,va_list args);
+ struct Translation_Message* get_translation_message_inner(const char *message,struct Program *program,struct Source_Location *location);
void push_translation_message_inner(const char *prefix,const char *message_format,struct Translation_Data *translation_data,va_list args);
void push_translation_error(const char *message_format,struct Translation_Data *translation_data, ...);
void push_translation_note(const char *note_message,struct Translation_Data *translation_data, ...);
- void push_lexing_error(const char *error_message,struct Source_File *src,struct Translation_Data *translation_data, ...);
- void push_lexing_note(const char *error_message,struct Source_File *src,struct Translation_Data *translation_data, ...);
+ void push_lexing_error(char *error_message,struct Lexer_Data *lexer_data);
void push_raw_translation_error(const char *error_message,size_t line,size_t column,const char *filename,struct Translation_Data *translation_data);
F diff --git a/src/frontend/lex/lex_preprocessing_directive.c b/src/frontend/lex/lex_preprocessing_directive.c
--- a/src/frontend/lex/lex_preprocessing_directive.c
+++ b/src/frontend/lex/lex_preprocessing_directive.c
#define WONKY_LEX_PREPROCESSING_DIRECTIVE_C WONKY_LEX_PREPROCESSING_DIRECTIVE_C
#include <lex_preprocessing_directive.h>
- /*we have skipped the leading #*/
- /*
- #include string
- #include <qchar>
- #define [ id(list) replacement
- #undef [ id ]
- #if
- #ifdef
- #ifndef
- #
-
- #elif
- #else
- #endif
-
-
- #error
- #pragma
- #line number [string]
-
- */
- void parse_preproc_line(struct Source_File *src,struct Translation_Data *translation_data)
- {
- struct token *hold;
- /*TODO fix!*/
- /*hold=get_next_token(src,&chonky_jr[0],0);*/
- hold=get_next_token(src,&chonky[0],0);
- switch(hold->type)
- {
- case PKW_INCLUDE:
- wonky_free(hold);
- parse_include_line(src,translation_data);
- return;
- case PKW_DEFINE:
- wonky_free(hold);
- parse_define_line(src,translation_data);
- return;
- case PKW_IF:
- wonky_free(hold);
- parse_preproc_if_line(src,translation_data);
- return;
- case PKW_IFDEF:
- wonky_free(hold);
- parse_preproc_ifdef_line(src,translation_data);
- return;
- case PKW_IFNDEF:
- wonky_free(hold);
- parse_preproc_ifndef_line(src,translation_data);
- return;
- case PKW_UNDEF:
- wonky_free(hold);
- parse_preproc_undef_line(src,translation_data);
- return;
- case PKW_ENDIF:
- wonky_free(hold);
- push_lexing_error("unmatched endif",src,translation_data);
- return;
- case PKW_ELSE:
- wonky_free(hold);
- push_lexing_error("unmatched else",src,translation_data);
- return;
- case PKW_ELIF:
- wonky_free(hold);
- push_lexing_error("unmatched elif",src,translation_data);
- return;
- case PKW_LINE:
- wonky_free(hold);
- parse_preproc_line_line(src,translation_data);
- return;
- case PKW_ERROR:
- wonky_free(hold);
- parse_preproc_error_line(src,translation_data);
- return;
- default:
- /*TODO error*/
- wonky_free(hold);
- push_lexing_error("expected a preprocessing directive",src,translation_data);
- return;
-
- }
- }
- void parse_include_line(struct Source_File *src,struct Translation_Data *translation_data)
- {
- struct token *hold;
- hold=get_next_token(src,&chonky[0],0);
- if(hold->type==KW_STRING)
- {
- char *where_to_search[]={src->src_name->base,NULL};
- struct Source_File *hold_file;
-
- hold->data[hold->data_size-1]='\0';
- hold->data_size-=2;
- ++hold->data;
- handle_splicing(hold);
-
-
- /*search in the directory of the file from which we include*/
- hold_file=get_source_file(hold->data,where_to_search);
- /*fallback to well known locations == <>*/
- if(hold_file==NULL)
- {
- hold_file=get_source_file(hold->data,well_known_locations_base);
- if(hold_file==NULL)
- {
- /*TODO error*/
- push_lexing_error("file in include directive not found",src,translation_data);
- wonky_free(hold);
- return;
- }
- }
- lex_program(translation_data,hold_file);
- wonky_free(hold);
- }else if(hold->type==KW_LESS)/*hack*/
- {
- struct Source_File *hold_file;
- ++hold->data;
- while(src->src[src->where_in_src]!='>' && src->where_in_src<src->src_size)
- {
- ++src->where_in_src;
- ++hold->data_size;
- }
- if(src->where_in_src==src->src_size)
- {
- /*TODO error*/
- wonky_free(hold);
- return;
- }
- /*skip the >*/
- ++src->where_in_src;
- hold->data[hold->data_size-1]='\0';
- handle_splicing(hold);
-
- hold_file=get_source_file(hold->data,well_known_locations_base);
- if(hold_file==NULL)
- {
- /*TODO error*/
- push_lexing_error("file in include directive not found",src,translation_data);
- wonky_free(hold);
- return;
- }
-
- lex_program(translation_data,hold_file);
- wonky_free(hold);
-
- }else
- {
- /*TODO error*/
- push_lexing_error("include error",src,translation_data);
- wonky_free(hold);
- return;
- }
-
-
- chase_new_line(src,translation_data);
- }
-
- /*skipped # and 'define'*/
- void parse_define_line(struct Source_File *src,struct Translation_Data *translation_data)
- {
- struct token *hold_token;
- struct token *macro_name;
- struct define_directive *new_macro;
- struct Queue *hold_tokens;
- size_t number_of_arguments=0;
- int *hold_index;
-
-
- macro_name=get_next_token(src,&chonky[0],0);
- if(macro_name->type!=KW_ID)
- {
- wonky_free(macro_name);
- push_lexing_error("expected id after #define",src,translation_data);
- return;
- }
-
- new_macro=get_define_directive(macro_name);
- /*white space*/
- hold_token=get_next_token(src,&chonky[0],0);
- if(hold_token->type==KW_OPEN_NORMAL)
- {
- wonky_free(hold_token);
- while(1)
- {
- hold_token=get_next_token(src,&chonky[0],0);
- if(hold_token->type!=KW_ID)
- {
- push_lexing_error("expected id in define argument list",src,translation_data);
- wonky_free(hold_token);
- break;
- }
- hold_index=wonky_malloc(sizeof(int));
- *hold_index=number_of_arguments;
- ++number_of_arguments;
- Map_Push(new_macro->arguments,hold_token->data,hold_token->data_size,hold_index);
- wonky_free(hold_token);
- hold_token=get_next_token(src,&chonky[0],0);
- if(hold_token->type!=KW_COMMA)
- {
- if(hold_token->type==KW_CLOSE_NORMAL)
- {
- wonky_free(hold_token);
- break;
- }else
- {
- push_lexing_error("expected ',' in define argument list",src,translation_data);
- wonky_free(hold_token);
- break;
- }
- }
- wonky_free(hold_token);
- }
-
- }else if(hold_token->type==KW_NOTYPE)
- {
- wonky_free(hold_token);
- }
-
- /*push things*/
-
- hold_tokens=translation_data->tokens;
- translation_data->tokens=new_macro->macro_tokens;
-
- new_macro->number_of_arguments=number_of_arguments;
- /*there is something in hold_token*/
- while( (hold_token=get_next_token(src,&chonky[0],0))->type != KW_NOTYPE)
- {
- expand_macro(hold_token,src,translation_data);
- }
-
- /*removing the notype token*/
- wonky_free(hold_token);
-
- translation_data->tokens=hold_tokens;
- /*push the directive into the macro map*/
- Map_Push(translation_data->macros,macro_name->data,macro_name->data_size,new_macro);
- //wonky_free(macro_name);
- chase_new_line(src,translation_data);
-
- }
- /*
- id[(list)] tokens \n
- */
- struct define_directive* get_define_directive(struct token* macro_name)
- {
- struct define_directive *ret;
- ret=wonky_malloc(sizeof(struct token));
- ret->macro_name=macro_name;
-
- ret->macro_tokens=wonky_malloc(sizeof(struct Queue));
- Queue_Init(ret->macro_tokens);
-
- ret->arguments=wonky_malloc(sizeof(struct Map));
- Map_Init(ret->arguments);
-
- ret->number_of_arguments=0;
-
- return ret;
- }
-
- /*returns an array of queues*/
- struct Queue* make_define_argument_list(size_t number_of_arguments)
- {
- size_t i;
- struct Queue *ret;
-
- if(number_of_arguments==0)
- return NULL;
-
- ret=wonky_malloc(sizeof(struct Queue)*number_of_arguments);
-
- for(i=0;i<number_of_arguments;++i)
- {
- Queue_Init(ret+i);
- }
- return ret;
- }
- void delete_define_argument_list(size_t number_of_arguments,struct Queue *args)
- {
- if(number_of_arguments==0)
- {
- wonky_assert(args==NULL);
- return;
- }
- flush_macro_arguments(number_of_arguments,args);
- wonky_free(args);
- }
-
- void expand_macro_argument(struct Queue *replacement_tokens,struct Source_File *src,struct Translation_Data *translation_data)
- {
- struct Queue_Node *it;
- struct token *hold_token;
- for(it=replacement_tokens->first;it!=NULL;it=it->prev)
- {
- hold_token=copy_token((struct token*)it->data);
- hold_token->line=src->which_row;
- hold_token->column=src->which_column;
- Queue_Push(translation_data->tokens,hold_token);
- //Queue_Push(translation_data->tokens,copy_token((struct token*)it->data));
- }
- }
- void load_macro_arguments(struct Queue *args,size_t number_of_arguments,struct Source_File *src,struct Translation_Data *translation_data)
- {
- struct token *hold;
- struct Queue *hack;
- size_t i;
- size_t j;
-
- if(number_of_arguments==0)
- return;
-
- hold=get_next_token(src,&chonky[0],1);
- if(hold->type!=KW_OPEN_NORMAL)
- {
- push_lexing_error("expected '(' in macro expansion",src,translation_data);
- wonky_free(hold);
- return;
- }
- wonky_free(hold);
-
- hack=translation_data->tokens;
- for(i=0;i<number_of_arguments-1;++i)
- {
- translation_data->tokens=args+i;
- for(
- hold=get_next_token(src,&chonky[0],1),j=0;
- hold->type!=KW_COMMA && hold->type!=KW_NOTYPE;
- hold=get_next_token(src,&chonky[0],1),++j
- )
- {
- expand_macro(hold,src,translation_data);
- }
- if(hold->type==KW_NOTYPE)
- {
- push_lexing_error("expected ',' in macro argument list",src,translation_data);
- wonky_free(hold);
- goto cleanup;
- }
- if(j==0)
- {
- push_lexing_error("expected argument in macro argument list",src,translation_data);
- wonky_free(hold);
- goto cleanup;
- }
-
- }
- translation_data->tokens=args+i;
- for(
- hold=get_next_token(src,&chonky[0],1),j=0;
- hold->type!=KW_CLOSE_NORMAL;
- hold=get_next_token(src,&chonky[0],1),++j
- )
- {
- if(hold->type==KW_NOTYPE)
- {
- push_lexing_error("expected ')' in macro argument list",src,translation_data);
- wonky_free(hold);
- goto cleanup;
- }
- expand_macro(hold,src,translation_data);
- }
- if(j==0)
- {
- push_lexing_error("expected argument in macro argument list",src,translation_data);
- wonky_free(hold);
- }
-
- cleanup:
- translation_data->tokens=hack;
-
-
- }
- void flush_macro_arguments(size_t number_of_arguments,struct Queue *args)
- {
- size_t i;
- for(i=0;i<number_of_arguments;++i)
- {
- while(args[i].size>0)
- wonky_free(Queue_Pop(args+i));
- }
- }
- /*macro name token is wonky_freed on expansion , if it is not a macro name it is pushed into token queue*/
- void expand_macro(struct token* macro_name,struct Source_File *src,struct Translation_Data *translation_data)
- {
- struct define_directive *hold=NULL;
- struct token *hold_token;
- int *index;
- struct Queue_Node *it;
- struct Queue *argument_list;
-
- if(macro_name->type==KW_ID)
- hold=Map_Check(translation_data->macros,macro_name->data,macro_name->data_size);
- if(hold!=NULL)
- {
- wonky_free(macro_name);
- argument_list=make_define_argument_list(hold->number_of_arguments);
- load_macro_arguments(argument_list,hold->number_of_arguments,src,translation_data);
- if(translation_data->errors->size>0)
- {
- delete_define_argument_list(hold->number_of_arguments,argument_list);
- return;
- }
-
-
- for(it=hold->macro_tokens->first;it!=NULL;it=it->prev)
- {
- hold_token=(struct token*)it->data;
- index=Map_Check(hold->arguments,hold_token->data,hold_token->data_size);
- if(index!=NULL)
- {
- expand_macro_argument(argument_list+*index,src,translation_data);
- }else
- {
- hold_token=copy_token(hold_token);
-
- hold_token->line=src->which_row;
- hold_token->column=src->which_column;
-
- wonky_assert(is_valid_token(hold_token));
-
- Queue_Push(translation_data->tokens,hold_token);
- }
- }
- delete_define_argument_list(hold->number_of_arguments,argument_list);
- }else
- {
- /*this isn't a macro, so we just push it to the token queue*/
- wonky_assert(is_valid_token(macro_name));
- Queue_Push(translation_data->tokens,macro_name);
- }
- }
- void preproc_lex_first_part(struct Source_File *src,struct Translation_Data *translation_data)
- {
- struct Source_File temp_src;
- struct token *hold_token;
- char just_in_case;
-
- temp_src=*src;
- hold_token=preproc_find_else(src,translation_data,1);
-
-
- temp_src.src_size=src->where_in_src;
- just_in_case=src->src[src->where_in_src];
- src->src[src->where_in_src]='\0';
-
- lex(&temp_src,translation_data);
-
- src->src[src->where_in_src]=just_in_case;
-
- if(hold_token!=NULL)
- wonky_free(hold_token);
- do
- {
- hold_token=preproc_find_else(src,translation_data,0);
- if(hold_token)
- wonky_free(hold_token);
- else
- break;
- }while(!has_new_errors(translation_data));
-
- if(hold_token!=NULL)
- {
- wonky_free(hold_token);
- push_lexing_error("could not find matching #else, #elif or #endif",src,translation_data);
- }
- }
- /*
- we have skipped the #if part so this could be used for elif
- */
- void parse_preproc_if_line(struct Source_File *src,struct Translation_Data *translation_data)
- {
-
- struct Queue *tokens;
- struct Queue *swap;
- struct AST *condition;
- struct Scope *null_scope;
- struct token *hold_token;
- int result;
-
- null_scope=get_normal_scope(NULL,FILE_SCOPE);
-
- tokens=lex_line(src,translation_data,1);
-
- swap=translation_data->tokens;
- translation_data->tokens=tokens;
-
- condition=parse_expression(translation_data,null_scope);
- result=evaluate_const_expression_integer(condition,translation_data);
- delete_normal_scope((struct Normal_Scope*)null_scope);
- delete_ast(condition);
-
- if(result)
- {
- preproc_lex_first_part(src,translation_data);
- }else
- {
- hold_token=preproc_find_else(src,translation_data,0);
- if(hold_token!=NULL && hold_token->type==PKW_ELIF)
- {
- parse_preproc_if_line(src,translation_data);
- }
- else if(hold_token!=NULL)
- {
- preproc_lex_first_part(src,translation_data);
- }
- }
-
-
- }
- struct token* preproc_find_else(struct Source_File *src,struct Translation_Data *translation_data,char jump_before)
- {
- struct token *hold_token;
- struct Source_File temp_src;
- int indentation=1;
-
- temp_src=*src;
- while(src->src[src->where_in_src]!='\0' && indentation)
- {
- /*BEWARE*/
- temp_src=*src;
- /*END BEWARE*/
-
- hold_token=get_next_token(src,&chonky[0],1);
- if(hold_token->type==KW_HASHTAG)
- {
- wonky_free(hold_token);
- /*TODO FIX*/
- /*hold_token=get_next_token(src,&chonky_jr[0],0);*/
- hold_token=get_next_token(src,&chonky[0],0);
- switch(hold_token->type)
- {
- case PKW_IF:
- case PKW_IFDEF:
- case PKW_IFNDEF:
- ++indentation;
- break;
-
- case PKW_ENDIF:
- --indentation;
- break;
-
- case PKW_ELSE:
- case PKW_ELIF:
- if(indentation==1)
- {
- if(jump_before)
- *src=temp_src;
- return hold_token;
- }
- else
- {
- break;
- }
- case PKW_NOTYPE:
- wonky_free(hold_token);
- goto_new_line(src,translation_data);
- return NULL;
- }
- wonky_free(hold_token);
-
- }else if(hold_token->type!=KW_NOTYPE)
- {
- wonky_free(hold_token);
- }else
- {
- if(src->where_in_src!=src->src_size)
- push_lexing_error("unexpected character",src,translation_data);
- wonky_free(hold_token);
- return NULL;
- }
- goto_new_line(src,translation_data);
- }
- /*BEWARE*/
- //goto_new_line(src,translation_data);
- /*END BEWARE*/
- if(jump_before)
- *src=temp_src;
- return NULL;
- }
- void parse_preproc_ifdef_line(struct Source_File *src,struct Translation_Data *translation_data)
- {
- struct token *hold_token;
- hold_token=get_next_token(src,&chonky[0],0);
- if(hold_token==NULL || hold_token->type!=KW_ID)
- {
- wonky_free(hold_token);
- push_lexing_error("expected an id here",src,translation_data);
- chase_new_line(src,translation_data);
- return;
- }else
- {
- if(Map_Check(translation_data->macros,hold_token->data,hold_token->data_size))
- {
- preproc_lex_first_part(src,translation_data);
- }else
- {
- wonky_free(hold_token);
-
- hold_token=preproc_find_else(src,translation_data,1);
-
- if(hold_token!=NULL && hold_token->type==PKW_ELIF)
- {
- parse_preproc_if_line(src,translation_data);
- }else if(hold_token!=NULL)
- {
- preproc_find_else(src,translation_data,0);
- preproc_lex_first_part(src,translation_data);
- }
-
- wonky_free(hold_token);
- }
-
- }
- chase_new_line(src,translation_data);
- }
- void parse_preproc_ifndef_line(struct Source_File *src,struct Translation_Data *translation_data)
- {
- struct token *hold_token;
- hold_token=get_next_token(src,&chonky[0],0);
- if(hold_token==NULL || hold_token->type!=KW_ID)
- {
- push_lexing_error("expected an id here",src,translation_data);
- chase_new_line(src,translation_data);
- wonky_free(hold_token);
- return;
- }else
- {
- if(!Map_Check(translation_data->macros,hold_token->data,hold_token->data_size))
- {
- wonky_free(hold_token);
- preproc_lex_first_part(src,translation_data);
- }else
- {
- wonky_free(hold_token);
-
- hold_token=preproc_find_else(src,translation_data,1);
- if(hold_token!=NULL && hold_token->type==PKW_ELIF)
- {
- parse_preproc_if_line(src,translation_data);
- }
- else if(hold_token!=NULL)
- {
- preproc_find_else(src,translation_data,0);
- preproc_lex_first_part(src,translation_data);
- }
- wonky_free(hold_token);
- }
-
- }
- chase_new_line(src,translation_data);
- }
- void parse_preproc_undef_line(struct Source_File *src,struct Translation_Data *translation_data)
- {
- struct define_directive *hold_macro;
- struct token *id;
-
- id=get_next_token(src,&chonky[0],0);
- if(id->type!=KW_ID)
- {
- push_lexing_error("expected an id here",src,translation_data);
- }else
- {
- hold_macro=Map_Check(translation_data->macros,id->data,id->data_size);
- if(hold_macro!=NULL)
- {
- delete_macro(hold_macro);
- Map_Remove(translation_data->macros,id->data,id->data_size);
- }
- }
- wonky_free(id);
- chase_new_line(src,translation_data);
- }
- void parse_preproc_error_line(struct Source_File *src,struct Translation_Data *translation_data)
+ struct token* preprocessing_lex_directive(struct Lexer_Data *lexer_data,struct Source_Location *where)
{
- char *error;
- size_t line,column;
- error=src->src+src->where_in_src;
- line=src->which_row+1;
- column=src->which_column+1;
-
-
- goto_new_line(src,translation_data);
- src->src[src->where_in_src-1]='\0';
-
- push_raw_translation_error(error,line,column,src->src_name->filename,translation_data);
-
- }
- void parse_preproc_line_line(struct Source_File *src,struct Translation_Data *translation_data)
- {
- struct Queue *tokens;
- struct Translation_Data hack;
- struct token *hold_line;
- struct token *hold_name;
- struct AST *line_expression;
-
- tokens=lex_line(src,translation_data,0);
- hack=*translation_data;
- hack.tokens=tokens;
- /*TODO account for other types of integer constants*/
- if(check(&hack,KW_DECIMAL_CONSTANT,0))
- {
- hold_line=(struct token*)Queue_Pop(tokens);
- line_expression=(struct AST*)get_constant_tree(get_expression_value_constant(extract_constant(hold_line,translation_data)));
-
- src->which_row=evaluate_const_expression_integer(line_expression,translation_data);
- if(check(&hack,KW_STRING,0))
- {
- hold_name=(struct token*)Queue_Pop(tokens);
- hold_name->data[hold_name->data_size]='\0';
- if(tokens->size>0)
- {
- wonky_free(hold_line);
- wonky_free(hold_name);
- flush_tokens(tokens);
- push_lexing_error("expected a new line in #line preprocessing directive here",src,translation_data);
- return;
- }else
- {
- delete_source_name(src->src_name);
- src->src_name=get_source_name(hold_name->data,"");
- return;
- }
-
- }else if(tokens->size>0)
- {
- wonky_free(hold_line);
- flush_tokens(tokens);
- push_lexing_error("expected a string or new line in #line preprocessing directive here",src,translation_data);
- return;
- }
-
- }else
- {
- flush_tokens(tokens);
- push_lexing_error("expected a line number in #line preprocessing directive here",src,translation_data);
- return;
- }
- }
- void delete_macro(void *macro)
- {
- /*
- #define AS_MACRO(x) ((struct define_directive*)macro)
- wonky_free(AS_MACRO(macro)->macro_name);
- flush_tokens(AS_MACRO(macro)->macro_tokens);
- wonky_free(AS_MACRO(macro)->macro_tokens);
- Map_Map(AS_MACRO(macro)->arguments,wonky_free);
- wonky_free(AS_MACRO(macro)->arguments);
- wonky_free(macro);
- #undef AS_MACRO
- */
- }
- struct Queue* lex_line(struct Source_File *src,struct Translation_Data *translation_data,char lex_defined_token)
- {
-
- struct Source_File temp_src;
- struct token *hold_token;
- struct Queue *tokens;
- char just_in_case;
-
- tokens=wonky_malloc(sizeof(struct Queue));
- Queue_Init(tokens);
-
-
- temp_src=*src;
- goto_new_line(src,translation_data);
- just_in_case=src->src[src->where_in_src];
- src->src[src->where_in_src]='\0';
-
- translation_data->tokens=tokens;
-
- while((hold_token=get_next_token(&temp_src,&chonky[0],0))->type!=KW_NOTYPE)
- {
- if(lex_defined_token && hold_token->type==KW_ID && hold_token->data_size==7 && gstrn_cmp(hold_token->data,"defined",7))
- {
- wonky_free(hold_token);
- hold_token=get_next_token(&temp_src,&chonky[0],0);
- if(hold_token->type==KW_OPEN_NORMAL)
- {
- wonky_free(hold_token);
- hold_token=get_next_token(&temp_src,&chonky[0],0);
- if(hold_token->type!=KW_ID)
- {
- push_lexing_error("expected an id after '(' in defined",src,translation_data);
- }else
- {
- struct token *hold_closing_token;
- hold_closing_token=get_next_token(&temp_src,&chonky[0],0);
- if(hold_closing_token->type!=KW_CLOSE_NORMAL)
- {
- push_lexing_error("expected an ')' after id in define",src,translation_data);
- }else
- {
- if(!Map_Check(translation_data->macros,hold_token->data,hold_token->data_size))
- {
- hold_token->type=KW_DECIMAL_CONSTANT;
- hold_token->data="0";
- hold_token->data_size=1;
- }else
- {
- hold_token->type=KW_DECIMAL_CONSTANT;
- hold_token->data="1";
- hold_token->data_size=1;
- }
-
- }
- }
- }else if(hold_token->type!=KW_ID)
- {
- push_lexing_error("expected an id after define",src,translation_data);
- }else
- {
- if(!Map_Check(translation_data->macros,hold_token->data,hold_token->data_size))
- {
- hold_token->type=KW_DECIMAL_CONSTANT;
- hold_token->data="0";
- hold_token->data_size=1;
- }else
- {
- hold_token->type=KW_DECIMAL_CONSTANT;
- hold_token->data="1";
- hold_token->data_size=1;
- }
- }
- }
- Queue_Push(tokens,hold_token);
- }
-
- wonky_free(hold_token);
- src->src[src->where_in_src]=just_in_case;
-
- return tokens;
+ return get_error_token();
}
#endif
F diff --git a/src/frontend/lex/lex_preprocessing_directive.h b/src/frontend/lex/lex_preprocessing_directive.h
--- a/src/frontend/lex/lex_preprocessing_directive.h
+++ b/src/frontend/lex/lex_preprocessing_directive.h
#ifndef WONKY_LEX_PREPROCESSING_DIRECTIVE_H
#define WONKY_LEX_PREPROCESSING_DIRECTIVE_H WONKY_LEX_PREPROCESSING_DIRECTIVE_H
#include <lex_preprocessing_directive.hh>
+
+ #include <queue.h>
#include <program.h>
- #include <lexer.h>
- #include <automata.h>
- #include <gcc_error.h>
- #include <map.h>
- #include <scope.h>
#include <common.h>
- #include <preprocessing.h>
-
- void parse_preproc_line(struct Source_File *src,struct Translation_Data *translation_data);
- void parse_include_line(struct Source_File *src,struct Translation_Data *translation_data);
- void parse_define_line(struct Source_File *src,struct Translation_Data *translation_data);
- void parse_preproc_if_line(struct Source_File *src,struct Translation_Data *translation_data);
- void parse_preproc_ifdef_line(struct Source_File *src,struct Translation_Data *translation_data);
- void parse_preproc_ifndef_line(struct Source_File *src,struct Translation_Data *translation_data);
- void parse_preproc_undef_line(struct Source_File *src,struct Translation_Data *translation_data);
- void parse_preproc_error_line(struct Source_File *src,struct Translation_Data *translation_data);
- void parse_preproc_line_line(struct Source_File *src,struct Translation_Data *translation_data);
-
-
- struct Queue* lex_line(struct Source_File *src,struct Translation_Data *translation_data,char lex_defined_token);
- /*preproc if stuff*/
- /*returns an else or elif token, or if it hits matching endif before that return NULL*/
- struct token* preproc_find_else(struct Source_File *src,struct Translation_Data *translation_data,char jump_before);
-
- /*hack*/
- void preproc_lex_first_part(struct Source_File *src,struct Translation_Data *translation_data);
+ #include <wonky_malloc.h>
+ #include <automata.h>
+ #include <token.h>
+ #include <lexer.h>
- /*define stuff*/
- void expand_macro(struct token* macro_name,struct Source_File *src,struct Translation_Data *translation_data);
+ struct token* preprocessing_lex_directive(struct Lexer_Data *lexer_data,struct Source_Location *where);
+ struct token* preprocessing_lex_include_directive(struct Lexer_Data *lexer_data,struct Source_Location *where);
- struct define_directive* get_define_directive(struct token* macro_name);
- struct Queue* make_define_argument_list(size_t number_of_arguments);
+ struct token* preprocessing_lex_if_directive(struct Lexer_Data *lexer_data,struct Source_Location *where);
+ struct token* preprocessing_lex_ifdef_directive(struct Lexer_Data *lexer_data,struct Source_Location *where);
+ struct token* preprocessing_lex_ifndef_directive(struct Lexer_Data *lexer_data,struct Source_Location *where);
- void expand_macro_argument(struct Queue *replacement_tokens,struct Source_File *src,struct Translation_Data *translation_data);
- void load_macro_arguments(struct Queue *args,size_t number_of_arguments,struct Source_File *src,struct Translation_Data *translation_data);
- void flush_macro_arguments(size_t number_of_arguments,struct Queue *args);
- void parse_define_line(struct Source_File *src,struct Translation_Data *translation_data);
+ struct token* preprocessing_lex_define_directive(struct Lexer_Data *lexer_data,struct Source_Location *where);
+ struct token* preprocessing_lex_normal_define_directive(struct Lexer_Data *lexer_data,struct Source_Location *where);
+ struct token* preprocessing_lex_functionlike_define_directive(struct Lexer_Data *lexer_data,struct Source_Location *where);
- void delete_define_argument_list(size_t number_of_arguments,struct Queue *args);
- void delete_macro(void *macro);
+ struct token* preprocessing_lex_undef_directive(struct Lexer_Data *lexer_data,struct Source_Location *where);
+ struct token* preprocessing_lex_line_directive(struct Lexer_Data *lexer_data,struct Source_Location *where);
+ struct token* preprocessing_lex_error_directive(struct Lexer_Data *lexer_data,struct Source_Location *where);
+ struct token* preprocessing_lex_pragma_directive(struct Lexer_Data *lexer_data,struct Source_Location *where);
#endif
F diff --git a/src/frontend/lex/lexer.c b/src/frontend/lex/lexer.c
--- a/src/frontend/lex/lexer.c
+++ b/src/frontend/lex/lexer.c
struct Preprocessing_Translation_Unit *unit;
struct token *token;
- while(lexer_skip_white_space(lexer_data) , !lexer_eof(lexer_data))
+ while(!lexer_eof(lexer_data))
{
token=lexer_extract_next_token(lexer_data);
push_token_into_preprocessing_translation_unit(unit,token);
}state=BLANK_SPACE;
while(state!=NON_WHITE_SPACE && !lexer_eof(lexer_data))
+ {
switch(lexer_data->src->src[lexer_data->where_in_src])
{
'\n':
state=BLANK_SPACE;
++lexer_data->where_in_src;
+ lexer_data->is_in_the_begining_of_line=1;
break;
' ':
'\t':
++lexer_data->where_in_src;
break;
- '\':
+ '\\':
if(state==POSSIBLE_LINE_SPLICE)
{
state=NON_WHITE_SPACE;
default:
state=NON_WHITE_SPACE;
}
-
+ }
}
inline _Bool lexer_eof(struct Lexer_Data *lexer_data)
{
return lexer_data->where_in_src==lexer_data->src->src_size;
}
+ struct token* lexer_extract_next_token(struct Lexer_Data *lexer_data)
+ {
+ struct token *ret;
+ struct Automata_Node *hold_node;
+ size_t where_does_the_token_start_in_the_source_file;
+ do{
+ lexer_skip_white_space(lexer_data);
+ where_does_the_token_start_in_the_source_file=lexer_data->where_in_src;
+ hold_node=lexer_feed_automata_until_error(lexer_data);
+ if(hold_node==NULL)
+ if(lexer_eof(lexer_data))
+ return get_eof_token();
+ else
+ return lexer_get_general_error(lexer_data);
+ }while(hold_node->keyword!=KW_COMMENT);
+ ret=lexer_make_token_finishing_on_node(lexer_data, hold_node, where_does_the_token_start_in_the_source_file);
+ lexer_data->is_in_the_begining_of_line=0;
+ return ret;
+ }
+ struct Automata_Node* lexer_feed_automata_until_error(struct Lexer_Data *lexer_data)
+ {
+ struct Automata_Node *head;
+ struct Automata_Node *follower;
+ head=&chonky[0];
+ follower=NULL;
+ while(hold_node!=NULL)
+ {
+ follower=hold_node;
+ hold_node=lexer_feed_automata_next_char(lexer_data,hold_node);
+ }
+ return follower;
+ }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- void lex(struct Source_File *src,struct Translation_Data *translation_data)
+ struct Automata_Node *lexer_feed_automata_next_char(struct Lexer_Data *lexer_data,struct Automata_Node *node)
{
+ enum {
+ UNKNOWN_CHAR,
+ START_OF_POSSIBLE_LINE_SPLICE,
+ KNOWN_CHAR
+ } state;
+ size_t hold_where_in_src;
+ size_t hold_which_column;
+ size_t hold_which_row;
+ struct Automata_Node *ret;
+ state=UNKNOWN_CHAR;
+ hold_where_in_src=lexer_data->where_in_src;
+ hold_which_column=lexer_data->which_column;
+ hold_which_row=lexer_data->which_row;
- struct token *current_token;
- while(src->src[src->where_in_src]!='\0')
- {
- if(has_new_errors(translation_data))
- {
- push_lexing_error("Fatal error",src,translation_data);
- return;
- }
+ do{
- current_token=get_next_token(src,&chonky[0],1);
- if(current_token->type==KW_HASHTAG)
+ if(lexer_eof(lexer_data))
+ return NULL;
+
+ switch(state)
{
- if(src->is_in_the_begining_of_line)
- {
- parse_preproc_line(src,translation_data);
- wonky_free(current_token);
- }else
- {
- push_lexing_error("preprocessing directive must be at the beggining of the line",src,translation_data);
- wonky_free(current_token);
- while((current_token=get_next_token(src,&chonky[0],0))->type!=KW_NOTYPE)
+ case UNKNOWN_CHAR:
+ if(lexer_data->src->src[lexer_data->where_in_src] == '\\')
+ {
+ state=START_OF_POSSIBLE_LINE_SPLICE;
+ ++lexer_data->where_in_src;
+ ++lexer_data->which_column;
+ }else
{
- wonky_free(current_token);
+ state=KNOWN_CHAR;
}
- wonky_free(current_token);
- }
-
- }else if(current_token->type!=KW_NOTYPE)
- {
- expand_macro(current_token,src,translation_data);
- }else
- {
- if(src->where_in_src!=src->src_size)
- push_lexing_error("unexpected character",src,translation_data);
- wonky_free(current_token);
- return;
+ break;
+ case START_OF_POSSIBLE_LINE_SPLICE:
+ if(lexer_data->src->src[lexer_data->where_in_src] == '\n')
+ {
+ state=UNKNOWN_CHAR;
+ ++lexer_data->where_in_src;
+ ++lexer_data->which_column;
+ lexer_data->which_row=0;
+ }else
+ {
+ state=KNOWN_CHAR;
+ }
+ break;
+ default:
+ wonky_assert(SHOULD_NOT_REACH_HERE);
}
- }
-
- }
+ }while(state!=KNOWN_CHAR);
- /*hack*/
- void handle_splicing(struct token *word)
- {
- size_t back;
- size_t front;
- if(word->data_size==0)
- return;
- front=0;
- for(front;front<word->data_size-1;++front)
+ ret=node->delta[get_ch(& lexer_data->src->src[lexer_data->where_in_src],1)];
+ if(ret==NULL)
{
- if(word->data[front]=='\\' && word->data[front+1]=='\n')
- {
- front+=2;
- break;
- }
- }
- if(front==word->data_size-1)
- return;
+ lexer_data->where_in_src=hold_where_in_src;
+ lexer_data->which_column=hold_which_column;
+ lexer_data->which_row=hold_which_row;
- for(back=front-2;front<word->data_size-1;)
+ return NULL;
+ }else
{
- if(word->data[front]=='\\' && word->data[front+1]=='\n')
- {
- front+=2;
- }else
- {
- word->data[back]=word->data[front];
- ++front;
- ++back;
- }
+ return ret;
}
- word->data[back]=word->data[front];
}
-
- struct token* get_next_token(struct Source_File *src,struct Automata_Node *start_state,char skip_new_line)
+ struct token* lexer_make_token_finishing_on_node(struct Lexer_Data *lexer_data,struct Automata_Node *finishing_node,size_t start_position)
{
- int temp;
- char hold_char;
+ struct Source_Location *token_location;
- struct token *ret;
- struct Automata_Node *current_state;
- struct Automata_Node *best_state;
+ wonky_assert(lexer_data->where_in_src > start_position);
+ wonky_assert(is_valid_automata_node(finishing_node));
- /*ignore leading spaces,tabs and newlines*/
- skip_white_space(src,skip_new_line);
+ token_location=get_source_location(
+ lexer_data->which_column,
+ lexer_data->which_row,
+ lexer_data->where_in_src,
+ lexer_data->src->src_name
+ );
- src_reset_token_data(src,1);
-
- best_state=current_state=start_state;
+ switch(finishing_node->keyword)
+ {
+ case KW_HASHTAG:
+ if(lexer_data->automata_view==AUTOMATA_VIEW_PREPROCESSING_DIRECTIVE)
+ {
+ wonky_assert(!"IMPLEMENT # OPERATOR IN PREPROCESSING DIRECTIVES");
+ }else
+ {
+ if(!lexer_data->is_in_the_begining_of_line)
+ {
+ return get_error_token("# is not in the begining of a logical line",token_location,lexer_data->program);
+ }else
+ {
+ return lex_preprocessing_directive(lexer_data,token_location);
+ }
+ }
+ break;
+ case KW_ID:
+ if(finishing_node->data==NULL)
+ {
+ struct identifier *id;
+ id=get_identifier(lexer_data->src->src+start_position,lexer_data->where_in_src-start_position);
+ finishing_node->data=id;
+ }
+ return get_id_token(finishing_node->data,token_location);
+ break;
+ case KW_AUTO:
+ case KW_DO:
+ case KW_DOUBLE:
+ case KW_INT:
+ case KW_STRUCT:
+ case KW_BREAK:
+ case KW_ELSE:
+ case KW_LONG:
+ case KW_SWITCH:
+ case KW_CASE:
+ case KW_ENUM:
+ case KW_REGISTER:
+ case KW_TYPEDEF:
+ case KW_CHAR:
+ case KW_EXTERN:
+ case KW_RETURN:
+ case KW_UNION:
+ case KW_CONST:
+ case KW_FLOAT:
+ case KW_SHORT:
+ case KW_UNSIGNED:
+ case KW_CONTINUE:
+ case KW_FOR:
+ case KW_SIGNED:
+ case KW_VOID:
+ case KW_DEFAULT:
+ case KW_GOTO:
+ case KW_SIZEOF:
+ case KW_VOLATILE:
+ case KW_IF:
+ case KW_STATIC:
+ case KW_WHILE:
+ case KW_INLINE:
+ case KW_RESTRICT:
+ case KW_BOOL:
+ case KW_COMPLEX:
+ case KW_IMAGINARY:
+ return get_keyword_token(finishing_node->keyword,token_location);
+
+ case KW_EXCLAMATION:
+ case KW_PERCENT:
+ case KW_AND:
+ case KW_AND_AND:
+ case KW_OPEN_NORMAL:
+ case KW_CLOSE_NORMAL:
+ case KW_STAR:
+ case KW_PLUS:
+ case KW_COMMA:
+ case KW_MINUS:
+ case KW_DOT:
+ case KW_ARROW:
+ case KW_COLUMN:
+ case KW_SEMI_COLUMN:
+ case KW_LESS:
+ case KW_EQ:
+ case KW_EQEQ:
+ case KW_MORE:
+ case KW_QUESTION:
+ case KW_HAT:
+ case KW_PIPE:
+ case KW_PIPE_PIPE:
+ case KW_TILDE:
+ case KW_PLUSPLUS:
+ case KW_MINUSMINUS:
+ case KW_SHIFT_RIGHT:
+ case KW_SHIFT_LEFT:
+ case KW_LESS_EQ:
+ case KW_MORE_EQ:
+ case KW_NOT_EQ:
+ case KW_PLUS_EQ:
+ case KW_MINUS_EQ:
+ case KW_STAR_EQ:
+ case KW_PERCENT_EQ:
+ case KW_SHIFT_LEFT_EQ:
+ case KW_SHIFT_RIGHT_EQ:
+ case KW_AND_EQ:
+ case KW_HAT_EQ:
+ case KW_PIPE_EQ:
+ case KW_ELIPSIS:
+ case KW_DIV:
+ case KW_OPEN_SQUARE:
+ case KW_CLOSE_SQUARE:
+ case KW_CLOSE_CURLY:
+ case KW_OPEN_CURLY:
+ case KW_DIV_EQ:
+ case KW_FORWARD_SLASH:
+ return get_punctuator_token(finishing_node->keyword,token_location);
+
+ case KW_HEXADECIMAL_CONSTANT:
+ case KW_DECIMAL_CONSTANT:
+ case KW_OCTAL_CONSTANT:
+ case KW_UNSIGNED_DECIMAL_CONSTANT:
+ case KW_UNSIGNED_OCTAL_CONSTANT:
+ case KW_UNSIGNED_HEXADECIMAL_CONSTANT:
+ case KW_UNSIGNED_LONG_HEXADECIMAL_CONSTANT:
+ case KW_UNSIGNED_LONG_OCTAL_CONSTANT:
+ case KW_UNSIGNED_LONG_DECIMAL_CONSTANT:
+ case KW_UNSIGNED_LONG_LONG_DECIMAL_CONSTANT:
+ case KW_UNSIGNED_LONG_LONG_HEXADECIMAL_CONSTANT:
+ case KW_UNSIGNED_LONG_LONG_OCTAL_CONSTANT:
+ case KW_LONG_HEXADECIMAL_CONSTANT:
+ case KW_LONG_OCTAL_CONSTANT:
+ case KW_LONG_DECIMAL_CONSTANT:
+ case KW_LONG_LONG_HEXADECIMAL_CONSTANT:
+ case KW_LONG_LONG_OCTAL_CONSTANT:
+ case KW_LONG_LONG_DECIMAL_CONSTANT:
+ case KW_DOUBLE_DECIMAL_CONSTANT:
+ case KW_LONG_DOUBLE_DECIMAL_CONSTANT:
+ case KW_FLOAT_DECIMAL_CONSTANT:
+ case KW_DOUBLE_HEXADECIMAL_CONSTANT:
+ case KW_LONG_DOUBLE_HEXADECIMAL_CONSTANT:
+ case KW_FLOAT_HEXADECIMAL_CONSTANT:
+ case KW_CHAR_CONSTANT:
+ case KW_WIDE_CHAR_CONSTANT:
+ return get_constant_token(finishing_node->keyword,token_location,lexer_data->src->src+start_position,lexer_data->where_in_src-start_position);
+
+ case KW_STRING:
+ case KW_WIDE_STRING:
+ return get_string_token(finishing_node->keyword,token_location,lexer_data->src->src+start_position,lexer_data->where_in_src-start_position);
+ case PKW_IF:
+ case PKW_IFDEF:
+ case PKW_IFNDEF:
+ case PKW_ELIF:
+ case PKW_ELSE:
+ case PKW_ENDIF:
+ case PKW_INCLUDE:
+ case PKW_DEFINE:
+ case PKW_UNDEF:
+ case PKW_LINE:
+ case PKW_ERROR:
+ case PKW_PRAGMA:
+ if(finishing_node->data==NULL)
+ {
+ struct identifier *id;
- while( (hold_char=src_getc(src,1,0,0)) !='\0')
- {
- if(hold_char=='\n' && !skip_new_line)
- {
+ id=get_identifier(lexer_data->src->src+start_position,lexer_data->where_in_src-start_position);
+ finishing_node->data=id;
+ }
+ return get_id_token(finishing_node->data,token_location);
break;
- }
- current_state=current_state->delta[compress[hold_char]];
- if(current_state==NULL)
- {
- if(best_state->keyword==KW_COMMENT || best_state->keyword==PKW_COMMENT)
+ case PKW_DEFINED:
+ if(lexer_data->automata_view==AUTOMATA_VIEW_PREPROCESSING_DIRECTIVE)
{
- /*TODO account for new lines not counted in comment*/
- src_reset_token_data(src,0);
- best_state=current_state=start_state;
- skip_white_space(src,1);
+ return lex_defined_unary_operator(lexer_data,token_location);
}else
{
- return src_extract_token(src,best_state->keyword);
- }
+ if(finishing_node->data==NULL)
+ {
+ struct identifier *id;
- }else
- {
- best_state=current_state;
- src_assimilate_into_best_token(src);
- }
+ id=get_identifier(lexer_data->src->src+start_position,lexer_data->where_in_src-start_position);
+ finishing_node->data=id;
+ }
+ return get_id_token(finishing_node->data,token_location);
+ }
}
- if(best_state->keyword==KW_COMMENT || best_state->keyword==PKW_COMMENT)
- {
- return src_extract_token(src,KW_NOTYPE);
- }else
- {
- return src_extract_token(src,best_state->keyword);
- }
- return ret;
+ wonky_assert(SHOULD_NOT_REACH_HERE);
+ }
+
struct token *lex_preprocessing_directive(struct Lexer_Data *lexer_data,struct Source_Location *where)
{
	/*Dispatch to the preprocessing-directive lexer, or hand back an EOF
	  token when the source is already exhausted.*/
	if(lexer_eof(lexer_data))
		return get_eof_token();
	return preprocessing_lex_directive(lexer_data,where);
}
#endif
F diff --git a/src/frontend/lex/lexer.h b/src/frontend/lex/lexer.h
--- a/src/frontend/lex/lexer.h
+++ b/src/frontend/lex/lexer.h
#define LEXER_H LEXER_H
#include <lexer.hh>
- #include <stdio.h>
#include <queue.h>
#include <program.h>
#include <common.h>
#include <wonky_malloc.h>
+ #include <source_file.h>
#include <automata.h>
#include <token.h>
size_t which_column;
size_t which_row;
- size_t token_size;
-
-
- size_t best_token_size;
- size_t best_token_line;
- size_t best_token_column;
-
- size_t best_token_where_in_src_start;
- size_t best_token_where_in_src_end;
- _Bool best_token_beg_line;
-
-
_Bool is_in_the_begining_of_line;
+ enum Automata_View automata_view;
+
struct Source_File *src;
struct Program *program;
+
};
void lex(struct Source_Name *src_name,struct Program *program);
- struct Preprocessing_Translation_Unit* lex_inner(struct Lexer_Data *lexer_data);
+ struct Preprocessing_Translation_Unit* lex_inner_until(struct Lexer_Data *lexer_data);
struct Lexer_Data* get_lexer_data(struct Source_Name *src_name,struct Program *program);
void lexer_skip_white_space(struct Lexer_Data *lexer_data);
- inline _Bool lexer_eof(struct Lexer_Data *lexer_data);
+ _Bool lexer_eof(struct Lexer_Data *lexer_data);
struct token* lexer_extract_next_token(struct Lexer_Data *lexer_data);
+ struct Automata_Node* lexer_feed_automata_until_error(struct Lexer_Data *lexer_data);
+
+ struct Automata_Node *lexer_feed_automata_next_char(struct Lexer_Data *lexer_data,struct Automata_Node *node);
+ struct token *lexer_make_token_finishing_on_node(struct Lexer_Data *lexer_data,struct Automata_Node *finishing_node,size_t start_position);
- void delete_lexer_data(struct Lexer_Data *lexer_data);
+ struct token *lex_defined_unary_operator(struct Lexer_Data *lexer_data,struct Source_Location *where);
+ struct token *lex_preprocessing_directive(struct Lexer_Data *lexer_data,struct Source_Location *where);
+
+
+
+ void delete_lexer_data(struct Lexer_Data *lexer_data);
- void lex(struct Lexer_Data *lexer_data);
- struct token* get_next_token(struct Source_File *src,struct Automata_Node *start_state,char skip_new_line);
- struct Lexer_Data* get_lexer_data(struct Source_File *src,struct Program *program);
- void delete_lexer_data(struct Lexer_Data *lexer_data);
#endif
F diff --git a/src/frontend/lex/lexer.hh b/src/frontend/lex/lexer.hh
--- a/src/frontend/lex/lexer.hh
+++ b/src/frontend/lex/lexer.hh
struct Source_Name;
struct Source_File;
+ enum Automata_View
+ {
+ AUTOMATA_VIEW_NORMAL,
+ AUTOMATA_VIEW_PREPROCESSING_DIRECTIVE,
+ AUTOMATA_VIEW_END
+ };
+
+
+
#endif
F diff --git a/src/semantics/identifiers/denoted.h b/src/semantics/identifiers/denoted.h
--- a/src/semantics/identifiers/denoted.h
+++ b/src/semantics/identifiers/denoted.h
#include <map.h>
#include <lexer.h>
+ #include <identifier.h>
#include <type.h>
#include <scope.h>
#include <evaluation.h>
struct Denoted_Base
{
enum Denotation_Type denotation;
- struct token *id;
+ struct identifier *id;
struct Type *type;
};
struct Denoted_Function
{
enum Denotation_Type denotation;
enum Linkage_Type linkage;
- struct token *id;
+ struct identifier *id;
struct Type *type;
struct Memory_Location *location;
enum Denotation_Type denotation;
enum Linkage_Type linkage;
- struct token *id;
+ struct identifier *id;
struct Object *object;
};
struct Denoted_Type
{
enum Denotation_Type denotation;
- struct token *id;
+ struct identifier *id;
struct Type *type;
};
struct Denoted_Enum_Const
{
enum Denotation_Type denotation;
- struct token *id;
+ struct identifier *id;
struct Enum *parent;
int value;
F diff --git a/src/semantics/program/program.c b/src/semantics/program/program.c
--- a/src/semantics/program/program.c
+++ b/src/semantics/program/program.c
return ret;
}
- struct Translation_Data* get_translation_data(struct Map *types,struct Linkage *internal_linkage,struct Linkage *external_linkage)
+ struct Translation_Data* get_translation_data(struct Map *types,struct Linkage *internal_linkage,struct Program *program)
{
struct Translation_Data *ret;
ret=wonky_malloc(sizeof(struct Translation_Data));
ret->number_of_errors_when_last_checked=0;
ret->external_linkage=external_linkage;
- ret->internal_linkage=internal_linkage;
+ ret->program=program;
return ret;
}
F diff --git a/src/semantics/program/program.h b/src/semantics/program/program.h
--- a/src/semantics/program/program.h
+++ b/src/semantics/program/program.h
struct Program
{
+ /*AST*/
struct Queue *translation_units;
+
struct Queue *source_files;
struct Queue *errors;
struct Map *types;
struct Map *preprocessing_translation_units;
+ struct Queue *preprocessing_translation_units_to_be_compiled;
struct Linkage *external_linkage;
F diff --git a/src/semantics/value/type.h b/src/semantics/value/type.h
--- a/src/semantics/value/type.h
+++ b/src/semantics/value/type.h
enum Type_Specifier specifier;
struct Map *node;
- struct token *id;
+ struct identifier *id;
size_t size;
/* queue of denoted objects for
* preserving the order of the members
{
enum Type_Specifier specifier;
- struct token *id;
+ struct identifier *id;
struct Queue *consts;
char is_finished;
};
F diff --git a/src/syntax/automatas/automata.hh b/src/syntax/automatas/automata.hh
--- a/src/syntax/automatas/automata.hh
+++ b/src/syntax/automatas/automata.hh
enum Automata_Action
{
AUTOMATA_ACTION_DISPENSE_TOKEN,
- AUTOMATA_ACTION_SWITCH_AUTOMATA,
- AUTOMATA_ACTION_MACRO_EXPANSION,
AUTOMATA_ACTION_NO_ACTION,
};
KW_STRUCT,
KW_BREAK,
KW_ELSE,
- KW_DEFINED,
KW_LONG,
KW_SWITCH,
KW_CASE,
PKW_UNDEF,
PKW_LINE,
PKW_ERROR,
+ PKW_DEFINED,
PKW_PRAGMA,
PKW_COMMENT,
PKW_NOTYPE,
F diff --git a/src/syntax/automatas/generator/keyword_list.c b/src/syntax/automatas/generator/keyword_list.c
--- a/src/syntax/automatas/generator/keyword_list.c
+++ b/src/syntax/automatas/generator/keyword_list.c
},
{
.keyword="defined",
- .kw_string="KW_ID",
+ .kw_string="PKW_DEFINED",
.action_string="AUTOMATA_ACTION_DISPENSE_TOKEN",
.data_string="&defined_special_identifier"
},
F diff --git a/src/syntax/source_file.h b/src/syntax/source_file.h
--- a/src/syntax/source_file.h
+++ b/src/syntax/source_file.h
#ifndef WONKY_SOURCE_FILE_H
#define WONKY_SOURCE_FILE_H WONKY_SOURCE_FILE_H
#include <source_file.hh>
+
+ #include <stdio.h>
+ #include <automata.h>
+ #include <gcc_string.h>
extern char *well_known_locations_base[];
struct Source_Name
{
{
size_t line;
size_t column;
- };
- struct Source_File
- {
- enum Source_Text_Type type;
- char *src;
- size_t src_size;
-
+ size_t on_which_byte;
struct Source_Name *src_name;
};
- struct Source_Section
+ struct Source_File
{
enum Source_Text_Type type;
char *src;
size_t src_size;
struct Source_Name *src_name;
- struct Source_Location *where_in_source;
};
struct Source_File* extract_source_file(FILE *in,struct Source_Name *name);
struct Source_File* get_source_file(char *filename,char **where_to_search);
struct Source_Name* get_source_name(char *filename,char *base);
struct Source_Location* get_source_location(size_t line,size_t column,size_t on_which_byte,struct Source_Name *src_name); /*Fix: src_name is a struct Source_Name* (callers pass src->src_name, declared as Source_Name* in struct Source_Location/Source_File), not a Source_Location**/
void normalise_source_name(struct Source_Name *name);
char src_getc(struct Source_File *src,char skip_line_splice,char skip_comments,char skip_new_line);
void src_ungetc(struct Source_File *src);
F diff --git a/src/syntax/token/token.h b/src/syntax/token/token.h
--- a/src/syntax/token/token.h
+++ b/src/syntax/token/token.h
#include <wonky_assert.h>
#include <source_file.h>
+ /*the tokens are a bit heavy*/
+
struct token
{
enum LEXER_TYPE type;
struct token_defined_unary_operator
{
enum LEXER_TYPE type;
+ struct Source_Location *location;
+ struct identifier *id;
+ };
+ struct token_hashtag_unary_operator
+ {
+ enum LEXER_TYPE type;
+ struct Source_Location *location;
+ struct token_functionlike_define_directive *operand;
};
- struct token_unlexed_source_part
+ struct token_hashtag_hastag_unary_operator
{
enum LEXER_TYPE type;
- struct Source_Section *section;
+ struct Source_Location *location;
+ struct Queue *operands;
+ };
+
+ struct token_error
+ {
+ enum LEXER_TYPE type;
+ struct Source_Location *location;
+ struct Translation_Message *error;
};
/*
struct token_error_directive* get_error_directive_token(struct Source_Location *location,struct token_string *error_message);
struct token_pragma_directive* get_pragma_directive(struct Source_Location *location,enum Pragma_Type type);
struct token_defined_unary_operator* get_defined_unary_operator(struct Source_Location *location,struct identifier *id);
+ struct token* get_hashtag_unary_operator(struct Source_Location *location,struct token_functionlike_define_directive *operand);
+ struct token* get_hashtag_hashtag_unary_operator(struct Source_Location *location,struct Queue *operands);
+ struct token* get_error_token(const char *msg,struct Source_Location *location,struct Program *program, ...);
+ struct token* get_eof_token();
#endif
F diff --git a/src/syntax/token/token.hh b/src/syntax/token/token.hh
--- a/src/syntax/token/token.hh
+++ b/src/syntax/token/token.hh
struct token_error_directive;
struct token_pragma_directive;
struct token_defined_unary_operator;
+ struct token_hashtag_unary_operator;
+ struct token_hashtag_hastag_unary_operator;
+ struct token_error;
enum Punctuator_Token_Type
{
F diff --git a/src/wonky.h b/src/wonky.h
--- a/src/wonky.h
+++ b/src/wonky.h
#include <parse_expression.h>
#include <parse_statement.h>
#include <parse_translation_unit.h>
- #include <preprocessing.h>
+ #include <lex_preprocessing_directive.h>
#include <program.h>
#include <scope.h>
#include <evaluation.h>