F diff --git a/doc/scribblings/preprocessor_rewrite.txt b/doc/scribblings/preprocessor_rewrite.txt
new file mode 100644
--- /dev/null
+++ b/doc/scribblings/preprocessor_rewrite.txt
+
+
+
+
+
+
+
+
+
+
+
+ <chonky>---------------------------<defined macro in preprocessing directive>
+ |
+ <Map of all ids>
+ | |
+ Pointer to macro |
+ |
+ Id number the translation
+ unit that the macro was
+ last defined
+
+
+ |
+ |
+ |
+ |
+ |
+ |
+ |
+ | += ID
+ | |
+ +---------<Macro Map>
+ | |
+ | += token ( with the macro token type )
+ |
+ pushing tokens | gets stored in
+ lexing ----------------> <Preprocessing Translation Unit> ----------------------------> <Source Map>
+ ^ | | |
+ | | -------+ +------> <Preprocessing Translation Unit>
+ | | |
+ | | V
+ | | V
+ | | included as
+ | | (the string used to include the file)
+ | | ( or if specified on command line then there must be some indication of that)
+ | |
+ | |
+ | |
+ +---------------+ |
+ | | |
+ | | |
+ | if we meet an |
+ | include of a |
+ | macro expansion |
+ | we push the cur- |
+ | rent pointer and |
+ | jump into the |
+ | tokens of the |
+ | thing that is |
+ | being pointed to |
+ | ^ |
+ | | |
+ | | |
+ which token <-+ call stack |
+ is next \ ^ |
+ \ | |
+ <Token Ptr> ------|
+ / ^ |
+ +-------/ | |
+ | | |
+ | | |
+ | <Translation Data> |
+ Number of ^ | getting tokens ------> possible macro expansion
+ tokens in | V |
+ macro argument +----- parsing et al. |
+ remaining V
+ (better yet in current context) <Denoted>
+ __/ \
+ / +-----------------------------+
+ V |
+ <Denoted Functionlike Macro> |
+ | | |
+ | | |
+ V | |
+ <Macro Argument Tokens> V |
+ ^ <Macro Tokens> |
+ | | | |
+ | | V |
+ | | <Normal Token> |
+ | V |
+ +-----<Special Macro Argument Id Token> |
+ |
+ |
+ V
+ <Denoted Normal Macro>
+ |
+ |
+ V
+ <Macro Tokens>
+
+
+
+ Preprocessing Translation Unit
+ push_token_into_preprocessing_translation_unit
+ get_preprocessing_translation_unit
+ delete_preprocessing_translation_unit
+ Token Ptr
+ token_ptr_execute_preprocessing_directive
+ token_ptr_execute_include_directive
+ token_ptr_execute_define_directive
+ token_ptr_execute_undefine_directive
+ token_ptr_execute_if_directive
+ token_ptr_execute_ifdef_directive
+ token_ptr_execute_ifndef_directive
+ token_ptr_execute_line_directive
+ token_ptr_execute_error_token
+ token_ptr_execute_pragma_token
+ token_ptr_execute_functionlike_define_token
+ token_ptr_jump_to_preprocessing_translation_unit
+ token_ptr_jump_back_from_preprocessing_translation_unit
+ get_token_under_ptr_then_move
+ get_token_ptr
+ token_ptr_jump_to_function_like_macro_argument
+ delete_token_ptr
+ Source Name
+ normalise_source_name
+ Denoted Functionlike Macro
+ translation_unit_number
+ Denoted Macro
+ translation_unit_number
+
+ Translation_Unit
+ check_if_source_file_has_been_processed
+ get_processed_source_file_with_name
+ push_preprocessing_translation_unit_into_source_map
+ Source Map
+
+ lex_source_file
+ normalise_source_name
+ check_if_source_file_has_been_processed
+ load_source_file
+ lex_preprocessing_directive
+ lex_til_new_line
+ parse_preprocessing_directive_tokens
+ lex_source_file
+ push_token_into_preprocessing_translation_unit
+ push_preprocessing_translation_unit_into_source_map
+
+ token
+ get_preprocessing_include_directive_token
+ get_preprocessing_if_token
+ get_preprocessing_ifdef_token
+ get_preprocessing_ifndef_token
+ get_preprocessing_undef_token
+ get_preprocessing_normal_define_token
+	get_preprocessing_functionlike_define_token
+ get_preprocessing_line_token
+ get_preprocessing_error_token
+ get_preprocessing_pragma_token
+ get_preprocessing_defined_functionlike_macro_token
+
+
+
+
+
+ token
+ | |
+ -----------+ +-----
+ | |
+ | specific token struct
+ token type members
+
+
+
+ token
+ |
+ |
+ Preprocessing Directive
+ | | |
+ ----------------+ | +---------------------
+ | | |
+ Include Directive | |
+ | | |
+ tokens | Conditional Directive
+ that might | | | |
+ expand into something | +------+ | +-----
+ | | where to go |
+ | condition if true |
+ | tokens where to go
+ | if false
+ |
+ |
+ |
+ |
+ |
+ |
+ |
+ ------------------------+-----------------+---------------+---------------------+------------------------------+
+ | | | | | |
+ | Undefine Directive Line Directive | | |
+ +Define Directive-+ | | | | Defined
+ | | ID tokens | | Functionlike
+ | | Error Directive | Macro
+ | | | |
+ Function like define Normal Define String |
+ | | | | | |
+ ID | tokens to ID tokens to |
+ | replace replace id Pragma Directive
+ | with with |
+ | type
+ array with
+ locations of
+ argument tokens
+ |
+ |
+ Function like define argument
+ | |
+ pointer to number of
+ first token tokens in argument
+
+
+
+
+
+
+
+
+ Translation_Data
+ |
+ Token Pointer-------+
+ | | |
+ Call stack Location |
+ |
+ Current Token
+
+
+
+
+
+
+ Token_Pointer
+ | | |
+ Current | Current
+ Token | Location
+ |
+ Call
+ Stack
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
F diff --git a/src/frontend/lex/automatas/automata.hh b/src/frontend/lex/automatas/automata.hh
--- a/src/frontend/lex/automatas/automata.hh
+++ b/src/frontend/lex/automatas/automata.hh
PKW_PRAGMA,
PKW_COMMENT,
PKW_NOTYPE,
+
KEYWORDS_END
};
F diff --git a/src/frontend/lex/lexer.c b/src/frontend/lex/lexer.c
--- a/src/frontend/lex/lexer.c
+++ b/src/frontend/lex/lexer.c
#define WONKY_LEXER_C WONKY_LEXER_C
/*asdf*/#include <lexer.h>
- char *well_known_locations_base[]={"","/usr/include/","/usr/include/x86_64-linux-gnu/",NULL};
void lex(struct Source_File *src,struct Translation_Data *translation_data)
{
}
word->data[back]=word->data[front];
}
- void goto_new_line(struct Source_File *src,struct Translation_Data *translation_data)
- {
- char hold_char;
- while( (hold_char=src_getc(src,1,1,0)) != '\0' && hold_char != '\n');
- src->is_in_the_begining_of_line=1;
- ++src->where_in_src;
- ++src->which_row;
- src->which_column=0;
- }
- void chase_new_line(struct Source_File *src,struct Translation_Data *translation_data)
- {
- char hold_char;
- for(hold_char=src_getc(src,1,1,0);hold_char!='\n' && hold_char!='\0';
- hold_char=src_getc(src,1,1,0));
-
- ++src->where_in_src;
- ++src->which_row;
- src->is_in_the_begining_of_line=1;
- src->which_column=0;
-
-
- }
- /*returns the number of bytes skipped*/
- size_t skip_line_splice(struct Source_File *src)
- {
- size_t current_size=0;
- while(src->where_in_src<src->src_size-1 && src->src[src->where_in_src]=='\\' && src->src[src->where_in_src+1]=='\n')
- {
- src->where_in_src+=2;
- current_size+=2;
- }
- return current_size;
- }
- void skip_white_space(struct Source_File *src,char skip_new_line)
- {
- char hold_char;
- while(hold_char=src_getc(src,1,1,skip_new_line))
- {
- if(hold_char=='\n' && !skip_new_line)
- {
- return ;
- }
- if(hold_char!=' ' && hold_char!='\t')
- {
- src_ungetc(src);
- return ;
- }
- }
- }
- struct token_vector Lex_Queue_Condense(struct Queue *tokens)
- {
- size_t i;
- struct token_vector ret;
- struct token *hold;
-
- ret.tokens=wonky_malloc(sizeof(struct token)*tokens->size);
- ret.size=tokens->size;
-
- for(i=0;tokens->size>0;++i)
- {
- hold=Queue_Pop(tokens);
- ret.tokens[i]=*hold;
- wonky_free(hold);
- }
-
- Queue_Destroy(tokens);
-
- return ret;
- }
-
- char check(struct Translation_Data *translation_data,enum KEYWORDS kw,size_t ahead)
- {
- size_t i;
- struct Queue_Node *current;
- if(translation_data->tokens->size<=ahead)
- {
- return 0;
- }else
- {
- for(i=0,current=translation_data->tokens->first;i<ahead;++i,current=current->prev);
-
- if( ((struct token*)(current->data))->type == kw )
- {
- return 1;
- }else
- {
- return 0;
- }
- }
- }
- char get_and_check(struct Translation_Data *translation_data,enum KEYWORDS kw)
- {
- struct token *hold_token;
- if(translation_data->tokens->size==0)
- {
- return 0;
- }else
- {
- hold_token=translation_data->tokens->first->data;
- if(hold_token->type!=kw)
- {
- return 0;
- }else
- {
- hold_token=Queue_Pop(translation_data->tokens);
- wonky_free(hold_token);
- return 1;
- }
- }
- }
- char get_and_check_unsafe(struct Translation_Data *translation_data,enum KEYWORDS kw)
- {
- struct token *hold_token;
- hold_token=translation_data->tokens->first->data;
- if(hold_token->type!=kw)
- {
- return 0;
- }else
- {
- hold_token=Queue_Pop(translation_data->tokens);
- wonky_free(hold_token);
- return 1;
- }
- }
- void chomp(struct Translation_Data *translation_data)
- {
- wonky_free(Queue_Pop(translation_data->tokens));
- }
-
- enum KEYWORDS kw_get(struct Translation_Data *translation_data)
- {
- if(translation_data->tokens->size==0)
- return KW_NOTYPE;
- return ((struct token*)(translation_data->tokens->first->data))->type;
-
- }
- char compare_tokens(struct token *a,struct token *b)
- {
- size_t i;
- if(a->data_size!=b->data_size)
- return 0;
- for(i=0;i<a->data_size;++i)
- {
- if(a->data[i]!=b->data[i])
- return 0;
- }
- return 1;
- }
struct token* get_next_token(struct Source_File *src,struct Automata_Node *start_state,char skip_new_line)
{
}
return ret;
}
- /*here be dragons*/
- char src_getc(struct Source_File *src,char skip_line_splice,char skip_comments,char skip_new_line)
- {
- superhack:
- if(src->src[src->where_in_src]=='\\' && skip_line_splice)
- {
- if(src->where_in_src < src->src_size-1 && src->src[src->where_in_src+1]=='\n')
- {
- src->where_in_src+=2;
- ++src->which_row;
- src->token_size+=2;
- src->which_column=0;
- goto superhack;
- }else
- {
- ++src->token_size;
- ++src->which_column;
- ++src->where_in_src;
- src->is_in_the_begining_of_line=0;
- return '\\';
- }
- }else
- {
- if(src->src[src->where_in_src]=='\n' && skip_new_line)
- {
- ++src->which_row;
- src->which_column=0;
- src->is_in_the_begining_of_line=1;
-
- ++src->where_in_src;
- goto superhack;
- }else if(src->src[src->where_in_src]=='/' && skip_comments)
- {
- if(src->src[src->where_in_src+1]=='*')
- {
- char hold_char;
-
-
- src->where_in_src+=2;
- hold_char=src_getc(src,1,0,1);
- while(hold_char)
- {
- if(hold_char=='*')
- {
- hold_char=src_getc(src,1,0,1);
- if(hold_char=='\0')
- {
- src->where_in_src=src->src_size;
- return '\0';
- }
- else if(hold_char=='/')
- {
- goto superhack;
- }
- }else
- {
- hold_char=src_getc(src,1,0,1);
- }
- }
- src->where_in_src=src->src_size;
- return '\0';
-
- }
- }else
- {
- ++src->which_column;
- }
- ++src->token_size;
- if(src->src[src->where_in_src]!='#' || src->is_in_the_begining_of_line!=1)
- src->is_in_the_begining_of_line=0;
- if(src->src[src->where_in_src]=='\n')
- {
- return '\n';
- }
-
- if(src->src[src->where_in_src]=='\0')
- return src->src[src->where_in_src];
- else
- return src->src[src->where_in_src++];
- }
- }
- void src_ungetc(struct Source_File *src)
- {
- --src->where_in_src;
- if(src->src[src->where_in_src]=='\n')
- {
- --src->which_row;
- src->which_column=0;
- }
- }
- struct token* copy_token(struct token *src)
- {
- struct token *cpy;
- cpy=wonky_malloc(sizeof(struct token));
- *cpy=*src;
- return cpy;
- }
- struct token* src_extract_token(struct Source_File *src,enum KEYWORDS kw)
- {
- struct token *ret;
- ret=wonky_malloc(sizeof(struct token));
- ret->type=kw;
-
- ret->data_size=src->best_token_size;
- ret->column=src->best_token_column;
- ret->line=src->best_token_line;
- ret->data=src->src+src->best_token_where_in_src_start;
- ret->filename=src->src_name->filename;
- handle_splicing(ret);
- src->where_in_src=src->best_token_where_in_src_end;
- src->is_in_the_begining_of_line=src->best_token_beg_line;
- return ret;
- }
-
- void src_reset_token_data(struct Source_File *src,char use_src_as_base)
- {
- src->token_size=0;
- src->best_token_size=0;
- src->best_token_line=src->which_row;
- src->best_token_column=src->which_column;
- if(use_src_as_base)
- {
- src->best_token_where_in_src_end=src->where_in_src;
- }else
- {
- src->where_in_src=src->best_token_where_in_src_end;
- }
- src->best_token_where_in_src_start=src->where_in_src;
- }
- void src_assimilate_into_best_token(struct Source_File *src)
- {
- src->best_token_size=src->token_size;
- src->best_token_line=src->which_row;
- src->best_token_column=src->which_column;
- src->best_token_where_in_src_end=src->where_in_src;
- src->best_token_beg_line=src->is_in_the_begining_of_line;
- }
- void delete_source_file(struct Source_File *src)
- {
- delete_source_name(src->src_name);
- wonky_free(src->src);
- wonky_free(src);
- }
- void delete_source_name(struct Source_Name *name)
- {
- wonky_free(name->filename);
- wonky_free(name->base);
- wonky_free(name);
- }
- void flush_tokens(struct Queue *tokens)
- {
- while(tokens->size>0)
- wonky_free(Queue_Pop(tokens));
- }
#endif
F diff --git a/src/frontend/lex/lexer.h b/src/frontend/lex/lexer.h
--- a/src/frontend/lex/lexer.h
+++ b/src/frontend/lex/lexer.h
#include <wonky_malloc.h>
#include <automata.h>
-
- extern char *well_known_locations_base[];
- struct token
- {
- enum KEYWORDS type;
- size_t data_size;
- char *data;
- size_t line,column;
- /*:X*/
- const char *filename;
- };
-
- struct token_vector
- {
- struct token *tokens;
- size_t size;
- };
-
-
-
- struct Source_Name
- {
- char *filename;
- char *base;
- };
-
- struct Source_File
- {
- struct Source_Name *src_name;
-
- char *src;
- size_t src_size;
- size_t where_in_src;
- size_t which_column;
- size_t which_row;
-
- size_t token_size;
-
-
- size_t best_token_size;
- size_t best_token_line;
- size_t best_token_column;
-
- size_t best_token_where_in_src_start;
- size_t best_token_where_in_src_end;
- char best_token_beg_line;
-
-
- char is_in_the_begining_of_line;
-
- };
-
-
+ #include <token.h>
void lex(struct Source_File *src,struct Translation_Data *translation_data);
struct token* get_next_token(struct Source_File *src,struct Automata_Node *start_state,char skip_new_line);
- struct token* copy_token(struct token *src);
- struct token_vector Lex_Queue_Condense(struct Queue *tokens);
- void handle_splicing(struct token *word);
- void chase_new_line(struct Source_File *src,struct Translation_Data *translation_data);
- void goto_new_line(struct Source_File *src,struct Translation_Data *translation_data);
- void skip_white_space(struct Source_File *src,char skip_new_line);
- size_t skip_line_splice(struct Source_File *src);
-
-
- char check(struct Translation_Data *translation_data,enum KEYWORDS kw,size_t ahead);
- char get_and_check(struct Translation_Data *translation_data,enum KEYWORDS kw);
- char get_and_check_unsafe(struct Translation_Data *translation_data,enum KEYWORDS kw);
- void chomp(struct Translation_Data *translation_data);
- enum KEYWORDS kw_get(struct Translation_Data *translation_data);
- char compare_tokens(struct token *a,struct token *b);
- char src_getc(struct Source_File *src,char skip_line_splice,char skip_comments,char skip_new_line);
- void src_ungetc(struct Source_File *src);
- struct token* src_extract_token(struct Source_File *src,enum KEYWORDS kw);
- void src_reset_token_data(struct Source_File *src,char use_src_as_base);
- void src_assimilate_into_best_token(struct Source_File *src);
- void delete_source_file(struct Source_File *src);
- void delete_source_name(struct Source_Name *name);
- void flush_tokens(struct Queue *tokens);
#endif
F diff --git a/src/semantics/program/program.c b/src/semantics/program/program.c
--- a/src/semantics/program/program.c
+++ b/src/semantics/program/program.c
return ret;
}
- struct Source_File* extract_source_file(FILE *in,struct Source_Name *name)
- {
- long file_size;
- struct Source_File *src;
-
-
- if(fseek(in,0,SEEK_END)==-1)
- return NULL;
- if((file_size=ftell(in))==-1)
- return NULL;
- if(fseek(in,0,SEEK_SET)==-1)
- return NULL;
-
- src=wonky_malloc(sizeof(struct Source_File));
-
- src->src_name=name;
-
- src->src=wonky_malloc(file_size+1);
- src->src_size=file_size;
-
- src->where_in_src=0;
-
- src->which_column=0;
- src->which_row=0;
- src->is_in_the_begining_of_line=1;
- src->src[file_size]='\0';
-
- fread(src->src,1,file_size,in);
- fclose(in);
- return src;
- }
struct Translation_Data* get_translation_data(struct Map *types,struct Linkage *internal_linkage,struct Linkage *external_linkage)
{
struct Translation_Data *ret;
normalise_source_name(ret);
return ret;
}
- /*where_to_search ends in a NULL pointer*/
- struct Source_File* get_source_file(char *filename,char **where_to_search)
- {
- FILE *in;
- char *temp_name;
- char is_directory=0;
- struct Source_Name *name;
- struct Source_File *file;
-
- wonky_assert(where_to_search!=NULL);
- wonky_assert(*where_to_search!=NULL);
- do
- {
- temp_name=gstr_append(*where_to_search,filename);
- in=fopen(temp_name,"r");
- wonky_free(temp_name);
- if(in==NULL)
- continue;
-
- name=get_source_name(filename,*where_to_search);
- file=extract_source_file(in,name);
- if(file!=NULL)
- {
- return file;
- }else
- {
- delete_source_name(name);
- }
- }while(*(++where_to_search));
- return NULL;
- }
-
- /*this might cause compatability issues TODO*/
- void normalise_source_name(struct Source_Name *name)
- {
- size_t offset;
- size_t i;
- size_t last_slash;
- char *hold_base;
-
- for(last_slash=offset=0;name->filename[offset];++offset)
- {
- if(name->filename[offset]=='/')
- {
- last_slash=offset;
- }
- }
-
- if(last_slash==0)
- return;
-
- if(name->base==NULL)
- {
- offset=0;
- name->base=wonky_malloc(last_slash+1);
- name->base[last_slash]='\0';
- name->base[last_slash-1]='/';
-
- }else
- {
- offset=gstrlen((char*)name->base);
- hold_base=wonky_malloc(offset+last_slash+2);
- strmv(hold_base,(char*)name->base);
-
- hold_base[last_slash+offset]='/';
- hold_base[last_slash+offset+1]='\0';
- wonky_free((void*)name->base);
-
- name->base=hold_base;
- }
-
- for(i=0;i<last_slash;++i)
- name->base[offset+i]=name->filename[i];
-
-
- ++i;
- /*prune the filename*/
- offset=gstrlen(name->filename+i);
- hold_base=wonky_malloc(offset+1);
- strmv(hold_base,name->filename+i);
- wonky_free(name->filename);
- name->filename=hold_base;
- }
-
struct Program* parse_program(char **base_source_names)
{
struct Source_File *base_file;
Queue_Push(where_to_push,tree);
}
}
+	/*Peek ahead into the token queue: return 1 if the token `ahead` positions
+	  from the front has type kw, 0 otherwise (also 0 when the queue holds
+	  fewer than ahead+1 tokens).  Nothing is consumed.
+	  NOTE(review): the walk follows ->prev from the first node — presumably
+	  the queue links run from front towards back; confirm against queue.h*/
+	char check(struct Translation_Data *translation_data,enum KEYWORDS kw,size_t ahead)
+	{
+		size_t i;
+		struct Queue_Node *current;
+		if(translation_data->tokens->size<=ahead)
+		{
+			return 0;
+		}else
+		{
+			/*step `ahead` links in from the front of the queue*/
+			for(i=0,current=translation_data->tokens->first;i<ahead;++i,current=current->prev);
+
+			if( ((struct token*)(current->data))->type == kw )
+			{
+				return 1;
+			}else
+			{
+				return 0;
+			}
+		}
+	}
+	/*If the front token of the queue has type kw, pop and free it and return 1;
+	  otherwise leave the queue untouched and return 0.*/
+	char get_and_check(struct Translation_Data *translation_data,enum KEYWORDS kw)
+	{
+		struct token *hold_token;
+		if(translation_data->tokens->size==0)
+		{
+			return 0;
+		}else
+		{
+			hold_token=translation_data->tokens->first->data;
+			if(hold_token->type!=kw)
+			{
+				return 0;
+			}else
+			{
+				hold_token=Queue_Pop(translation_data->tokens);
+				wonky_free(hold_token);
+				return 1;
+			}
+		}
+	}
+	/*Same as get_and_check but without the emptiness check — the caller must
+	  guarantee the queue is non-empty (hence "unsafe").*/
+	char get_and_check_unsafe(struct Translation_Data *translation_data,enum KEYWORDS kw)
+	{
+		struct token *hold_token;
+		hold_token=translation_data->tokens->first->data;
+		if(hold_token->type!=kw)
+		{
+			return 0;
+		}else
+		{
+			hold_token=Queue_Pop(translation_data->tokens);
+			wonky_free(hold_token);
+			return 1;
+		}
+	}
+	/*Discard the front token of the queue (caller must know one exists)*/
+	void chomp(struct Translation_Data *translation_data)
+	{
+		wonky_free(Queue_Pop(translation_data->tokens));
+	}
+
+	/*Type of the front token without consuming it, or KW_NOTYPE when the
+	  queue is empty*/
+	enum KEYWORDS kw_get(struct Translation_Data *translation_data)
+	{
+		if(translation_data->tokens->size==0)
+			return KW_NOTYPE;
+		return ((struct token*)(translation_data->tokens->first->data))->type;
+
+	}
+	/*Pop and free every token left in the queue*/
+	void flush_tokens(struct Queue *tokens)
+	{
+		while(tokens->size>0)
+			wonky_free(Queue_Pop(tokens));
+	}
#endif
F diff --git a/src/semantics/program/program.h b/src/semantics/program/program.h
--- a/src/semantics/program/program.h
+++ b/src/semantics/program/program.h
};
struct Program* get_program();
- struct Source_Name* get_source_name(char *filename,char *base);
-
- struct Source_File* extract_source_file(FILE *in,struct Source_Name *name);
- struct Source_File* get_source_file(char *filename,char **where_to_search);
- void normalise_source_name(struct Source_Name *name);
-
struct Translation_Data* get_translation_data(struct Map *types,struct Linkage *internal_linkage,struct Linkage *external_linkage);
-
-
struct Program* parse_program(char **base_source_names);
+
void lex_program(struct Translation_Data *hold,struct Source_File *file);
void entype_program(struct Program *program);
-
_Bool has_new_errors(struct Translation_Data *translation_data);
_Bool has_no_tokens(struct Translation_Data *translation_data);
void push_if_tree_is_undefined_function(struct AST *tree,struct Queue *where_to_push);
void push_if_tree_is_uninitialised_object(struct AST *tree,struct Queue *where_to_push);
+ char check(struct Translation_Data *translation_data,enum KEYWORDS kw,size_t ahead);
+ char get_and_check(struct Translation_Data *translation_data,enum KEYWORDS kw);
+ char get_and_check_unsafe(struct Translation_Data *translation_data,enum KEYWORDS kw);
+ void chomp(struct Translation_Data *translation_data);
+ enum KEYWORDS kw_get(struct Translation_Data *translation_data);
+
+
+ void flush_tokens(struct Queue *tokens);
#endif
F diff --git a/src/semantics/program/source_file.c b/src/semantics/program/source_file.c
new file mode 100644
--- /dev/null
+++ b/src/semantics/program/source_file.c
+ #ifndef WONKY_SOURCE_FILE_C
+ #define WONKY_SOURCE_FILE_C WONKY_SOURCE_FILE_C
+ #include <source_file.h>
+
+	/*include search path probed by get_source_file; the leading "" entry makes
+	  plain relative paths be tried verbatim first; NULL terminated*/
+	char *well_known_locations_base[]={"","/usr/include/","/usr/include/x86_64-linux-gnu/",NULL};
+	/*Slurp the whole stream into a freshly allocated Source_File.
+	  The stream is closed on every path.  Returns NULL when the stream is not
+	  seekable or cannot be read in full; in that case the caller still owns
+	  *name, otherwise ownership of *name passes to the returned struct.*/
+	struct Source_File* extract_source_file(FILE *in,struct Source_Name *name)
+	{
+		long file_size;
+		struct Source_File *src;
+
+		/*measure the file by seeking to its end*/
+		if(fseek(in,0,SEEK_END)==-1)
+		{
+			/*BUG FIX: the stream used to be leaked on the seek/tell failure paths*/
+			fclose(in);
+			return NULL;
+		}
+		if((file_size=ftell(in))==-1)
+		{
+			fclose(in);
+			return NULL;
+		}
+		if(fseek(in,0,SEEK_SET)==-1)
+		{
+			fclose(in);
+			return NULL;
+		}
+
+		src=wonky_malloc(sizeof(struct Source_File));
+
+		src->src_name=name;
+
+		src->src=wonky_malloc(file_size+1);
+		src->src_size=file_size;
+
+		src->where_in_src=0;
+
+		src->which_column=0;
+		src->which_row=0;
+		src->is_in_the_begining_of_line=1;
+
+		src->src[file_size]='\0';
+
+		/*BUG FIX: the read result used to be ignored, which left src->src
+		  partly uninitialised after a short read*/
+		if(fread(src->src,1,file_size,in)!=(size_t)file_size)
+		{
+			fclose(in);
+			wonky_free(src->src);
+			wonky_free(src);
+			return NULL;
+		}
+		fclose(in);
+		return src;
+	}
+	/*Split name->filename into a directory part (appended to name->base) and a
+	  bare filename: e.g. filename "a/b/c.h" with base "/usr/include/" becomes
+	  base "/usr/include/a/b/" and filename "c.h".  Does nothing when the
+	  filename contains no '/'.
+	  This might cause compatibility issues TODO*/
+	void normalise_source_name(struct Source_Name *name)
+	{
+		size_t offset;
+		size_t i;
+		size_t last_slash;
+		char *hold_base;
+
+		/*find the index of the last '/' in the filename*/
+		for(last_slash=offset=0;name->filename[offset];++offset)
+		{
+			if(name->filename[offset]=='/')
+			{
+				last_slash=offset;
+			}
+		}
+
+		if(last_slash==0)
+			return;
+
+		if(name->base==NULL)
+		{
+			offset=0;
+			/*BUG FIX: allocate room for the trailing '/' too — the directory
+			  copy below fills indices 0..last_slash-1, so the separator must
+			  live at index last_slash (the old code put it at last_slash-1,
+			  where the copy loop immediately overwrote it, unlike the
+			  non-NULL branch which does keep a trailing '/')*/
+			name->base=wonky_malloc(last_slash+2);
+			name->base[last_slash+1]='\0';
+			name->base[last_slash]='/';
+
+		}else
+		{
+			/*grow the existing base so the directory part and a '/' fit after it*/
+			offset=gstrlen((char*)name->base);
+			hold_base=wonky_malloc(offset+last_slash+2);
+			strmv(hold_base,(char*)name->base);
+
+			hold_base[last_slash+offset]='/';
+			hold_base[last_slash+offset+1]='\0';
+			wonky_free((void*)name->base);
+
+			name->base=hold_base;
+		}
+
+		/*copy the directory portion of the filename (everything before the
+		  last '/') onto the end of the base*/
+		for(i=0;i<last_slash;++i)
+			name->base[offset+i]=name->filename[i];
+
+
+		++i;
+		/*prune the filename: keep only the part after the last '/'*/
+		offset=gstrlen(name->filename+i);
+		hold_base=wonky_malloc(offset+1);
+		strmv(hold_base,name->filename+i);
+		wonky_free(name->filename);
+		name->filename=hold_base;
+
+
+	}
+	/*here be dragons*/
+	/*Return the next source character, optionally transparent with respect to
+	  line splices (backslash-newline), block comments and newlines, and keep
+	  the position bookkeeping (where_in_src/which_row/which_column/token_size)
+	  in sync.  is_in_the_begining_of_line is cleared on the first character of
+	  a line unless that character is '#', so directive detection still works.
+	  Quirks worth knowing:
+	   - '\n' (when not skipped) and '\0' are returned WITHOUT advancing
+	     where_in_src; callers like goto_new_line step over the newline themselves
+	   - a skipped block comment resumes scanning from the top via the
+	     "superhack" label; an unterminated comment yields '\0' at end of source
+	   - only block comments are handled here; NOTE(review): line comments
+	     ("//") appear not to be treated — confirm they are handled elsewhere*/
+	char src_getc(struct Source_File *src,char skip_line_splice,char skip_comments,char skip_new_line)
+	{
+		superhack:
+		if(src->src[src->where_in_src]=='\\' && skip_line_splice)
+		{
+			/*a backslash-newline pair is dropped and scanning restarts*/
+			if(src->where_in_src < src->src_size-1 && src->src[src->where_in_src+1]=='\n')
+			{
+				src->where_in_src+=2;
+				++src->which_row;
+				src->token_size+=2;
+				src->which_column=0;
+				goto superhack;
+			}else
+			{
+				/*a lone backslash is an ordinary character*/
+				++src->token_size;
+				++src->which_column;
+				++src->where_in_src;
+				src->is_in_the_begining_of_line=0;
+				return '\\';
+			}
+		}else
+		{
+			if(src->src[src->where_in_src]=='\n' && skip_new_line)
+			{
+				++src->which_row;
+				src->which_column=0;
+				src->is_in_the_begining_of_line=1;
+
+				++src->where_in_src;
+				goto superhack;
+			}else if(src->src[src->where_in_src]=='/' && skip_comments)
+			{
+				if(src->src[src->where_in_src+1]=='*')
+				{
+					char hold_char;
+
+					/*consume the comment body via recursive reads that still
+					  honour line splices, looking for the closing star-slash*/
+					src->where_in_src+=2;
+					hold_char=src_getc(src,1,0,1);
+					while(hold_char)
+					{
+						if(hold_char=='*')
+						{
+							hold_char=src_getc(src,1,0,1);
+							if(hold_char=='\0')
+							{
+								src->where_in_src=src->src_size;
+								return '\0';
+							}
+							else if(hold_char=='/')
+							{
+								goto superhack;
+							}
+						}else
+						{
+							hold_char=src_getc(src,1,0,1);
+						}
+					}
+					/*unterminated comment: pin the position to end of source*/
+					src->where_in_src=src->src_size;
+					return '\0';
+
+				}
+			}else
+			{
+				++src->which_column;
+			}
+			++src->token_size;
+			/*the flag survives only while reading a '#' at line start*/
+			if(src->src[src->where_in_src]!='#' || src->is_in_the_begining_of_line!=1)
+				src->is_in_the_begining_of_line=0;
+			if(src->src[src->where_in_src]=='\n')
+			{
+				return '\n';
+			}
+
+			if(src->src[src->where_in_src]=='\0')
+				return src->src[src->where_in_src];
+			else
+				return src->src[src->where_in_src++];
+		}
+	}
+	/*Step back one character; when backing over a newline the row counter is
+	  decremented too (the column within the previous line is unknown, so the
+	  column restarts at 0)*/
+	void src_ungetc(struct Source_File *src)
+	{
+		--src->where_in_src;
+		if(src->src[src->where_in_src]=='\n')
+		{
+			--src->which_row;
+			src->which_column=0;
+		}
+	}
+	/*Materialise the best (longest) match recorded in *src as a token of type
+	  kw.  The token's data points into the source buffer (no copy is made);
+	  handle_splicing then compacts any backslash-newline pairs in place.  The
+	  read position is repositioned to just after the accepted lexeme.*/
+	struct token* src_extract_token(struct Source_File *src,enum KEYWORDS kw)
+	{
+		struct token *ret;
+		ret=wonky_malloc(sizeof(struct token));
+		ret->type=kw;
+
+		ret->data_size=src->best_token_size;
+		ret->column=src->best_token_column;
+		ret->line=src->best_token_line;
+		ret->data=src->src+src->best_token_where_in_src_start;
+		ret->filename=src->src_name->filename;
+		handle_splicing(ret);
+		src->where_in_src=src->best_token_where_in_src_end;
+		src->is_in_the_begining_of_line=src->best_token_beg_line;
+		return ret;
+	}
+
+	/*Reset the per-token scan state before matching a new token.
+	  use_src_as_base!=0 means "start scanning from the current read position";
+	  otherwise the read position is first rewound to the end of the last
+	  accepted token.*/
+	void src_reset_token_data(struct Source_File *src,char use_src_as_base)
+	{
+		src->token_size=0;
+		src->best_token_size=0;
+		src->best_token_line=src->which_row;
+		src->best_token_column=src->which_column;
+		if(use_src_as_base)
+		{
+			src->best_token_where_in_src_end=src->where_in_src;
+		}else
+		{
+			src->where_in_src=src->best_token_where_in_src_end;
+		}
+		src->best_token_where_in_src_start=src->where_in_src;
+	}
+	/*Record the current scan position as the best (longest) match so far;
+	  src_extract_token later materialises a token from these fields*/
+	void src_assimilate_into_best_token(struct Source_File *src)
+	{
+		src->best_token_size=src->token_size;
+		src->best_token_line=src->which_row;
+		src->best_token_column=src->which_column;
+		src->best_token_where_in_src_end=src->where_in_src;
+		src->best_token_beg_line=src->is_in_the_begining_of_line;
+	}
+	/*Free a source file together with its name and text buffer.
+	  NOTE(review): tokens produced by src_extract_token point into src->src —
+	  they must not be used after this call.*/
+	void delete_source_file(struct Source_File *src)
+	{
+		delete_source_name(src->src_name);
+		wonky_free(src->src);
+		wonky_free(src);
+	}
+	/*Free a source name and both of its owned strings*/
+	void delete_source_name(struct Source_Name *name)
+	{
+		wonky_free(name->filename);
+		wonky_free(name->base);
+		wonky_free(name);
+	}
+	/*Consume characters up to the next newline (or end of source) and position
+	  the reader at the start of the following line.
+	  NOTE(review): where_in_src is incremented even when the loop stopped on
+	  '\0' — confirm callers never read past the terminator in that case.*/
+	void goto_new_line(struct Source_File *src,struct Translation_Data *translation_data)
+	{
+		char hold_char;
+		while( (hold_char=src_getc(src,1,1,0)) != '\0' && hold_char != '\n');
+		src->is_in_the_begining_of_line=1;
+		++src->where_in_src;
+		++src->which_row;
+		src->which_column=0;
+	}
+	/*Same observable effect as goto_new_line, phrased as a for loop.
+	  NOTE(review): the two look like duplicates — candidates for consolidation.*/
+	void chase_new_line(struct Source_File *src,struct Translation_Data *translation_data)
+	{
+		char hold_char;
+		for(hold_char=src_getc(src,1,1,0);hold_char!='\n' && hold_char!='\0';
+				hold_char=src_getc(src,1,1,0));
+
+		++src->where_in_src;
+		++src->which_row;
+		src->is_in_the_begining_of_line=1;
+		src->which_column=0;
+
+
+	}
+	/*returns the number of bytes skipped*/
+	/*Consume consecutive backslash-newline pairs at the current position*/
+	size_t skip_line_splice(struct Source_File *src)
+	{
+		size_t current_size=0;
+		while(src->where_in_src<src->src_size-1 && src->src[src->where_in_src]=='\\' && src->src[src->where_in_src+1]=='\n')
+		{
+			src->where_in_src+=2;
+			current_size+=2;
+		}
+		return current_size;
+	}
+
+	/*Advance past spaces and tabs.  When skip_new_line is 0 the scan stops at a
+	  newline; no unget is needed for it because src_getc returns '\n' without
+	  advancing.  Any other non-blank character is pushed back before returning.*/
+	void skip_white_space(struct Source_File *src,char skip_new_line)
+	{
+		char hold_char;
+		while(hold_char=src_getc(src,1,1,skip_new_line))
+		{
+			if(hold_char=='\n' && !skip_new_line)
+			{
+				return ;
+			}
+			if(hold_char!=' ' && hold_char!='\t')
+			{
+				src_ungetc(src);
+				return ;
+			}
+		}
+	}
+	/*where_to_search ends in a NULL pointer*/
+	/*Probe each search directory in turn and load the first file that opens
+	  and reads successfully.  Returns NULL when the file is found nowhere.*/
+	struct Source_File* get_source_file(char *filename,char **where_to_search)
+	{
+		FILE *in;
+		char *temp_name;
+		struct Source_Name *name;
+		struct Source_File *file;
+
+		wonky_assert(where_to_search!=NULL);
+		wonky_assert(*where_to_search!=NULL);
+		do
+		{
+			temp_name=gstr_append(*where_to_search,filename);
+			in=fopen(temp_name,"r");
+			wonky_free(temp_name);
+			if(in==NULL)
+				continue;
+
+			name=get_source_name(filename,*where_to_search);
+			file=extract_source_file(in,name);
+			if(file!=NULL)
+			{
+				return file;
+			}else
+			{
+				/*the read failed; keep probing the remaining directories*/
+				delete_source_name(name);
+			}
+		}while(*(++where_to_search));
+		return NULL;
+	}
+ #endif
F diff --git a/src/semantics/program/source_file.h b/src/semantics/program/source_file.h
new file mode 100644
--- /dev/null
+++ b/src/semantics/program/source_file.h
+ #ifndef WONKY_SOURCE_FILE_H
+ #define WONKY_SOURCE_FILE_H WONKY_SOURCE_FILE_H
+ #include <source_file.hh>
+	/*include search directories probed by get_source_file; NULL terminated*/
+	extern char *well_known_locations_base[];
+	/*a filename split from its directory part (see normalise_source_name)*/
+	struct Source_Name
+	{
+		char *filename;
+		char *base;
+	};
+	/*a loaded source buffer together with the lexer's scan state*/
+	struct Source_File
+	{
+		struct Source_Name *src_name;
+
+		char *src;		/*file contents, NUL terminated*/
+		size_t src_size;	/*length of src, excluding the terminator*/
+		size_t where_in_src;	/*current read index into src*/
+		size_t which_column;
+		size_t which_row;
+
+		size_t token_size;	/*characters consumed for the token being scanned*/
+
+		/*longest match recorded so far; see src_assimilate_into_best_token
+		  and src_extract_token*/
+		size_t best_token_size;
+		size_t best_token_line;
+		size_t best_token_column;
+
+		size_t best_token_where_in_src_start;
+		size_t best_token_where_in_src_end;
+		char best_token_beg_line;
+
+		/*set while nothing but line starts have been read; lets src_getc
+		  recognise '#' preprocessing directives*/
+		char is_in_the_begining_of_line;
+
+	};
+
+ struct Source_File* extract_source_file(FILE *in,struct Source_Name *name);
+ struct Source_File* get_source_file(char *filename,char **where_to_search);
+ struct Source_Name* get_source_name(char *filename,char *base);
+ void normalise_source_name(struct Source_Name *name);
+ char src_getc(struct Source_File *src,char skip_line_splice,char skip_comments,char skip_new_line);
+ void src_ungetc(struct Source_File *src);
+ struct token* src_extract_token(struct Source_File *src,enum KEYWORDS kw);
+ void src_reset_token_data(struct Source_File *src,char use_src_as_base);
+ void src_assimilate_into_best_token(struct Source_File *src);
+ void chase_new_line(struct Source_File *src,struct Translation_Data *translation_data);
+ void goto_new_line(struct Source_File *src,struct Translation_Data *translation_data);
+ void skip_white_space(struct Source_File *src,char skip_new_line);
+ size_t skip_line_splice(struct Source_File *src);
+ void delete_source_file(struct Source_File *src);
+ void delete_source_name(struct Source_Name *name);
+ #endif
F diff --git a/src/semantics/program/source_file.hh b/src/semantics/program/source_file.hh
new file mode 100644
--- /dev/null
+++ b/src/semantics/program/source_file.hh
+ #ifndef WONKY_SOURCE_FILE_HH
+ #define WONKY_SOURCE_FILE_HH WONKY_SOURCE_FILE_HH
+
+
+ #endif
F diff --git a/src/semantics/program/token.c b/src/semantics/program/token.c
new file mode 100644
--- /dev/null
+++ b/src/semantics/program/token.c
+ #ifndef WONKY_TOKEN_C
+ #define WONKY_TOKEN_C WONKY_TOKEN_C
+ #include <token.h>
+
+	/*Make a heap-allocated shallow clone of *src — the data pointer is shared
+	  with the original, not duplicated*/
+	struct token* copy_token(struct token *src)
+	{
+		struct token *duplicate=wonky_malloc(sizeof(struct token));
+		*duplicate=*src;
+		return duplicate;
+	}
+
+	/*Return 1 when the two tokens spell the same text, 0 otherwise.
+	  Only the data bytes are compared — the token types are NOT checked.*/
+	char compare_tokens(struct token *a,struct token *b)
+	{
+		size_t i;
+		if(a->data_size!=b->data_size)
+			return 0;
+		for(i=0;i<a->data_size;++i)
+		{
+			if(a->data[i]!=b->data[i])
+				return 0;
+		}
+		return 1;
+	}
+
+	/*Wrap an identifier's spelling in a fresh identifier node; the macro
+	  bookkeeping pointers start out empty.  The data buffer is referenced,
+	  not copied — the caller keeps ownership of it.*/
+	struct identifier* get_identifier(char *data,size_t size)
+	{
+		struct identifier *ret;
+		ret=wonky_malloc(sizeof(struct identifier));
+		ret->size=size;
+		ret->data=data;
+		ret->last_defined_macro_with_this_id=NULL;
+		ret->last_use_as_a_macro_argument=NULL;
+
+		return ret;
+	}
+	/*Make a KW_ID token that points at the shared identifier node for its
+	  spelling (identifiers are interned; the token does not own *id)*/
+	struct token_identifier* get_id_token(struct identifier *id,struct Location *location)
+	{
+		struct token_identifier *ret;
+		ret=wonky_malloc(sizeof(struct token_identifier));
+		ret->type=KW_ID;
+		ret->location=location;
+		ret->id=id;
+
+
+		return ret;
+	}
+ /*Make a keyword token of the given kind at the given location.*/
+ struct token_keyword* get_keyword_token(enum KEYWORDS type,struct Location *location)
+ {
+ 	struct token_keyword *tok=wonky_malloc(sizeof(struct token_keyword));
+ 	tok->location=location;
+ 	tok->type=type;
+ 	return tok;
+ }
+ /*Make a punctuator token.  The digraph/trigraph distinction is not lexed
+  yet, so every punctuator is tagged PUNCTUATOR_NORMAL for now.*/
+ struct token_punctuator* get_punctuator_token(enum KEYWORDS type,struct Location *location)
+ {
+ 	struct token_punctuator *tok=wonky_malloc(sizeof(struct token_punctuator));
+ 	tok->punctuator_type=PUNCTUATOR_NORMAL;/*TODO*/
+ 	tok->location=location;
+ 	tok->type=type;
+ 	return tok;
+ }
+ /*Make a constant token.
+  BUG FIX: the old body assigned encoding/specifier/signedness/value, none
+  of which are members of struct token_constant (token.h declares a single
+  struct Constant *constant instead).  The constant pointer is left NULL
+  until the data/size pair is actually parsed.
+  NOTE(review): data and size are accepted but not yet consumed — confirm
+  the intended parsing step. TODO*/
+ struct token_constant* get_constant_token(enum KEYWORDS bare_type,struct Location *location,char *data,size_t size)
+ {
+ 	struct token_constant *ret;
+ 	ret=wonky_malloc(sizeof(struct token_constant));
+ 	ret->location=location;
+ 	ret->type=bare_type;/*TODO*/
+ 	ret->constant=NULL;/*TODO: parse data[0..size) into a struct Constant*/
+ 	(void)data;
+ 	(void)size;
+ 
+ 	return ret;
+ }
+ /*Make a string-literal token that takes ownership of data.
+  BUG FIX: the string_type member was left uninitialised; default it to a
+  narrow string until wide-string lexing sets it explicitly.
+  NOTE(review): confirm the lexer distinguishes L"..." before relying on
+  string_type.*/
+ struct token_string* get_string_token(enum KEYWORDS bare_type,struct Location *location,char *data,size_t size)
+ {
+ 	struct token_string *ret;
+ 	ret=wonky_malloc(sizeof(struct token_string));
+ 	ret->type=bare_type;
+ 	ret->location=location;
+ 	ret->string_type=STRING_TOKEN_NORMAL;/*TODO*/
+ 	ret->size=size;
+ 	ret->data=data;
+ 
+ 	return ret;
+ }
+ /*Make an #include directive token owning the queue of header-name tokens.
+  BUG FIX: the allocation used sizeof(struct token_string), undersizing the
+  object and making the member writes undefined behaviour.
+  NOTE(review): ret->type is never assigned — confirm which KEYWORDS value
+  marks an include directive.*/
+ struct token_include_directive* get_include_directive_token(struct Location *location,struct Queue *tokens)
+ {
+ 	struct token_include_directive *ret;
+ 	ret=wonky_malloc(sizeof(struct token_include_directive));
+ 	ret->location=location;
+ 	ret->tokens=tokens;
+ 	return ret;
+ }
+ /*Make an #if directive token.
+  BUG FIX: the body was left unfinished ("ret->" with no member, no
+  return), which is undefined behaviour for any caller.  Completed against
+  the member list of struct token_if_directive in token.h.
+  NOTE(review): ret->type is never assigned — confirm the KEYWORDS value
+  for an #if directive.*/
+ struct token_if_directive* get_if_directive_token(struct Location *location,struct Queue *controlling_tokens,struct Queue_Node *if_true,struct Queue_Node *if_false,struct Queue_Node *end_of_if_directive)
+ {
+ 	struct token_if_directive *ret;
+ 	ret=wonky_malloc(sizeof(struct token_if_directive));
+ 	ret->location=location;
+ 	ret->controlling_expression=controlling_tokens;
+ 	ret->if_true=if_true;
+ 	ret->if_false=if_false;
+ 	ret->end_of_if_directive=end_of_if_directive;
+ 	return ret;
+ }
+
+ struct token_ifdef_directive* get_ifdef_directive_token(struct Location *location,struct identifier *id,struct Queue_Node *if_true,struct Queue_Node *if_false,struct Queue_Node *end_of_if_directive);
+ 
+ /*BUG FIX: this was a verbatim duplicate declaration of
+  get_ifdef_directive_token; the #ifndef variant was clearly intended
+  (struct token_ifndef_directive exists in token.h). TODO confirm.*/
+ struct token_ifndef_directive* get_ifndef_directive_token(struct Location *location,struct identifier *id,struct Queue_Node *if_undefined,struct Queue_Node *if_defined,struct Queue_Node *end_of_ifndef_directive);
+ 
+ struct token_normal_define_directive* get_normal_define_directive_token(struct Location *location,struct identifier *id,struct Queue *replacement_tokens);
+ 
+ struct token_functionlike_define_directive* get_functionlike_define_directive_token(struct Location *location,struct identifier *id,struct Queue *argument_id_list_tokens,struct Queue *replacement_tokens);
+ 
+ struct token_undef_directive* get_undef_directive_token(struct Location *location,struct identifier *id);
+ struct token_line_directive* get_line_directive_token(struct Location *location,struct Location *new_location);
+ struct token_error_directive* get_error_directive_token(struct Location *location,struct token_string *error_message);
+ struct token_pragma_directive* get_pragma_directive(struct Location *location,enum Pragma_Type type);
+ struct token_defined_unary_operator* get_defined_unary_operator(struct Location *location,struct identifier *id);
+ #endif
F diff --git a/src/semantics/program/token.h b/src/semantics/program/token.h
new file mode 100644
--- /dev/null
+++ b/src/semantics/program/token.h
+ #ifndef WONKY_TOKEN_H
+ #define WONKY_TOKEN_H WONKY_TOKEN_H
+ #include <token.hh>
+
+ #include <automata.h>
+ #include <constant.h>
+
+ /*Base "class" of every token kind below: each variant begins with an
+  enum KEYWORDS type, so a struct token* can be inspected and then cast to
+  the concrete token_* type.
+  NOTE(review): compare_tokens in token.c still reads data/data_size from
+  the old token layout (kept in a comment further down) — confirm.*/
+ struct token
+ {
+ 	enum KEYWORDS type;
+ };
+ struct identifier /*there is only one of these per id*/
+ {
+ 	size_t size;
+ 	char *data;
+ 	/*most recent #define whose name is this identifier, or NULL*/
+ 	struct token *last_defined_macro_with_this_id;
+ 	/*set while this id names a parameter of a functionlike macro*/
+ 	struct functionlike_define_directive_argument *last_use_as_a_macro_argument;
+ };
+ /*An occurrence of an identifier; points at the single interned id.*/
+ struct token_identifier
+ {
+ 	enum KEYWORDS type;
+ 	struct Location *location;
+ 	struct identifier *id;
+ };
+ /*A language keyword; the keyword itself is encoded in type.*/
+ struct token_keyword
+ {
+ 	enum KEYWORDS type;
+ 	struct Location *location;
+ };
+ /*A punctuator; punctuator_type records normal/digraph/trigraph spelling.*/
+ struct token_punctuator
+ {
+ 	enum KEYWORDS type;
+ 	struct Location *location;
+ 	enum Punctuator_Token_Type punctuator_type;
+ };
+ /*A numeric/character constant.
+  NOTE(review): get_constant_token in token.c assigned encoding/specifier/
+  signedness/value fields that do not exist here — confirm which
+  representation is intended.*/
+ struct token_constant
+ {
+ 	enum KEYWORDS type;
+ 	struct Location *location;
+ 	struct Constant *constant;
+ };
+ /*A string literal; data[0..size) is owned by the token.*/
+ struct token_string
+ {
+ 	enum KEYWORDS type;
+ 	struct Location *location;
+ 	enum String_Token_Type string_type;
+ 	size_t size;
+ 	char *data;
+ };
+ /*#include directive: tokens holds the header-name token sequence.*/
+ struct token_include_directive
+ {
+ 	enum KEYWORDS type;
+ 	struct Location *location;
+ 	struct Queue *tokens;
+ };
+ /*#if directive: the controlling expression plus jump targets into the
+  token queue for the true branch, the false branch and the matching
+  #endif (see doc/scribblings diagram).*/
+ struct token_if_directive
+ {
+ 	enum KEYWORDS type;
+ 	struct Location *location;
+ 	struct Queue *controlling_expression;
+ 	struct Queue_Node *if_true;
+ 	struct Queue_Node *if_false;
+ 	struct Queue_Node *end_of_if_directive;
+ };
+ /*#ifdef directive.
+  NOTE(review): id is a token_identifier* here but the get_ifdef_directive_token
+  prototype takes a plain struct identifier* — confirm which is intended.*/
+ struct token_ifdef_directive
+ {
+ 	enum KEYWORDS type;
+ 	struct Location *location;
+ 	struct token_identifier *id;
+ 	struct Queue_Node *if_defined;
+ 	struct Queue_Node *if_undefined;
+ 	struct Queue_Node *end_of_ifdef_directive;
+ };
+ /*#ifndef directive: same shape as #ifdef with the branches swapped.*/
+ struct token_ifndef_directive
+ {
+ 	enum KEYWORDS type;
+ 	struct Location *location;
+ 	struct token_identifier *id;
+ 	struct Queue_Node *if_undefined;
+ 	struct Queue_Node *if_defined;
+ 	struct Queue_Node *end_of_ifndef_directive;
+ 
+ };
+ /*Object-like #define: the macro name and its replacement token list.*/
+ struct token_normal_define_directive
+ {
+ 	enum KEYWORDS type;
+ 	struct Location *location;
+ 	struct token_identifier *id;
+ 	struct Queue *replacement_tokens;
+ 	struct Translation_Unit *the_last_place_this_macro_was_defined;
+ };
+ /*One parameter of a functionlike macro: a window of
+  number_of_substitution_tokens tokens starting at
+  first_in_argument_substitution_tokens.*/
+ struct functionlike_define_directive_argument
+ {
+ 	struct token_functionlike_define_directive *belongs_to;
+ 	struct token *first_in_argument_substitution_tokens;
+ 	size_t number_of_substitution_tokens;
+ };
+ /*Functionlike #define: name, parameter list and replacement tokens.
+  NOTE(review): id is a plain identifier* here but token_identifier* in the
+  normal define above — confirm whether that asymmetry is intended.*/
+ struct token_functionlike_define_directive
+ {
+ 	enum KEYWORDS type;
+ 	struct Location *location;
+ 	struct identifier *id;
+ 	struct Queue *arguments;
+ 	struct Queue *replacement_tokens;
+ 	struct Translation_Unit *the_last_place_this_macro_was_defined;
+ };
+ /*#undef directive: the macro name being undefined.*/
+ struct token_undef_directive
+ {
+ 	enum KEYWORDS type;
+ 	struct Location *location;
+ 	struct identifier *id;
+ };
+ /*#line directive: where it physically sits and the location it asserts.*/
+ struct token_line_directive
+ {
+ 	enum KEYWORDS type;
+ 	struct Location *real_location;
+ 	struct Location *new_location;
+ };
+ /*#error directive: the diagnostic message as a string token.*/
+ struct token_error_directive
+ {
+ 	enum KEYWORDS type;
+ 	struct Location *location;
+ 	struct token_string *error_message;
+ };
+ /*#pragma directive; pragma_type discriminates the recognised pragmas.*/
+ struct token_pragma_directive
+ {
+ 	enum KEYWORDS type;
+ 	struct Location *location;
+ 	enum Pragma_Type pragma_type;
+ };
+ /*The preprocessor "defined id" / "defined(id)" unary operator.*/
+ struct token_defined_unary_operator
+ {
+ 	enum KEYWORDS type;
+ 	struct Location *location;
+ 	struct identifier *id;
+ };
+
+ /*
+ * OLD TOKEN STRUCT
+ struct token
+ {
+ enum KEYWORDS type;
+ size_t data_size;
+ char *data;
+ size_t line,column;
+ const char *filename;
+ };
+ */
+ struct token* copy_token(struct token *src);
+ void handle_splicing(struct token *word);
+ char compare_tokens(struct token *a,struct token *b);
+
+
+ struct identifier* get_identifier(char *data,size_t size);
+ struct token_identifier* get_id_token(struct identifier *id,struct Location *location);
+ struct token_keyword* get_keyword_token(enum KEYWORDS type,struct Location *location);
+ struct token_punctuator* get_punctuator_token(enum KEYWORDS type,struct Location *location);
+ struct token_constant* get_constant_token(enum KEYWORDS bare_type,struct Location *location,char *data,size_t size);
+ struct token_string* get_string_token(enum KEYWORDS bare_type,struct Location *location,char *data,size_t size);
+ struct token_include_directive* get_include_directive_token(struct Location *location,struct Queue *tokens);
+ struct token_if_directive* get_if_directive_token(struct Location *location,struct Queue *controlling_tokens,struct Queue_Node *if_true,struct Queue_Node *if_false,struct Queue_Node *end_of_if_directive);
+
+ struct token_ifdef_directive* get_ifdef_directive_token(struct Location *location,struct identifier *id,struct Queue_Node *if_true,struct Queue_Node *if_false,struct Queue_Node *end_of_if_directive);
+ 
+ /*BUG FIX: was a verbatim duplicate of the get_ifdef_directive_token
+  declaration; the #ifndef variant was clearly intended
+  (struct token_ifndef_directive exists above). TODO confirm.*/
+ struct token_ifndef_directive* get_ifndef_directive_token(struct Location *location,struct identifier *id,struct Queue_Node *if_undefined,struct Queue_Node *if_defined,struct Queue_Node *end_of_ifndef_directive);
+
+ struct token_normal_define_directive* get_normal_define_directive_token(struct Location *location,struct identifier *id,struct Queue *replacement_tokens);
+
+ struct token_functionlike_define_directive* get_functionlike_define_directive_token(struct Location *location,struct identifier *id,struct Queue *argument_id_list_tokens,struct Queue *replacement_tokens);
+
+ struct token_undef_directive* get_undef_directive_token(struct Location *location,struct identifier *id);
+ struct token_line_directive* get_line_directive_token(struct Location *location,struct Location *new_location);
+ struct token_error_directive* get_error_directive_token(struct Location *location,struct token_string *error_message);
+ struct token_pragma_directive* get_pragma_directive(struct Location *location,enum Pragma_Type type);
+ struct token_defined_unary_operator* get_defined_unary_operator(struct Location *location,struct identifier *id);
+
+ #endif
F diff --git a/src/semantics/program/token.hh b/src/semantics/program/token.hh
new file mode 100644
--- /dev/null
+++ b/src/semantics/program/token.hh
+ #ifndef WONKY_TOKEN_HH
+ #define WONKY_TOKEN_HH WONKY_TOKEN_HH
+
+ struct token;
+ struct identifier;
+ struct token_identifier;
+ struct token_keyword;
+ struct token_punctuator;
+ struct token_constant;
+ struct token_string;
+ struct token_include_directive;
+ struct token_if_directive;
+ struct token_ifdef_directive;
+ struct token_ifndef_directive;
+ struct token_normal_define_directive;
+ struct functionlike_define_directive_argument;
+ struct token_functionlike_define_directive;
+ struct token_undef_directive;
+ struct token_line_directive;
+ struct token_error_directive;
+ struct token_pragma_directive;
+ struct token_defined_unary_operator;
+
+ /*How a punctuator was spelled in the source text.*/
+ enum Punctuator_Token_Type
+ {
+ 	PUNCTUATOR_NORMAL,
+ 	PUNCTUATOR_DIGRAPH,
+ 	PUNCTUATOR_TRIGRAPH,
+ 	PUNCTUATOR_TYPE_END
+ };
+ /*Radix a numeric constant was written in.*/
+ enum Constant_Token_Encoding
+ {
+ 	CONSTANT_TOKEN_HEXADECIMAL,
+ 	CONSTANT_TOKEN_DECIMAL,
+ 	CONSTANT_TOKEN_OCTAL,
+ 	CONSTANT_TOKEN_TYPE_END
+ };
+ /*Integer-constant length suffix (none, l, ll).
+  NOTE(review): struct token_constant in token.h currently stores a
+  struct Constant* rather than these enums — confirm which representation
+  is intended.*/
+ enum Constant_Token_Specifier
+ {
+ 	CONSTANT_TOKEN_SPECIFIER_NONE,
+ 	CONSTANT_TOKEN_SPECIFIER_LONG,
+ 	CONSTANT_TOKEN_SPECIFIER_LONG_LONG,
+ 	CONSTANT_TOKEN_SPECIFIER_END
+ 
+ };
+ /*Signedness suffix of an integer constant (u/U).*/
+ enum Constant_Token_Signedness
+ {
+ 	CONSTANT_TOKEN_SIGNED,
+ 	CONSTANT_TOKEN_UNSIGNED,
+ 	CONSTANT_TOKEN_SIGNEDNESS_END
+ };
+ /*Narrow "..." versus wide L"..." string literal.*/
+ enum String_Token_Type
+ {
+ 	STRING_TOKEN_NORMAL,
+ 	STRING_TOKEN_WIDE,
+ 	STRING_TOKEN_TYPE_END
+ };
+ /*TODO: no pragmas are recognised yet.*/
+ enum Pragma_Type
+ {
+ 	PRAGMA_TYPE_END
+ };
+
+ #endif
F diff --git a/src/semantics/program/translation_unit.c b/src/semantics/program/translation_unit.c
new file mode 100644
--- /dev/null
+++ b/src/semantics/program/translation_unit.c
+ #ifndef WONKY_TRANSLATION_UNIT_C
+ #define WONKY_TRANSLATION_UNIT_C WONKY_TRANSLATION_UNIT_C
+ #include <translation_unit.h>
+
+ /*Create a Preprocessing_Translation_Unit with an initialised, empty
+  token queue.
+  NOTE(review): the source parameter is never read here — presumably the
+  lexer pushes tokens into the returned unit afterwards; confirm against
+  the caller.*/
+ struct Preprocessing_Translation_Unit* get_preprocessing_translation_unit(struct Source_File *source)
+ {
+ 	struct Preprocessing_Translation_Unit *ret;
+ 	ret=wonky_malloc(sizeof(struct Preprocessing_Translation_Unit));
+ 	ret->tokens=wonky_malloc(sizeof(struct Queue));
+ 	Queue_Init(ret->tokens);
+ 
+ 	return ret;
+ }
+ /*TODO: currently a no-op — the tokens queue, the tokens it holds and the
+  unit itself are all leaked.*/
+ void delete_preprocessing_translation_unit(struct Preprocessing_Translation_Unit *unit)
+ {
+ 	return;
+ }
+
+ /*Append token to the end of the unit's token queue; the unit takes
+  ownership of the pointer.*/
+ void push_token_into_preprocessing_translation_unit(struct Preprocessing_Translation_Unit *unit,struct token *token)
+ {
+ 	Queue_Push(unit->tokens,token);
+ }
+ /*TODO: unimplemented — the function falls off the end without returning a
+  value, which is undefined behaviour for any caller that uses the result.
+  Presumably it should yield the token at token_pointer->current_token_node
+  after executing any directive tokens; confirm before implementing.*/
+ struct token* get_token_under_pointer(struct Token_Pointer *token_pointer)
+ {
+ 
+ }
+ void goto_next_token(struct Token_Pointer *token_pointer);
+
+ struct Token_Pointer* get_token_ptr(struct Preprocessing_Translation_Unit *unit);
+
+ void token_ptr_execute_include_directive(struct Token_Pointer *ptr,struct token_include_directive *include_directive);
+ void token_ptr_execute_if_directive(struct Token_Pointer *ptr,struct token_if_directive *if_directive);
+ void token_ptr_execute_ifdef_directive(struct Token_Pointer *ptr,struct token_ifdef_directive *ifdef_directive);
+ void token_ptr_execute_ifndef_directive(struct Token_Pointer *ptr,struct token_ifndef_directive *ifndef_directive);
+ void token_ptr_execute_normal_define_directive(struct Token_Pointer *ptr,struct token_normal_define_directive *define_directive);
+ void token_ptr_execute_functionlike_define_directive(struct Token_Pointer *ptr,struct token_functionlike_define_directive *define_directive);
+ void token_ptr_execute_undef_directive(struct Token_Pointer *ptr,struct token_undef_directive *undef_directive);
+ void token_ptr_execute_line_directive(struct Token_Pointer *ptr,struct token_line_directive *line_directive);
+ void token_ptr_execute_error_directive(struct Token_Pointer *ptr,struct token_error_directive *error_directive);
+ void token_ptr_execute_pragma_directive(struct Token_Pointer *ptr,struct token_pragma_directive *pragma_directive);
+ void token_ptr_execute_defined_unary_operator(struct Token_Pointer *ptr,struct token_defined_unary_operator *operator);
+
+ #endif
F diff --git a/src/semantics/program/translation_unit.h b/src/semantics/program/translation_unit.h
new file mode 100644
--- /dev/null
+++ b/src/semantics/program/translation_unit.h
+ #ifndef WONKY_TRANSLATION_UNIT_H
+ #define WONKY_TRANSLATION_UNIT_H WONKY_TRANSLATION_UNIT_H
+ #include <translation_unit.hh>
+ 
+ /*BUG FIX: <queue.h> was included twice in this block; the duplicate has
+  been dropped (harmless with include guards, but noise).*/
+ #include <queue.h>
+ #include <source_file.h>
+ #include <location.h>
+ #include <wonky_malloc.h>
+
+ /*A lexed source: an ordered queue of struct token* values.*/
+ struct Preprocessing_Translation_Unit
+ {
+ 	struct Queue *tokens;
+ };
+ /*Cursor over a translation unit's token queue.  Preprocessing directives
+  and macro expansions push the current position onto call_stack and jump
+  into other token sequences (see doc/scribblings diagram).
+  NOTE(review): translation_unit is declared as struct Translation_Unit,
+  not struct Preprocessing_Translation_Unit — confirm which is intended.*/
+ struct Token_Pointer
+ {
+ 	struct Queue_Node *current_token_node;
+ 	struct Stack *call_stack;
+ 	struct Location *current_location;
+ 	struct Translation_Unit *translation_unit;
+ };
+ struct Preprocessing_Translation_Unit* get_preprocessing_translation_unit(struct Source_File *source);
+ void delete_preprocessing_translation_unit(struct Preprocessing_Translation_Unit *unit);
+
+ void push_token_into_preprocessing_translation_unit(struct Preprocessing_Translation_Unit *unit,struct token *token);
+ struct token* get_token_under_pointer(struct Token_Pointer *token_pointer);
+ void goto_next_token(struct Token_Pointer *token_pointer);
+
+ struct Token_Pointer* get_token_ptr(struct Preprocessing_Translation_Unit *unit);
+
+ void token_ptr_execute_include_directive(struct Token_Pointer *ptr,struct token_include_directive *include_directive);
+ void token_ptr_execute_if_directive(struct Token_Pointer *ptr,struct token_if_directive *if_directive);
+ void token_ptr_execute_ifdef_directive(struct Token_Pointer *ptr,struct token_ifdef_directive *ifdef_directive);
+ void token_ptr_execute_ifndef_directive(struct Token_Pointer *ptr,struct token_ifndef_directive *ifndef_directive);
+ void token_ptr_execute_normal_define_directive(struct Token_Pointer *ptr,struct token_normal_define_directive *define_directive);
+ void token_ptr_execute_functionlike_define_directive(struct Token_Pointer *ptr,struct token_functionlike_define_directive *define_directive);
+ void token_ptr_execute_undef_directive(struct Token_Pointer *ptr,struct token_undef_directive *undef_directive);
+ void token_ptr_execute_line_directive(struct Token_Pointer *ptr,struct token_line_directive *line_directive);
+ void token_ptr_execute_error_directive(struct Token_Pointer *ptr,struct token_error_directive *error_directive);
+ void token_ptr_execute_pragma_directive(struct Token_Pointer *ptr,struct token_pragma_directive *pragma_directive);
+ void token_ptr_execute_defined_unary_operator(struct Token_Pointer *ptr,struct token_defined_unary_operator *operator);
+
+ #endif
F diff --git a/src/semantics/program/translation_unit.hh b/src/semantics/program/translation_unit.hh
new file mode 100644
--- /dev/null
+++ b/src/semantics/program/translation_unit.hh
+ #ifndef WONKY_TRANSLATION_UNIT_HH
+ #define WONKY_TRANSLATION_UNIT_HH WONKY_TRANSLATION_UNIT_HH
+
+
+ #endif