glsl/pp: Do processing inline with tokenisation.

Michal Krol 2009-12-20 13:50:16 +01:00
parent 7631dca25b
commit d696cb279d
13 changed files with 525 additions and 270 deletions
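This change moves purification and tokenisation state into sl_pp_context, so the later stages pull tokens on demand instead of walking a pre-built token array. The public entry points change accordingly. Below is a minimal caller sketch against the new signatures; the preprocess() wrapper, the zero-initialised purify options and the error handling are illustrative assumptions rather than code from this commit.

#include <string.h>

#include "sl_pp_purify.h"
#include "sl_pp_public.h"

/* Illustrative driver for the reworked API. */
static int
preprocess(const char *source, struct sl_pp_token_info **output)
{
   struct sl_pp_purify_options options;
   struct sl_pp_context *context;
   unsigned int version;

   memset(&options, 0, sizeof(options));

   /* The source text and purify options now go to the context up front. */
   context = sl_pp_context_create(source, &options);
   if (!context) {
      return -1;
   }

   /* The later stages read tokens from the context on demand, so they no
    * longer take a token array or report how many tokens were eaten.
    */
   if (sl_pp_version(context, &version) ||
       sl_pp_process(context, output)) {
      sl_pp_context_destroy(context);
      return -1;
   }

   sl_pp_context_destroy(context);
   return 0;
}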

View file: sl_pp_context.c

@@ -32,7 +32,8 @@
struct sl_pp_context *
sl_pp_context_create(void)
sl_pp_context_create(const char *input,
const struct sl_pp_purify_options *options)
{
struct sl_pp_context *context;
@@ -46,12 +47,18 @@ sl_pp_context_create(void)
return NULL;
}
context->getc_buf = malloc(64 * sizeof(char));
context->getc_buf_capacity = 64;
context->getc_buf = malloc(context->getc_buf_capacity * sizeof(char));
if (!context->getc_buf) {
sl_pp_context_destroy(context);
return NULL;
}
if (sl_pp_token_buffer_init(&context->tokens, context)) {
sl_pp_context_destroy(context);
return NULL;
}
context->macro_tail = &context->macro;
context->if_ptr = SL_PP_MAX_IF_NESTING;
context->if_value = 1;
@@ -60,6 +67,8 @@ sl_pp_context_create(void)
context->line = 1;
context->file = 0;
sl_pp_purify_state_init(&context->pure, input, options);
return context;
}
@@ -70,6 +79,7 @@ sl_pp_context_destroy(struct sl_pp_context *context)
free(context->cstr_pool);
sl_pp_macro_free(context->macro);
free(context->getc_buf);
sl_pp_token_buffer_destroy(&context->tokens);
free(context);
}
}

View file: sl_pp_context.h

@@ -31,6 +31,7 @@
#include "sl_pp_dict.h"
#include "sl_pp_macro.h"
#include "sl_pp_purify.h"
#include "sl_pp_token_util.h"
#define SL_PP_MAX_IF_NESTING 64
@@ -81,6 +82,8 @@ struct sl_pp_context {
char *getc_buf;
unsigned int getc_buf_size;
unsigned int getc_buf_capacity;
struct sl_pp_token_buffer tokens;
};
#endif /* SL_PP_CONTEXT_H */

View file: sl_pp_if.c

@@ -31,55 +31,50 @@
#include "sl_pp_process.h"
static void
skip_whitespace(const struct sl_pp_token_info *input,
unsigned int *pi)
{
while (input[*pi].token == SL_PP_WHITESPACE) {
(*pi)++;
}
}
static int
_parse_defined(struct sl_pp_context *context,
const struct sl_pp_token_info *input,
unsigned int *pi,
struct sl_pp_token_buffer *buffer,
struct sl_pp_process_state *state)
{
struct sl_pp_token_info input;
int parens = 0;
int macro_name;
struct sl_pp_macro *macro;
int defined = 0;
struct sl_pp_token_info result;
skip_whitespace(input, pi);
if (input[*pi].token == SL_PP_LPAREN) {
(*pi)++;
skip_whitespace(input, pi);
if (sl_pp_token_buffer_skip_white(buffer, &input)) {
return -1;
}
if (input.token == SL_PP_LPAREN) {
if (sl_pp_token_buffer_skip_white(buffer, &input)) {
return -1;
}
parens = 1;
}
if (input[*pi].token != SL_PP_IDENTIFIER) {
if (input.token != SL_PP_IDENTIFIER) {
strcpy(context->error_msg, "expected an identifier");
return -1;
}
macro_name = input[*pi].data.identifier;
macro_name = input.data.identifier;
for (macro = context->macro; macro; macro = macro->next) {
if (macro->name == macro_name) {
defined = 1;
break;
}
}
(*pi)++;
if (parens) {
skip_whitespace(input, pi);
if (input[*pi].token != SL_PP_RPAREN) {
if (sl_pp_token_buffer_skip_white(buffer, &input)) {
return -1;
}
if (input.token != SL_PP_RPAREN) {
strcpy(context->error_msg, "expected `)'");
return -1;
}
(*pi)++;
}
result.token = SL_PP_UINT;
@@ -108,12 +103,10 @@ _evaluate_if_stack(struct sl_pp_context *context)
static int
_parse_if(struct sl_pp_context *context,
const struct sl_pp_token_info *input,
unsigned int first,
unsigned int last)
struct sl_pp_token_buffer *buffer)
{
unsigned int i;
struct sl_pp_process_state state;
int found_end = 0;
struct sl_pp_token_info eof;
int result;
@@ -123,34 +116,40 @@ _parse_if(struct sl_pp_context *context,
}
memset(&state, 0, sizeof(state));
for (i = first; i < last;) {
switch (input[i].token) {
while (!found_end) {
struct sl_pp_token_info input;
sl_pp_token_buffer_get(buffer, &input);
switch (input.token) {
case SL_PP_WHITESPACE:
i++;
break;
case SL_PP_IDENTIFIER:
if (input[i].data.identifier == context->dict.defined) {
i++;
if (_parse_defined(context, input, &i, &state)) {
if (input.data.identifier == context->dict.defined) {
if (_parse_defined(context, buffer, &state)) {
free(state.out);
return -1;
}
} else {
if (sl_pp_macro_expand(context, input, &i, NULL, &state, sl_pp_macro_expand_unknown_to_0)) {
sl_pp_token_buffer_unget(buffer, &input);
if (sl_pp_macro_expand(context, buffer, NULL, &state, sl_pp_macro_expand_unknown_to_0)) {
free(state.out);
return -1;
}
}
break;
case SL_PP_NEWLINE:
case SL_PP_EOF:
found_end = 1;
break;
default:
if (sl_pp_process_out(&state, &input[i])) {
if (sl_pp_process_out(&state, &input)) {
strcpy(context->error_msg, "out of memory");
free(state.out);
return -1;
}
i++;
}
}
@@ -198,11 +197,9 @@ _parse_else(struct sl_pp_context *context)
int
sl_pp_process_if(struct sl_pp_context *context,
const struct sl_pp_token_info *input,
unsigned int first,
unsigned int last)
struct sl_pp_token_buffer *buffer)
{
return _parse_if(context, input, first, last);
return _parse_if(context, buffer);
}
int
@@ -301,9 +298,7 @@ sl_pp_process_ifndef(struct sl_pp_context *context,
int
sl_pp_process_elif(struct sl_pp_context *context,
const struct sl_pp_token_info *input,
unsigned int first,
unsigned int last)
struct sl_pp_token_buffer *buffer)
{
if (_parse_else(context)) {
return -1;
@@ -311,7 +306,7 @@ sl_pp_process_elif(struct sl_pp_context *context,
if (context->if_stack[context->if_ptr] & 1) {
context->if_ptr++;
if (_parse_if(context, input, first, last)) {
if (_parse_if(context, buffer)) {
return -1;
}
}

View file: sl_pp_line.c

@@ -33,39 +33,44 @@
int
sl_pp_process_line(struct sl_pp_context *context,
const struct sl_pp_token_info *input,
unsigned int first,
unsigned int last,
struct sl_pp_token_buffer *buffer,
struct sl_pp_process_state *pstate)
{
unsigned int i;
struct sl_pp_process_state state;
int found_end = 0;
int line_number = -1;
int file_number = -1;
unsigned int line;
unsigned int file;
memset(&state, 0, sizeof(state));
for (i = first; i < last;) {
switch (input[i].token) {
while (!found_end) {
struct sl_pp_token_info input;
sl_pp_token_buffer_get(buffer, &input);
switch (input.token) {
case SL_PP_WHITESPACE:
i++;
break;
case SL_PP_IDENTIFIER:
if (sl_pp_macro_expand(context, input, &i, NULL, &state, sl_pp_macro_expand_normal)) {
sl_pp_token_buffer_unget(buffer, &input);
if (sl_pp_macro_expand(context, buffer, NULL, &state, sl_pp_macro_expand_normal)) {
free(state.out);
return -1;
}
break;
case SL_PP_NEWLINE:
case SL_PP_EOF:
found_end = 1;
break;
default:
if (sl_pp_process_out(&state, &input[i])) {
if (sl_pp_process_out(&state, &input)) {
strcpy(context->error_msg, "out of memory");
free(state.out);
return -1;
}
i++;
}
}

View file: sl_pp_macro.c

@@ -88,15 +88,6 @@ sl_pp_macro_reset(struct sl_pp_macro *macro)
_macro_init(macro);
}
static void
skip_whitespace(const struct sl_pp_token_info *input,
unsigned int *pi)
{
while (input[*pi].token == SL_PP_WHITESPACE) {
(*pi)++;
}
}
static int
_out_number(struct sl_pp_context *context,
struct sl_pp_process_state *state,
@@ -119,24 +110,28 @@ _out_number(struct sl_pp_context *context,
int
sl_pp_macro_expand(struct sl_pp_context *context,
const struct sl_pp_token_info *input,
unsigned int *pi,
struct sl_pp_token_buffer *tokens,
struct sl_pp_macro *local,
struct sl_pp_process_state *state,
enum sl_pp_macro_expand_behaviour behaviour)
{
int mute = (behaviour == sl_pp_macro_expand_mute);
struct sl_pp_token_info input;
int macro_name;
struct sl_pp_macro *macro = NULL;
struct sl_pp_macro *actual_arg = NULL;
unsigned int j;
if (input[*pi].token != SL_PP_IDENTIFIER) {
if (sl_pp_token_buffer_get(tokens, &input)) {
return -1;
}
if (input.token != SL_PP_IDENTIFIER) {
strcpy(context->error_msg, "expected an identifier");
return -1;
}
macro_name = input[*pi].data.identifier;
macro_name = input.data.identifier;
/* First look for predefined macros.
*/
@@ -145,21 +140,18 @@ sl_pp_macro_expand(struct sl_pp_context *context,
if (!mute && _out_number(context, state, context->line)) {
return -1;
}
(*pi)++;
return 0;
}
if (macro_name == context->dict.___FILE__) {
if (!mute && _out_number(context, state, context->file)) {
return -1;
}
(*pi)++;
return 0;
}
if (macro_name == context->dict.___VERSION__) {
if (!mute && _out_number(context, state, 110)) {
return -1;
}
(*pi)++;
return 0;
}
@@ -175,7 +167,6 @@ sl_pp_macro_expand(struct sl_pp_context *context,
return -1;
}
}
(*pi)++;
return 0;
}
}
@@ -187,7 +178,6 @@ sl_pp_macro_expand(struct sl_pp_context *context,
if (!mute && _out_number(context, state, 1)) {
return -1;
}
(*pi)++;
return 0;
}
}
@@ -215,25 +205,26 @@ sl_pp_macro_expand(struct sl_pp_context *context,
return -1;
}
} else if (!mute) {
if (sl_pp_process_out(state, &input[*pi])) {
if (sl_pp_process_out(state, &input)) {
strcpy(context->error_msg, "out of memory");
return -1;
}
}
(*pi)++;
return 0;
}
(*pi)++;
if (macro->num_args >= 0) {
skip_whitespace(input, pi);
if (input[*pi].token != SL_PP_LPAREN) {
if (sl_pp_token_buffer_skip_white(tokens, &input)) {
return -1;
}
if (input.token != SL_PP_LPAREN) {
strcpy(context->error_msg, "expected `('");
return -1;
}
(*pi)++;
skip_whitespace(input, pi);
if (sl_pp_token_buffer_skip_white(tokens, &input)) {
return -1;
}
sl_pp_token_buffer_unget(tokens, &input);
}
if (macro->num_args > 0) {
@@ -242,103 +233,85 @@ sl_pp_macro_expand(struct sl_pp_context *context,
for (j = 0; j < (unsigned int)macro->num_args; j++) {
struct sl_pp_process_state arg_state;
unsigned int i;
int done = 0;
unsigned int paren_nesting = 0;
struct sl_pp_token_info eof;
memset(&arg_state, 0, sizeof(arg_state));
for (i = *pi; !done;) {
switch (input[i].token) {
while (!done) {
if (sl_pp_token_buffer_get(tokens, &input)) {
goto fail_arg;
}
switch (input.token) {
case SL_PP_WHITESPACE:
i++;
break;
case SL_PP_COMMA:
if (!paren_nesting) {
if (j < (unsigned int)macro->num_args - 1) {
done = 1;
i++;
} else {
strcpy(context->error_msg, "too many actual macro arguments");
return -1;
goto fail_arg;
}
} else {
if (sl_pp_process_out(&arg_state, &input[i])) {
if (sl_pp_process_out(&arg_state, &input)) {
strcpy(context->error_msg, "out of memory");
free(arg_state.out);
return -1;
goto fail_arg;
}
i++;
}
break;
case SL_PP_LPAREN:
paren_nesting++;
if (sl_pp_process_out(&arg_state, &input[i])) {
strcpy(context->error_msg, "out of memory");
free(arg_state.out);
return -1;
if (sl_pp_process_out(&arg_state, &input)) {
goto oom_arg;
}
i++;
break;
case SL_PP_RPAREN:
if (!paren_nesting) {
if (j == (unsigned int)macro->num_args - 1) {
done = 1;
i++;
} else {
strcpy(context->error_msg, "too few actual macro arguments");
return -1;
goto fail_arg;
}
} else {
paren_nesting--;
if (sl_pp_process_out(&arg_state, &input[i])) {
strcpy(context->error_msg, "out of memory");
free(arg_state.out);
return -1;
if (sl_pp_process_out(&arg_state, &input)) {
goto oom_arg;
}
i++;
}
break;
case SL_PP_IDENTIFIER:
if (sl_pp_macro_expand(context, input, &i, local, &arg_state, sl_pp_macro_expand_normal)) {
free(arg_state.out);
return -1;
sl_pp_token_buffer_unget(tokens, &input);
if (sl_pp_macro_expand(context, tokens, local, &arg_state, sl_pp_macro_expand_normal)) {
goto fail_arg;
}
break;
case SL_PP_EOF:
strcpy(context->error_msg, "too few actual macro arguments");
return -1;
goto fail_arg;
default:
if (sl_pp_process_out(&arg_state, &input[i])) {
strcpy(context->error_msg, "out of memory");
free(arg_state.out);
return -1;
if (sl_pp_process_out(&arg_state, &input)) {
goto oom_arg;
}
i++;
}
}
(*pi) = i;
eof.token = SL_PP_EOF;
if (sl_pp_process_out(&arg_state, &eof)) {
strcpy(context->error_msg, "out of memory");
free(arg_state.out);
return -1;
goto oom_arg;
}
*pmacro = sl_pp_macro_new();
if (!*pmacro) {
strcpy(context->error_msg, "out of memory");
free(arg_state.out);
return -1;
goto oom_arg;
}
(**pmacro).name = formal_arg->name;
@@ -346,47 +319,95 @@ sl_pp_macro_expand(struct sl_pp_context *context,
formal_arg = formal_arg->next;
pmacro = &(**pmacro).next;
continue;
oom_arg:
strcpy(context->error_msg, "out of memory");
fail_arg:
free(arg_state.out);
goto fail;
}
}
/* Right paren for non-empty argument list has already been eaten. */
if (macro->num_args == 0) {
skip_whitespace(input, pi);
if (input[*pi].token != SL_PP_RPAREN) {
strcpy(context->error_msg, "expected `)'");
return -1;
if (sl_pp_token_buffer_skip_white(tokens, &input)) {
goto fail;
}
if (input.token != SL_PP_RPAREN) {
strcpy(context->error_msg, "expected `)'");
goto fail;
}
(*pi)++;
}
for (j = 0;;) {
switch (macro->body[j].token) {
case SL_PP_NEWLINE:
if (sl_pp_process_out(state, &macro->body[j])) {
strcpy(context->error_msg, "out of memory");
return -1;
}
j++;
break;
/* XXX: This is all wrong, we should be ungetting all tokens
* back to the main token buffer.
*/
{
struct sl_pp_token_buffer buffer;
case SL_PP_IDENTIFIER:
if (sl_pp_macro_expand(context, macro->body, &j, actual_arg, state, behaviour)) {
return -1;
}
break;
/* Seek to the end.
*/
for (j = 0; macro->body[j].token != SL_PP_EOF; j++) {
}
j++;
case SL_PP_EOF:
sl_pp_macro_free(actual_arg);
return 0;
/* Create a context-less token buffer since we are not going to underrun
* its internal buffer.
*/
if (sl_pp_token_buffer_init(&buffer, NULL)) {
strcpy(context->error_msg, "out of memory");
goto fail;
}
default:
if (!mute) {
if (sl_pp_process_out(state, &macro->body[j])) {
/* Unget the tokens in reverse order so later they will be fetched correctly.
*/
for (; j > 0; j--) {
sl_pp_token_buffer_unget(&buffer, &macro->body[j - 1]);
}
/* Expand.
*/
for (;;) {
struct sl_pp_token_info input;
sl_pp_token_buffer_get(&buffer, &input);
switch (input.token) {
case SL_PP_NEWLINE:
if (sl_pp_process_out(state, &input)) {
strcpy(context->error_msg, "out of memory");
return -1;
sl_pp_token_buffer_destroy(&buffer);
goto fail;
}
break;
case SL_PP_IDENTIFIER:
sl_pp_token_buffer_unget(&buffer, &input);
if (sl_pp_macro_expand(context, &buffer, actual_arg, state, behaviour)) {
sl_pp_token_buffer_destroy(&buffer);
goto fail;
}
break;
case SL_PP_EOF:
sl_pp_token_buffer_destroy(&buffer);
sl_pp_macro_free(actual_arg);
return 0;
default:
if (!mute) {
if (sl_pp_process_out(state, &input)) {
strcpy(context->error_msg, "out of memory");
sl_pp_token_buffer_destroy(&buffer);
goto fail;
}
}
}
j++;
}
}
fail:
sl_pp_macro_free(actual_arg);
return -1;
}

View file: sl_pp_macro.h

@@ -64,8 +64,7 @@ enum sl_pp_macro_expand_behaviour {
int
sl_pp_macro_expand(struct sl_pp_context *context,
const struct sl_pp_token_info *input,
unsigned int *pi,
struct sl_pp_token_buffer *tokens,
struct sl_pp_macro *local,
struct sl_pp_process_state *state,
enum sl_pp_macro_expand_behaviour behaviour);

View file: sl_pp_process.c

@@ -31,15 +31,6 @@
#include "sl_pp_public.h"
static void
skip_whitespace(const struct sl_pp_token_info *input,
unsigned int *pi)
{
while (input[*pi].token == SL_PP_WHITESPACE) {
(*pi)++;
}
}
int
sl_pp_process_out(struct sl_pp_process_state *state,
const struct sl_pp_token_info *token)
@@ -68,12 +59,10 @@ sl_pp_process_out(struct sl_pp_process_state *state,
int
sl_pp_process(struct sl_pp_context *context,
const struct sl_pp_token_info *input,
struct sl_pp_token_info **output)
{
unsigned int i = 0;
int found_eof = 0;
struct sl_pp_process_state state;
int found_eof = 0;
memset(&state, 0, sizeof(state));
@@ -96,103 +85,113 @@ sl_pp_process(struct sl_pp_context *context,
}
while (!found_eof) {
skip_whitespace(input, &i);
if (input[i].token == SL_PP_HASH) {
i++;
skip_whitespace(input, &i);
switch (input[i].token) {
struct sl_pp_token_info input;
if (sl_pp_token_buffer_skip_white(&context->tokens, &input)) {
return -1;
}
if (input.token == SL_PP_HASH) {
if (sl_pp_token_buffer_skip_white(&context->tokens, &input)) {
return -1;
}
switch (input.token) {
case SL_PP_IDENTIFIER:
{
int name;
int found_eol = 0;
unsigned int first;
unsigned int last;
struct sl_pp_token_info endof;
struct sl_pp_token_peek peek;
int result;
/* Directive name. */
name = input[i].data.identifier;
i++;
skip_whitespace(input, &i);
name = input.data.identifier;
first = i;
if (sl_pp_token_buffer_skip_white(&context->tokens, &input)) {
return -1;
}
sl_pp_token_buffer_unget(&context->tokens, &input);
if (sl_pp_token_peek_init(&peek, &context->tokens)) {
return -1;
}
while (!found_eol) {
switch (input[i].token) {
if (sl_pp_token_peek_get(&peek, &input)) {
sl_pp_token_peek_destroy(&peek);
return -1;
}
switch (input.token) {
case SL_PP_NEWLINE:
/* Preserve newline just for the sake of line numbering. */
endof = input[i];
i++;
endof = input;
found_eol = 1;
break;
case SL_PP_EOF:
endof = input[i];
i++;
endof = input;
found_eof = 1;
found_eol = 1;
break;
default:
i++;
}
}
last = i - 1;
if (name == context->dict._if) {
if (sl_pp_process_if(context, input, first, last)) {
return -1;
struct sl_pp_token_buffer buffer;
result = sl_pp_token_peek_to_buffer(&peek, &buffer);
if (result == 0) {
result = sl_pp_process_if(context, &buffer);
sl_pp_token_buffer_destroy(&buffer);
}
} else if (name == context->dict.ifdef) {
if (sl_pp_process_ifdef(context, input, first, last)) {
return -1;
}
result = sl_pp_process_ifdef(context, peek.tokens, 0, peek.size - 1);
} else if (name == context->dict.ifndef) {
if (sl_pp_process_ifndef(context, input, first, last)) {
return -1;
}
result = sl_pp_process_ifndef(context, peek.tokens, 0, peek.size - 1);
} else if (name == context->dict.elif) {
if (sl_pp_process_elif(context, input, first, last)) {
return -1;
struct sl_pp_token_buffer buffer;
result = sl_pp_token_peek_to_buffer(&peek, &buffer);
if (result == 0) {
result = sl_pp_process_elif(context, &buffer);
sl_pp_token_buffer_destroy(&buffer);
}
} else if (name == context->dict._else) {
if (sl_pp_process_else(context, input, first, last)) {
return -1;
}
result = sl_pp_process_else(context, peek.tokens, 0, peek.size - 1);
} else if (name == context->dict.endif) {
if (sl_pp_process_endif(context, input, first, last)) {
return -1;
}
result = sl_pp_process_endif(context, peek.tokens, 0, peek.size - 1);
} else if (context->if_value) {
if (name == context->dict.define) {
if (sl_pp_process_define(context, input, first, last)) {
return -1;
}
result = sl_pp_process_define(context, peek.tokens, 0, peek.size - 1);
} else if (name == context->dict.error) {
sl_pp_process_error(context, input, first, last);
return -1;
sl_pp_process_error(context, peek.tokens, 0, peek.size - 1);
result = -1;
} else if (name == context->dict.extension) {
if (sl_pp_process_extension(context, input, first, last, &state)) {
return -1;
}
result = sl_pp_process_extension(context, peek.tokens, 0, peek.size - 1, &state);
} else if (name == context->dict.line) {
if (sl_pp_process_line(context, input, first, last, &state)) {
return -1;
struct sl_pp_token_buffer buffer;
result = sl_pp_token_peek_to_buffer(&peek, &buffer);
if (result == 0) {
result = sl_pp_process_line(context, &buffer, &state);
sl_pp_token_buffer_destroy(&buffer);
}
} else if (name == context->dict.pragma) {
if (sl_pp_process_pragma(context, input, first, last, &state)) {
return -1;
}
result = sl_pp_process_pragma(context, peek.tokens, 0, peek.size - 1, &state);
} else if (name == context->dict.undef) {
if (sl_pp_process_undef(context, input, first, last)) {
return -1;
}
result = sl_pp_process_undef(context, peek.tokens, 0, peek.size - 1);
} else {
strcpy(context->error_msg, "unrecognised directive name");
return -1;
result = -1;
}
}
sl_pp_token_peek_commit(&peek);
sl_pp_token_peek_destroy(&peek);
if (result) {
return result;
}
if (sl_pp_process_out(&state, &endof)) {
strcpy(context->error_msg, "out of memory");
return -1;
@@ -203,21 +202,19 @@ sl_pp_process(struct sl_pp_context *context,
case SL_PP_NEWLINE:
/* Empty directive. */
if (sl_pp_process_out(&state, &input[i])) {
if (sl_pp_process_out(&state, &input)) {
strcpy(context->error_msg, "out of memory");
return -1;
}
context->line++;
i++;
break;
case SL_PP_EOF:
/* Empty directive. */
if (sl_pp_process_out(&state, &input[i])) {
if (sl_pp_process_out(&state, &input)) {
strcpy(context->error_msg, "out of memory");
return -1;
}
i++;
found_eof = 1;
break;
@@ -228,36 +225,40 @@ sl_pp_process(struct sl_pp_context *context,
} else {
int found_eol = 0;
sl_pp_token_buffer_unget(&context->tokens, &input);
while (!found_eol) {
switch (input[i].token) {
if (sl_pp_token_buffer_get(&context->tokens, &input)) {
return -1;
}
switch (input.token) {
case SL_PP_WHITESPACE:
/* Drop whitespace all together at this point. */
i++;
break;
case SL_PP_NEWLINE:
/* Preserve newline just for the sake of line numbering. */
if (sl_pp_process_out(&state, &input[i])) {
if (sl_pp_process_out(&state, &input)) {
strcpy(context->error_msg, "out of memory");
return -1;
}
context->line++;
i++;
found_eol = 1;
break;
case SL_PP_EOF:
if (sl_pp_process_out(&state, &input[i])) {
if (sl_pp_process_out(&state, &input)) {
strcpy(context->error_msg, "out of memory");
return -1;
}
i++;
found_eof = 1;
found_eol = 1;
break;
case SL_PP_IDENTIFIER:
if (sl_pp_macro_expand(context, input, &i, NULL, &state,
sl_pp_token_buffer_unget(&context->tokens, &input);
if (sl_pp_macro_expand(context, &context->tokens, NULL, &state,
context->if_value ? sl_pp_macro_expand_normal : sl_pp_macro_expand_mute)) {
return -1;
}
@@ -265,12 +266,11 @@ sl_pp_process(struct sl_pp_context *context,
default:
if (context->if_value) {
if (sl_pp_process_out(&state, &input[i])) {
if (sl_pp_process_out(&state, &input)) {
strcpy(context->error_msg, "out of memory");
return -1;
}
}
i++;
}
}
}

View file: sl_pp_process.h

@@ -53,9 +53,7 @@ sl_pp_process_undef(struct sl_pp_context *context,
int
sl_pp_process_if(struct sl_pp_context *context,
const struct sl_pp_token_info *input,
unsigned int first,
unsigned int last);
struct sl_pp_token_buffer *input);
int
sl_pp_process_ifdef(struct sl_pp_context *context,
@@ -71,9 +69,7 @@ sl_pp_process_ifndef(struct sl_pp_context *context,
int
sl_pp_process_elif(struct sl_pp_context *context,
const struct sl_pp_token_info *input,
unsigned int first,
unsigned int last);
struct sl_pp_token_buffer *buffer);
int
sl_pp_process_else(struct sl_pp_context *context,
@@ -109,9 +105,7 @@ sl_pp_process_extension(struct sl_pp_context *context,
int
sl_pp_process_line(struct sl_pp_context *context,
const struct sl_pp_token_info *input,
unsigned int first,
unsigned int last,
struct sl_pp_token_buffer *buffer,
struct sl_pp_process_state *state);
int

View file: sl_pp_public.h

@@ -37,7 +37,8 @@ struct sl_pp_context;
struct sl_pp_context *
sl_pp_context_create(void);
sl_pp_context_create(const char *input,
const struct sl_pp_purify_options *options);
void
sl_pp_context_destroy(struct sl_pp_context *context);
@@ -70,13 +71,10 @@ sl_pp_context_cstr(const struct sl_pp_context *context,
int
sl_pp_version(struct sl_pp_context *context,
const struct sl_pp_token_info *input,
unsigned int *version,
unsigned int *tokens_eaten);
unsigned int *version);
int
sl_pp_process(struct sl_pp_context *context,
const struct sl_pp_token_info *input,
struct sl_pp_token_info **output);
#endif /* SL_PP_PUBLIC_H */

View file: sl_pp_token.c

@@ -507,7 +507,7 @@ _tokenise_number(struct sl_pp_context *context,
}
static int
int
sl_pp_token_get(struct sl_pp_context *context,
struct sl_pp_token_info *out)
{
@@ -809,20 +809,16 @@ sl_pp_token_get(struct sl_pp_context *context,
int
sl_pp_tokenise(struct sl_pp_context *context,
const char *input,
const struct sl_pp_purify_options *options,
struct sl_pp_token_info **output)
{
struct sl_pp_token_info *out = NULL;
unsigned int out_len = 0;
unsigned int out_max = 0;
sl_pp_purify_state_init(&context->pure, input, options);
for (;;) {
struct sl_pp_token_info info;
if (sl_pp_token_get(context, &info)) {
if (sl_pp_token_buffer_get(&context->tokens, &info)) {
free(out);
return -1;
}

View file: sl_pp_token.h

@@ -122,10 +122,12 @@ struct sl_pp_token_info {
struct sl_pp_purify_options;
int
sl_pp_token_get(struct sl_pp_context *context,
struct sl_pp_token_info *out);
int
sl_pp_tokenise(struct sl_pp_context *context,
const char *input,
const struct sl_pp_purify_options *options,
struct sl_pp_token_info **output);
#endif /* SL_PP_TOKEN_H */

View file: sl_pp_token_util.h (new file)

@@ -0,0 +1,211 @@
/**************************************************************************
*
* Copyright 2009 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef SL_PP_TOKEN_UTIL_H
#define SL_PP_TOKEN_UTIL_H
#include <assert.h>
#include <stdlib.h>
#include "sl_pp_token.h"
struct sl_pp_context;
/*
* A token buffer allows one to get and unget a token
* from a preprocessor context.
*/
struct sl_pp_token_buffer {
struct sl_pp_context *context;
unsigned int size;
unsigned int capacity;
struct sl_pp_token_info *tokens;
};
static int
sl_pp_token_buffer_init(struct sl_pp_token_buffer *buffer,
struct sl_pp_context *context)
{
buffer->context = context;
buffer->size = 0;
buffer->capacity = 64;
buffer->tokens = malloc(buffer->capacity * sizeof(struct sl_pp_token_info));
if (!buffer->tokens) {
return -1;
}
return 0;
}
static void
sl_pp_token_buffer_destroy(struct sl_pp_token_buffer *buffer)
{
free(buffer->tokens);
}
static int
sl_pp_token_buffer_get(struct sl_pp_token_buffer *buffer,
struct sl_pp_token_info *out)
{
/* Pop from stack first if not empty. */
if (buffer->size) {
*out = buffer->tokens[--buffer->size];
return 0;
}
assert(buffer->context);
return sl_pp_token_get(buffer->context, out);
}
static void
sl_pp_token_buffer_unget(struct sl_pp_token_buffer *buffer,
const struct sl_pp_token_info *in)
{
/* Resize if needed. */
if (buffer->size == buffer->capacity) {
buffer->capacity += 64;
buffer->tokens = realloc(buffer->tokens,
buffer->capacity * sizeof(struct sl_pp_token_info));
assert(buffer->tokens);
}
/* Push token on stack. */
buffer->tokens[buffer->size++] = *in;
}
static int
sl_pp_token_buffer_skip_white(struct sl_pp_token_buffer *buffer,
struct sl_pp_token_info *out)
{
if (sl_pp_token_buffer_get(buffer, out)) {
return -1;
}
while (out->token == SL_PP_WHITESPACE) {
if (sl_pp_token_buffer_get(buffer, out)) {
return -1;
}
}
return 0;
}
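/*
 * Usage sketch for the get/unget pair above (illustrative only; it
 * assumes `context` was created with sl_pp_context_create()):
 *
 *    struct sl_pp_token_info tok;
 *
 *    if (sl_pp_token_buffer_get(&context->tokens, &tok)) {
 *       return -1;
 *    }
 *    if (tok.token != SL_PP_HASH) {
 *       sl_pp_token_buffer_unget(&context->tokens, &tok);
 *    }
 *
 * Ungetting pushes the token back on the buffer's stack, so the next
 * sl_pp_token_buffer_get() returns the same token again.
 */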
/*
* A token peek allows one to get a number of tokens from a buffer
* and then either commit the operation or abort it,
* effectively ungetting the peeked tokens.
*/
struct sl_pp_token_peek {
struct sl_pp_token_buffer *buffer;
unsigned int size;
unsigned int capacity;
struct sl_pp_token_info *tokens;
};
static int
sl_pp_token_peek_init(struct sl_pp_token_peek *peek,
struct sl_pp_token_buffer *buffer)
{
peek->buffer = buffer;
peek->size = 0;
peek->capacity = 64;
peek->tokens = malloc(peek->capacity * sizeof(struct sl_pp_token_info));
if (!peek->tokens) {
return -1;
}
return 0;
}
static void
sl_pp_token_peek_destroy(struct sl_pp_token_peek *peek)
{
/* Abort. */
while (peek->size) {
sl_pp_token_buffer_unget(peek->buffer, &peek->tokens[--peek->size]);
}
free(peek->tokens);
}
static int
sl_pp_token_peek_get(struct sl_pp_token_peek *peek,
struct sl_pp_token_info *out)
{
/* Get token from buffer. */
if (sl_pp_token_buffer_get(peek->buffer, out)) {
return -1;
}
/* Save it. */
if (peek->size == peek->capacity) {
peek->capacity += 64;
peek->tokens = realloc(peek->tokens,
peek->capacity * sizeof(struct sl_pp_token_info));
assert(peek->tokens);
}
peek->tokens[peek->size++] = *out;
return 0;
}
static void
sl_pp_token_peek_commit(struct sl_pp_token_peek *peek)
{
peek->size = 0;
}
static int
sl_pp_token_peek_to_buffer(const struct sl_pp_token_peek *peek,
struct sl_pp_token_buffer *buffer)
{
unsigned int i;
if (sl_pp_token_buffer_init(buffer, NULL)) {
return -1;
}
for (i = peek->size; i > 0; i--) {
sl_pp_token_buffer_unget(buffer, &peek->tokens[i - 1]);
}
return 0;
}
static int
sl_pp_token_peek_skip_white(struct sl_pp_token_peek *peek,
struct sl_pp_token_info *out)
{
if (sl_pp_token_peek_get(peek, out)) {
return -1;
}
while (out->token == SL_PP_WHITESPACE) {
if (sl_pp_token_peek_get(peek, out)) {
return -1;
}
}
return 0;
}
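/*
 * Usage sketch for the peek helpers (illustrative only): look ahead in
 * a buffer without consuming its tokens, then either commit the peeked
 * tokens or let sl_pp_token_peek_destroy() unget them.
 *
 *    struct sl_pp_token_peek peek;
 *    struct sl_pp_token_info tok;
 *
 *    if (sl_pp_token_peek_init(&peek, &context->tokens)) {
 *       return -1;
 *    }
 *    if (sl_pp_token_peek_skip_white(&peek, &tok)) {
 *       sl_pp_token_peek_destroy(&peek);
 *       return -1;
 *    }
 *    if (tok.token == SL_PP_HASH) {
 *       sl_pp_token_peek_commit(&peek);
 *    }
 *    sl_pp_token_peek_destroy(&peek);
 *
 * After a commit the destroy call only frees the peek storage; without
 * a commit, destroy ungets every peeked token back to the underlying
 * buffer.
 */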
#endif /* SL_PP_TOKEN_UTIL_H */

View file: sl_pp_version.c

@@ -33,21 +33,23 @@
int
sl_pp_version(struct sl_pp_context *context,
const struct sl_pp_token_info *input,
unsigned int *version,
unsigned int *tokens_eaten)
unsigned int *version)
{
unsigned int i = 0;
struct sl_pp_token_peek peek;
unsigned int line = context->line;
/* Default values if `#version' is not present. */
*version = 110;
*tokens_eaten = 0;
if (sl_pp_token_peek_init(&peek, &context->tokens)) {
return -1;
}
/* There can be multiple `#version' directives present.
* Accept the value of the last one.
*/
for (;;) {
struct sl_pp_token_info input;
int found_hash = 0;
int found_version = 0;
int found_number = 0;
@@ -55,82 +57,101 @@ sl_pp_version(struct sl_pp_context *context,
/* Skip whitespace and newlines and seek for hash. */
while (!found_hash) {
switch (input[i].token) {
if (sl_pp_token_peek_get(&peek, &input)) {
sl_pp_token_peek_destroy(&peek);
return -1;
}
switch (input.token) {
case SL_PP_NEWLINE:
line++;
/* pass thru */
break;
case SL_PP_WHITESPACE:
i++;
break;
case SL_PP_HASH:
i++;
found_hash = 1;
break;
default:
sl_pp_token_peek_destroy(&peek);
return 0;
}
}
/* Skip whitespace and seek for `version'. */
while (!found_version) {
switch (input[i].token) {
if (sl_pp_token_peek_get(&peek, &input)) {
sl_pp_token_peek_destroy(&peek);
return -1;
}
switch (input.token) {
case SL_PP_WHITESPACE:
i++;
break;
case SL_PP_IDENTIFIER:
if (input[i].data.identifier != context->dict.version) {
if (input.data.identifier != context->dict.version) {
sl_pp_token_peek_destroy(&peek);
return 0;
}
i++;
found_version = 1;
break;
default:
sl_pp_token_peek_destroy(&peek);
return 0;
}
}
sl_pp_token_peek_commit(&peek);
/* Skip whitespace and seek for version number. */
while (!found_number) {
switch (input[i].token) {
if (sl_pp_token_buffer_get(&context->tokens, &input)) {
sl_pp_token_peek_destroy(&peek);
return -1;
}
switch (input.token) {
case SL_PP_WHITESPACE:
i++;
break;
case SL_PP_UINT:
*version = atoi(sl_pp_context_cstr(context, input[i].data._uint));
i++;
*version = atoi(sl_pp_context_cstr(context, input.data._uint));
found_number = 1;
break;
default:
strcpy(context->error_msg, "expected version number after `#version'");
sl_pp_token_peek_destroy(&peek);
return -1;
}
}
/* Skip whitespace and seek for either newline or eof. */
while (!found_end) {
switch (input[i].token) {
if (sl_pp_token_buffer_get(&context->tokens, &input)) {
sl_pp_token_peek_destroy(&peek);
return -1;
}
switch (input.token) {
case SL_PP_WHITESPACE:
i++;
break;
case SL_PP_NEWLINE:
line++;
/* pass thru */
case SL_PP_EOF:
i++;
*tokens_eaten = i;
context->line = line;
found_end = 1;
break;
default:
strcpy(context->error_msg, "expected end of line after version number");
sl_pp_token_peek_destroy(&peek);
return -1;
}
}