Overview

The code below shows, in general, how the tokenizer can be used.

Javascript

// Reads tokens one at a time and dispatches on each token's type.
// Fixes: 'in' is a reserved word in Javascript and cannot be used as a
// variable name; 'FleInputStream' corrected to 'FileInputStream'.
var input     = new FileInputStream( file );
var tokenizer = new Tokenizer();
    tokenizer.setInput( input );

while ( tokenizer.hasMoreTokens() )
{
    var token = tokenizer.nextToken();

    switch ( token.getTokenType() )
    {
    case OPEN:
        // do something
        break;

    case CLOSE:
        // do something
        break;
    }
}

Tokenizer

C Example

#include <stdlib.h>
#include <stdio.h>

#include "libtokenizer/Runtime.h"
#include "libtokenizer/Tokenizer.h"

//  Example driver: tokenizes ./test/Sample.txt and pretty-prints each token.
int main( int argc, char** argv )
{
    const char* filepath = "./test/Sample.txt";

    PushbackReader* p = new_PushbackReader( filepath );
    {
        Tokenizer* t = new_Tokenizer( p );
        {
            while ( Tokenizer_hasMoreTokens( t ) )
            {
                Token* token = Tokenizer_nextToken( t );
                {
                    //  Token_print renders the token with terminal colouring.
                    Token_print( token, stdout );
                    //const char* token_content = Token_getContent( token );
                    //fprintf( stdout, "%s", token_content );
                }
                free_Token( token );
            }
        }
        free_Tokenizer( t );
    }
    free_PushbackReader( p );

    //  Runtime_allocated() reports outstanding Runtime_* allocations;
    //  a non-zero value here indicates a leak.
    if ( Runtime_allocated() )
    {
        fprintf( stderr, "Memory leak: %i\n", Runtime_allocated() );
    }
}

Javascript Example

const fs           = require( 'fs' );
const libtokenizer = require( '/Users/daniel/Documents/Dropbox/Dropspace-Sites/_CA/com.libtokenizer/_gen/lib/js/libtokenizer' );

// Kicks off an asynchronous read of the sample file; tokenization
// happens in mainHandler once the content arrives.
function main()
{
    // fs.readFile returns undefined - the previous assignment to
    // 'content' was misleading; results arrive only via the callback.
    fs.readFile( './test/Sample.txt', "ascii", mainHandler );
}

// Callback for fs.readFile: logs the raw content, then tokenizes it
// and logs each token. On error, reports it and returns.
function mainHandler( error, content )
{
    if ( error )
    {
        console.error( error );
        return;
    }
    else
    {
        console.log( content );

        var reader    = new libtokenizer.PushbackReader( content );
        var tokenizer = new libtokenizer.Tokenizer     ( reader  );

        while ( tokenizer.hasMoreTokens() )
        {
            // 'var' added: 'token' was previously an accidental global.
            var token = tokenizer.nextToken();

            console.log( token );
        }
    }
}

main();

Class Definitions

Ix

public class
{
    @reader : PushbackReader*   # character source
    @queue  : Queue<Token*>     # one-token look-ahead queue

    # NOTE(review): 'new' is declared to return PushbackReader*, which
    # looks like a copy/paste slip - a constructor would be expected to
    # yield the Tokenizer itself; confirm against the Ix language rules.
    public method new( reader: PushbackReader* ) : PushbackReader*
    public method nextToken()                    : Token*
    public method hasMoreTokens()                : boolean
}

C

#ifndef LIBTOKENIZER_TOKENIZER_H
#define LIBTOKENIZER_TOKENIZER_H

#include "libtokenizer/Base.h"
#include "libtokenizer/PushbackReader.h"
#include "libtokenizer/Queue.h"
#include "libtokenizer/Token.h"

//  A Tokenizer pulls characters from a PushbackReader and keeps a
//  one-token look-ahead in 'queue' so hasMoreTokens can be answered.
typedef struct _Tokenizer
{
    PushbackReader* reader;   //  borrowed; released by the caller
    Queue*          queue;    //  owned; released by free_Tokenizer

} Tokenizer;

Tokenizer* new_Tokenizer          ( PushbackReader* reader );
Tokenizer* free_Tokenizer         ( Tokenizer*      self   );
Token*     Tokenizer_nextToken    ( Tokenizer*      self   );
bool       Tokenizer_hasMoreTokens( Tokenizer*      self   );

#endif
#include <stdlib.h>
#include "libtokenizer/Runtime.h"
#include "libtokenizer/StringBuffer.h"
#include "libtokenizer/Tokenizer.h"
#include "libtokenizer/TokenGroup.h"

static void   primeQueue( Tokenizer* self );
static Token*       next( Tokenizer* self );
import java.io.*;


//  NOTE(review): this Java sketch does not mirror the other
//  implementations, which hold a PushbackReader and a Queue of tokens;
//  here a single look-ahead token is kept instead - confirm which
//  design is intended.
public class Tokenizer {

InputStream input = null; // character source
Token       token = null; // single token of look-ahead
}

Constructor

Ix

# Stores the reader, creates an empty token queue, then primes the
# queue so that hasMoreTokens() is immediately answerable.
public new( reader : PushbackReader )
{
    @reader = reader
    @queue  = new Queue<Token>()

    primeQueue();
}

C

/*
 *  Allocates a Tokenizer over the given reader and primes its token
 *  queue. Returns NULL if allocation fails. The reader is borrowed,
 *  not owned.
 */
Tokenizer* new_Tokenizer( PushbackReader* reader )
{
    Tokenizer* self = Runtime_calloc( 1, sizeof( Tokenizer ) );

    if ( !self ) return NULL;

    self->reader = reader;
    self->queue  = new_Queue();

    primeQueue( self );

    return self;
}

Javascript

// Constructs a Tokenizer over the given PushbackReader and primes the
// look-ahead queue with the first token (if any).
function Tokenizer( reader )
{
    this.reader = reader;
    this.queue  = new Queue();
    this.primeQueue();
}
// CommonJS (Node.js) export of the two public classes.
module.exports
=
{
    PushbackReader: PushbackReader,
    Tokenizer:      Tokenizer
}
// Alternative namespace-object form. NOTE(review): it exposes only
// Tokenizer, not PushbackReader - presumably the two snippets are
// alternatives rather than both active; confirm which is generated.
var libtokenizer
=
{
    exports:
    {
        Tokenizer: Tokenizer
    }
}

Destructor

Ix

Ix does not require an explicit destructor.

C

/*
 *  Releases the Tokenizer and its queue; the reader is only unlinked,
 *  because it is owned by the caller. Always returns NULL so callers
 *  can write: t = free_Tokenizer( t );
 */
Tokenizer* free_Tokenizer( Tokenizer* self )
{
    if ( self )
    {
        self->queue  = free_Queue( self->queue );
        self->reader = 0;

        Runtime_free( self );
    }
    return 0;
}

nextToken

'nextToken' returns the next available token; otherwise null/undefined.

In reality, 'primeQueue' is called to supply the token queue with another token, then the head token of the queue is returned, if available.

Ix

# Returns the next token, or null when input is exhausted.
# Refills the look-ahead queue, then removes and returns its head.
# NOTE(review): uses @queue.length and removeFirst() here, but
# getLength()/removeHead() elsewhere - confirm the Queue API.
public nextToken()
{
    primeQueue();

    if ( @queue.length > 0 )
    {
        return @queue.removeFirst()
    }
    else
    {
        return null;
    }
}

C

/*
 *  Returns the next Token (ownership passes to the caller), or NULL
 *  when input is exhausted. Refills the look-ahead queue first, then
 *  hands out its head.
 */
Token* Tokenizer_nextToken( Tokenizer* self )
{
    Token* head = NULL;

    primeQueue( self );

    if ( Queue_getLength( self->queue ) > 0 )
    {
        head = (Token*) Queue_removeHead( self->queue );
    }
    return head;
}

Javascript

// Returns the next token, or undefined when the input is exhausted.
// Refills the look-ahead queue first, then hands out its head.
Tokenizer.prototype.nextToken
=
function()
{
    var head;

    this.primeQueue();

    if ( this.queue.getLength() > 0 )
    {
        head = this.queue.removeHead();
    }
    return head;
}

hasMoreTokens

Returns true if there are more tokens available; otherwise false.

As both the constructor and 'nextToken' call 'primeQueue' to ensure that the 'queue' always has a token to return, if available; there are no more tokens left if the queue is found to be empty.

Ix

# True while the primed look-ahead queue still holds a token.
public hasMoreTokens() : boolean
{
    return (@queue.getLength() > 0);
}

C

/*  True while the primed look-ahead queue still holds a token. */
bool Tokenizer_hasMoreTokens( Tokenizer* self )
{
    return 0 < Queue_getLength( self->queue );
}

Javascript

// True while the primed look-ahead queue still holds a token.
Tokenizer.prototype.hasMoreTokens
=
function()
{
    return 0 < this.queue.getLength();
}

Prime Queue

The 'primeQueue' method simply calls 'next' to retrieve the next token, then adds it to the end of the queue if one is able to be retrieved.

Ix

# Pulls one token from the reader via next() and appends it to the
# tail of the queue, if one was available.
private primeQueue()
{
    if ( var token = next() )
    {
        @queue.addTail( token )
    }
}

C

/*  Pulls one token from the reader (if any) onto the tail of the queue. */
static void primeQueue( Tokenizer* self )
{
    Token* token = next( self );

    if ( token )
    {
        Queue_addTail( self->queue, token );
    }
}

Javascript

// Pulls one token from the reader (if any) onto the tail of the queue.
Tokenizer.prototype.primeQueue
=
function()
{
    var token = this.next();

    if ( token )
    {
        this.queue.addTail( token );
    }
}

Next

The 'next' method reads characters from the PushbackReader - 'reader' - and appends them to an initially empty string buffer. The method determines the token group using the first character, and then calls the TokenGroup.matches method for each additional character, which decides when the character can be appended to the previous characters to form a token.

The method must also handle the following special cases:

  1. When the first character indicates an escape code (\).
  2. When the first character indicates the start of a string (").
  3. When the first character indicates the start of a character constant (').

Usually, the last action the 'next' method takes is to decide that the most recently read character does not belong in the current token, and to push it back into the reader. For strings and character constants, however, it will read the terminating (") or ('), and then exit the loop.

Ix

# Reads one token's worth of characters from the reader.
# Returns the new Token, or null when the reader is exhausted.
# Fixes: the undeclared 'c2' (a typo for 'ch2') is now used
# consistently, and the declared return type is Token*, not string.
private next() : Token*
{
    var token: Token*

    if ( var ch = @reader.read() )
    {
        var sb    = new StringBuffer()
        var group = new TokenGroup( ch )

        sb.append( ch )

        while ( var ch2 = @reader.read() )
        {
            if ( EnumGroupType.ESCAPE == group.groupType )
            {
                # An escape token is the backslash plus exactly one character.
                sb  = sb.append( ch2 )
                ch2 = @reader.read()
                break;
            }
            else
            if ( group.matches( ch2 ) )
            {
                if ( '\\' == ch2 )
                {
                    # Keep an escaped character (e.g. \" inside a string) intact.
                    sb  = sb.append( ch2 )
                    ch2 = @reader.read()
                    sb  = sb.append( ch2 )
                }
                else
                {
                    sb = sb.append( ch2 )
                }
            }
            else
            if ( EnumGroupType.STRING == group.groupType )
            {
                # Append the closing quote, then read one character ahead.
                sb  = sb.append( ch2 )
                ch2 = @reader.read()
                break
            }
            else
            if ( EnumGroupType.CHAR == group.groupType )
            {
                sb  = sb.append( ch2 )
                ch2 = @reader.read()
                break
            }
            else
            {
                break
            }
        }

        if ( ch2 )
        {
            # The character read ahead is not part of this token.
            @reader.pushback()
        }

        if ( !sb.isEmpty()  )
        {
            token = new Token( this, sb.getContent(), group );
        }
    }
    return token;
}

C

/*
 *  Reads one token's worth of characters from self->reader.
 *  Returns a new Token (ownership passes to the caller), or NULL once
 *  the reader is exhausted.
 *
 *  The first character fixes the token's group; further characters are
 *  accepted while TokenGroup_Matches approves them. ESCAPE, STRING and
 *  CHAR groups consume their trailing character(s) and then read one
 *  character ahead, so on every exit path ch2 holds the first character
 *  that does NOT belong to the token (or 0 at end of input).
 */
static Token* next( Tokenizer* self )
{
    Token* token = NULL;
    int    ch    = 0;
    int    ch2   = 0;

    if ( (ch = PushbackReader_read( self->reader )) )
    {
        StringBuffer* sb = new_StringBuffer();
        TokenGroup group = TokenGroup_DetermineType( ch );

        sb = StringBuffer_append_char( sb, ch );

        while ( (ch2 = PushbackReader_read( self->reader )) )
        {
            if ( ESCAPE == group )
            {
                //  An escape token is the backslash plus exactly one character.
                sb  = StringBuffer_append_char( sb, ch2 );
                ch2 = PushbackReader_read( self->reader );
                break;
            }
            else
            if ( TokenGroup_Matches( group, ch2 ) )
            {
                if ( '\\' == ch2 )
                {
                    //  Keep an escaped character (e.g. \" inside a string) intact.
                    sb  = StringBuffer_append_char( sb, ch2 );
                    ch2 = PushbackReader_read( self->reader );
                    sb  = StringBuffer_append_char( sb, ch2 );
                }
                else
                {
                    sb  = StringBuffer_append_char( sb, ch2 );
                }
            }
            else
            if ( STRING == group )
            {
                //  Append the closing quote, then read one character ahead.
                sb = StringBuffer_append_char( sb, ch2 );
                ch2 = PushbackReader_read( self->reader );
                break;
            }
            else
            if ( CHAR == group )
            {
                sb = StringBuffer_append_char( sb, ch2 );
                ch2 = PushbackReader_read( self->reader );
                break;
            }
            else
            {
                break;
            }
        }

        if ( ch2 )
        {
            //  The character read ahead is not part of this token.
            PushbackReader_pushback( self->reader );
        }

        if ( !StringBuffer_isEmpty( sb ) )
        {
            //  new_Token copies sb->content (via StringCopy), so the
            //  buffer can be released immediately afterwards.
            token = new_Token( self, sb->content, group );
        }

        free_StringBuffer( sb );
    }
    return token;
}

Javascript

// Reads one token's worth of characters from the reader.
// Returns the token CONTENT as a string (unlike the C version, which
// returns a Token object), or undefined at end of input.
// Fix: pushback() is now guarded by 'if ( ch2 )' and the read-ahead
// for STRING/CHAR is tracked in ch2, matching the C implementation -
// previously pushback() was also called at end of input.
// NOTE(review): the comparisons against the literals "STRING"/"CHAR"
// assume group.groupType equals its name string - confirm how the
// Enum values returned by TokenGroup.DetermineType compare.
Tokenizer.prototype.next
=
function()
{
    var token = "";
    var ch;
    var ch2;

    if ( (ch = this.reader.read()) )
    {
        var group = new TokenGroup( ch );

        token = token + ch;

        while ( (ch2 = this.reader.read()) )
        {
            if ( group.matches( ch2 ) )
            {
                token = token + ch2;
            }
            else
            if ( "STRING" == group.groupType )
            {
                // Append the closing quote, then read one character ahead.
                token = token + ch2;
                ch2   = this.reader.read();
                break;
            }
            else
            if ( "CHAR" == group.groupType )
            {
                token = token + ch2;
                ch2   = this.reader.read();
                break;
            }
            else
            {
                break;
            }
        }

        if ( ch2 )
        {
            // The character read ahead is not part of this token.
            this.reader.pushback();
        }
    }
    return ("" == token) ? undefined : token;
}

Token

Example

#include <stdio.h>
#include "libtokenizer/Token.h"

//  Example: constructs a Token directly (no owning Tokenizer, hence
//  the 0) and prints its content, group, and derived type.
int main( int argc, char** argv )
{
    Token* token = new_Token( 0, "String", ALPHANUMERIC );

    fprintf( stdout, "%s\n", Token_getContent   ( token ) );
    fprintf( stdout, "%i\n", Token_getTokenGroup( token ) );
    fprintf( stdout, "%i\n", Token_getTokenType ( token ) );

    free_Token( token );
}

Class Definitions

Ix

# A Token records its source Tokenizer, the copied character content,
# its length, and the group/type classification.
public class
{
    @t       : Tokenizer&;
    @content : string*;
    @length  : integer;
    @group   : TokenGroup;
    @type    : TokenType;
}

C

#include "TokenGroup.h"
#include "TokenType.h"

//  Forward declaration - Token only stores a back-pointer.
typedef struct _Tokenizer Tokenizer;

typedef struct _Token
{
    Tokenizer* t;         //  borrowed back-pointer to the owning tokenizer
    char*      content;   //  owned copy of the token text
    int        length;
    TokenGroup group;
    TokenType  type;

} Token;

//  NOTE(review): 'this' as a parameter name is legal C but makes the
//  header unusable from C++ - consider renaming to 'self'.
Token*      new_Token                      ( Tokenizer* t, const char* content, TokenGroup aGroup );
Token*      free_Token                     ( Token* this );
const char* Token_getContent               ( Token* this );
TokenGroup  Token_getTokenGroup            ( Token* this );
TokenType   Token_getTokenType             ( Token* this );
void        Token_print                    ( Token* this, void* stream );

Javascript

Class definition is in constructor.

Java

//  Java sketch of Token; the constructor shown further below belongs
//  inside this class body (the closing brace is not shown here).
public class Token {

Tokenizer  t;       // owning tokenizer
String     content; // token text
int        length;  // cached content.length()
TokenGroup group;
TokenType  type;

Constructor

Ix

# Clones the content, caches its length, and derives the token type
# from the group and content.
public new( t: Tokenizer&, content: string&, aGroup: TokenGroup )
{
    @t       = t;
    @content = content.clone();
    @length  = content.length;
    @group   = aGroup;
    @type    = DetermineTokenType( aGroup, @content );
}

C

#include <stdlib.h>
#include <stdio.h>
#include "libtokenizer/Runtime.h"
#include "libtokenizer/String.h"
#include "libtokenizer/Term.h"
#include "libtokenizer/Token.h"

TokenType   Token_DetermineTokenType       ( TokenGroup group, const char* content );
TokenType   Token_DetermineWhitespaceType  ( const char* content );
TokenType   Token_DetermineSymbolicType    ( const char* content );
TokenType   Token_DetermineAlphanumericType( const char* content );
TokenType   Token_DetermineOpenType        ( const char* content );
TokenType   Token_DetermineCloseType       ( const char* content );

/*
 *  Allocates a Token, copying 'content'; the token type is derived
 *  from the group and content. Returns NULL if allocation fails.
 */
Token* new_Token( Tokenizer* t, const char* content, TokenGroup aGroup )
{
    Token* this   = Runtime_calloc( 1, sizeof(Token) );

    if ( this )   //  guard added: previously a failed calloc was dereferenced
    {
        this->t       = t;
        this->content = StringCopy  ( content );
        this->length  = StringLength( content );
        this->group   = aGroup;
        this->type    = Token_DetermineTokenType( aGroup, content );
    }
    return this;
}

Javascript

// Token constructor, mirroring the C/Java/Ix constructors which take
// ( t, content, aGroup ). Fix: the previous extra 'length' parameter
// was unused (length is derived from content) and shifted aGroup into
// the wrong position for callers following the other implementations.
function Token( t, content, aGroup )
{
    this.t       = t;
    this.content = content;
    this.length  = content.length;
    this.group   = aGroup;
    this.type    = Token.DetermineTokenType( aGroup, content );
}
//  Keeps the content reference (Java strings are immutable), caches
//  its length, and derives the token type from group and content.
public Token( Tokenizer t, String content, TokenGroup aGroup )
{
    this.t       = t;
    this.content = content;
    this.length  = content.length();
    this.group   = aGroup;
    this.type    = DetermineTokenType( aGroup, content );
}

Destructor

Ix

Ix does not require an explicit destructor.

public delete()
{}

C

/*
 *  Frees a Token and its copied content. Safe to call with NULL
 *  (guard added for consistency with free_Tokenizer). Always returns
 *  NULL so callers can write: token = free_Token( token );
 */
Token* free_Token( Token* token )
{
    if ( token )
    {
        //  NOTE(review): content comes from StringCopy but is released
        //  with plain free(); if StringCopy allocates through Runtime,
        //  this skews Runtime_allocated() - confirm the allocator pairing.
        free( token->content );

        token->t       = NULL;
        token->content = NULL;

        Runtime_free( token );
    }
    return NULL;
}

Javascript

Javascript does not require an explicit destructor.

Token.getContent

Ix

# Read-only view of the token's character content.
public getContent() : const string&
{
    return @content;
}

C

/*  Read-only view of the token's character content. */
const char* Token_getContent( Token* this )
{
    const char* content = this->content;

    return content;
}

Javascript

// Read-only view of the token's character content.
Token.prototype.getContent
=
function()
{
    var content = this.content;
    return content;
}

Token.getLength

Ix

# Number of characters in the token content (cached at construction).
public getLength() : integer
{
    return @length;
}

C

/*  Number of characters in the token content (cached at construction). */
int Token_getLength( Token* this )
{
    int length = this->length;

    return length;
}

Javascript

// Number of characters in the token content (cached at construction).
Token.prototype.getLength
=
function()
{
    var length = this.length;
    return length;
}

Token.getTokenGroup

Ix

# Accessor for the token's group classification.
# Fix: added the missing parentheses to the declaration, matching
# getContent() and getLength().
public getTokenGroup() : TokenGroup
{
    return @group;
}

C

/*  Accessor for the token's group classification. */
TokenGroup Token_getTokenGroup( Token* this )
{
    TokenGroup group = this->group;

    return group;
}

Javascript

// Accessor for the token's group classification.
Token.prototype.getTokenGroup
=
function()
{
    var group = this.group;
    return group;
}

Token.getTokenType

Ix

# Accessor for the token's derived type.
# Fix: added the missing parentheses to the declaration, matching
# getContent() and getLength().
public getTokenType() : TokenType
{
    return @type;
}

C

/*  Accessor for the token's derived type. */
TokenType Token_getTokenType( Token* this )
{
    TokenType type = this->type;

    return type;
}

Javascript

// Accessor for the token's derived type.
Token.prototype.getTokenType
=
function()
{
    var type = this.type;
    return type;
}

Token.DetermineTokenType

Ix

# Maps a token group to a token type; WHITESPACE, SYMBOLIC and
# ALPHANUMERIC are refined by inspecting the content.
private DetermineTokenType( group: TokenGroup, content: string& )
{
    var type = TokenType.UNKNOWN;

    switch( group )
    {
    case WHITESPACE:
        type = Token.DetermineWhitespaceType( content );
        break;

    case SYMBOLIC:
        type = Token.DetermineSymbolicType( content );
        break;

    case ALPHANUMERIC:
        # Fix: was 'DetermineAlhanumericType' (typo).
        type = Token.DetermineAlphanumericType( content );
        break;

    case VALUE:
        type = TokenType.VALUE;
        break;

    case HEX_VALUE:
        type = TokenType.HEX;
        break;

    case OPEN:
        type = TokenType.UNKNOWN_TYPE;
        break;

    case CLOSE:
        type = TokenType.UNKNOWN_TYPE;
        break;

    case UNKNOWN_GROUP:
        type = TokenType.UNKNOWN_TYPE;
        break;
    }

    # NOTE(review): the C version also handles STRING and CHAR groups -
    # confirm whether they should be added here as well.
    return type;
}

C

//  Maps a token group to a token type; WHITESPACE, OPEN, CLOSE,
//  SYMBOLIC and ALPHANUMERIC are refined by inspecting the content.
TokenType Token_DetermineTokenType( TokenGroup group, const char* content )
{
    TokenType type = UNKNOWN_TYPE;

    switch ( group )
    {
    case UNKNOWN_GROUP:
        type = UNKNOWN_TYPE;
        break;

    case WHITESPACE:
        type = Token_DetermineWhitespaceType( content );
        break;

    case OPEN:
        type = Token_DetermineOpenType( content );
        break;

    case CLOSE:
        type = Token_DetermineCloseType( content );
        break;

    case SYMBOLIC:
        type = Token_DetermineSymbolicType( content );
        break;

    case ALPHANUMERIC:
        type = Token_DetermineAlphanumericType( content );
        break;

    case STRING:
        type = UNKNOWN_TYPE;
        break;

    case CHAR:
        //  NOTE(review): CHAR yielding FLOAT looks like a copy/paste
        //  slip from the VALUE case below - confirm the intended type.
        type = FLOAT;
        break;

    case VALUE:
        type = FLOAT;
        break;

    case HEX_VALUE:
        type = HEX;
        break;

    default:
        type = UNKNOWN_TYPE;
    }

    return type;
}

Javascript

// Maps a token group to a token type; WHITESPACE, SYMBOLIC and
// ALPHANUMERIC are refined by inspecting the content.
// Fixes: the function previously never returned 'type' (every call
// yielded undefined), and 'DetermineAlhanumericType' was a typo.
Token.DetermineTokenType
=
function( group, content )
{
    var type = TokenType.UNKNOWN;

    switch( group )
    {
    case TokenGroup.WHITESPACE:
        type = Token.DetermineWhitespaceType( content );
        break;

    case TokenGroup.SYMBOLIC:
        type = Token.DetermineSymbolicType( content );
        break;

    case TokenGroup.ALPHANUMERIC:
        type = Token.DetermineAlphanumericType( content );
        break;

    case TokenGroup.VALUE:
        type = TokenType.VALUE;
        break;

    case TokenGroup.HEX_VALUE:
        type = TokenType.HEX;
        break;
    }

    return type;
}
/*  Classifies a whitespace token by its first character. */
TokenType Token_DetermineWhitespaceType( const char* content )
{
    char first = content[0];

    if ( ' '  == first ) return SPACE;
    if ( '\t' == first ) return TAB;
    if ( '\n' == first ) return NEWLINE;

    return UNKNOWN_WHITESPACE;
}

/*  Classifies an opening bracket by its first character. */
TokenType Token_DetermineOpenType( const char* content )
{
    char first = content[0];

    if ( '{' == first ) return STARTBLOCK;
    if ( '(' == first ) return STARTEXPRESSION;
    if ( '[' == first ) return STARTSUBSCRIPT;
    if ( '<' == first ) return STARTTAG;

    return UNKNOWN_OPEN;
}

//  Classifies a closing bracket by its first character.
//  NOTE(review): the default returns UNKNOWN_OPEN - likely a
//  copy/paste from Token_DetermineOpenType; confirm whether the
//  TokenType enum offers a dedicated unknown-close member.
TokenType Token_DetermineCloseType( const char* content )
{
    switch ( content[0] )
    {
    case '}':
        return ENDBLOCK;
    case ')':
        return ENDEXPRESSION;
    case ']':
        return ENDSUBSCRIPT;
    case '>':
        return ENDTAG;
    default:
        return UNKNOWN_OPEN;
    }
}

//  Classifies a symbolic token by its first one or two characters.
//  Reading content[1] is safe because content is NUL-terminated
//  (see StringCopy in new_Token), so a one-character token presents
//  '\0' to the inner switches and falls into their default cases.
TokenType Token_DetermineSymbolicType( const char* content )
{
    switch ( content[0] )
    {
    case '~':   return SYMBOL;
    case '!':
        switch ( content[1] )
        {
        case '=':  return INFIXOP;
        default:   return PREFIXOP;
        }
        break;

    case '@':   return SYMBOL;
    case '#':   return SYMBOL;
    case '$':   return SYMBOL;
    case '%':
        switch ( content[1] )
        {
        case '=':  return ASSIGNMENTOP;
        default:   return INFIXOP;
        }
        break;

    case '^':
        switch ( content[1] )
        {
        case '=':  return ASSIGNMENTOP;
        default:   return INFIXOP;
        }
        break;

    case '&':
        switch ( content[1] )
        {
        case '&':  return INFIXOP;
        case '=':  return ASSIGNMENTOP;
        default:   return INFIXOP;
        }
        break;

    case '*':
        switch ( content[1] )
        {
        case '=':  return ASSIGNMENTOP;
        default:   return INFIXOP;
        }
        break;

    case '-':
        switch ( content[1] )
        {
        case '-':  return PREPOSTFIXOP;
        case '=':  return ASSIGNMENTOP;
        default:   return INFIXOP;
        }
        break;

    case '+':
        switch ( content[1] )
        {
        case '+':  return PREPOSTFIXOP;
        case '=':  return ASSIGNMENTOP;
        default:   return INFIXOP;
        }
        break;

    case '=':
        switch ( content[1] )
        {
        case '=':  return INFIXOP;
        default:   return ASSIGNMENTOP;
        }
        break;

    case '/':
        //  Comment openers are recognised here: // and /*
        switch ( content[1] )
        {
        case '/':  return LINECOMMENT;
        case '*':  return COMMENT;
        case '=':  return ASSIGNMENTOP;
        default:   return INFIXOP;
        }
        break;

    case ':':   return OPERATOR;
    case ';':   return STOP;
    case '<':   return INFIXOP;
    case '>':   return INFIXOP;
    default:    return SYMBOL;
    }
}

/*
 *  Classifies an alphanumeric token: declaration keywords, access
 *  modifiers, primitive type names and statement keywords are
 *  recognised; anything else is a plain WORD.
 */
TokenType Token_DetermineAlphanumericType( const char* content )
{
    static const char* modifiers[] = { "public", "protected", "private", 0 };

    static const char* primitives[] =
    {
        "bool", "boolean", "byte", "char", "const", "double", "float",
        "int", "integer", "long", "short", "signed", "string",
        "unsigned", "void", 0
    };

    static const char* keywords[] =
    {
        "break", "case", "catch", "default", "extends", "implements",
        "for", "foreach", "let", "namespace", "return", "switch",
        "try", "var", 0
    };

    int i;

    if ( StringEquals( content, "class"     ) ) return CLASS;
    if ( StringEquals( content, "import"    ) ) return IMPORT;
    if ( StringEquals( content, "include"   ) ) return INCLUDE;
    if ( StringEquals( content, "interface" ) ) return INTERFACE;
    if ( StringEquals( content, "package"   ) ) return PACKAGE;

    for ( i = 0; modifiers[i];  i++ ) if ( StringEquals( content, modifiers[i]  ) ) return MODIFIER;
    for ( i = 0; primitives[i]; i++ ) if ( StringEquals( content, primitives[i] ) ) return PRIMITIVE;
    for ( i = 0; keywords[i];   i++ ) if ( StringEquals( content, keywords[i]   ) ) return KEYWORD;

    return WORD;
}
//  Writes the token to 'stream' wrapped in terminal colour codes chosen
//  by group (refined by type for SYMBOLIC and ALPHANUMERIC tokens),
//  then resets the colour to normal.
//  NOTE(review): 'stream' is declared void* but passed to fprintf,
//  which expects FILE* - legal as an implicit conversion in C, but the
//  declaration could use FILE* directly.
void Token_print( Token* self, void* stream )
{
    switch ( self->group )
    {
    case OPEN:
    case CLOSE:
    case SYMBOLIC:
        switch( self->type )
        {
        case COMMENT:
        case LINECOMMENT:
            Term_Colour( stream, COLOR_COMMENT );
            break;

        default:
            Term_Colour( stream, COLOR_BOLD );
        }
        break;

    case STRING:
        Term_Colour( stream, COLOR_STRING );
        break;

    case CHAR:
        Term_Colour( stream, COLOR_CHAR );
        break;

    case ALPHANUMERIC:
        switch ( self->type )
        {
        case PRIMITIVE:
            Term_Colour( stream, COLOR_TYPE );
            break;

        case CLASS:
        case KEYWORD:
        case MODIFIER:
            Term_Colour( stream, COLOR_MODIFIER );
            break;

        case WORD:
            Term_Colour( stream, COLOR_NORMAL );
            break;

        default:
            Term_Colour( stream, COLOR_LIGHT );
        }
        break;

    case VALUE:
        Term_Colour( stream, COLOR_VALUE );
        break;

    case UNKNOWN_GROUP:
        Term_Colour( stream, COLOR_UNKNOWN );
        break;

    default:
        Term_Colour( stream, COLOR_NORMAL );
    }
    fprintf( stream, "%s", self->content );
    Term_Colour( stream, COLOR_NORMAL );
}
# NOTE(review): the C enum also declares ESCAPE and CHAR groups -
# confirm whether this Ix enum is meant to match it.
public enum TokenGroup
{
    UNKNOWN      : "UNKNOWN"
    WHITESPACE   : "WHITESPACE"     # space, tab, newline, etc.
    OPEN         : "OPEN"           # { ( [ <
    CLOSE        : "CLOSE"          # } ) ] >
    SYMBOLIC     : "SYMBOLIC"       # ~!@#$%^&*-
    ALPHANUMERIC : "ALPHANUMERIC"   # _ A-Z a-z 0-9
    VALUE        : "VALUE"          # 0-9
    STRING       : "STRING"         # "
    HEX_VALUE    : "HEX_VALUE"      # 0x9999
}
// String-valued enumeration of the token groups.
// NOTE(review): unlike the C TokenGroup enum there is no ESCAPE member,
// so the Javascript tokenizer cannot classify backslash escapes.
EnumTokenGroup
=
new Enum([
    "UNKNOWN",
    "WHITESPACE",
    "OPEN",
    "CLOSE",
    "SYMBOLIC",
    "ALPHANUMERIC",
    "VALUE",
    "STRING",
    "CHAR",
    "HEX_VALUE"
]);
// Wraps a single character together with its determined token group.
function TokenGroup( character )
{
    this.character = character;
    this.groupType = TokenGroup.DetermineType( character );
}
// Decides whether 'character' may be appended to a token whose group
// is this.groupType; mirrors the C TokenGroup_Matches.
// Fix: the hex-digit bounds were the STRING literals '65'/'97', which
// only worked through implicit number coercion; numeric literals now.
// NOTE(review): for CHAR this always answers false, whereas the C
// version accepts any non-quote character - confirm which is intended.
TokenGroup.prototype.matches
=
function( character )
{
    if ( "" == character )
    {
        return false;
    }
    else
    {
        var secondType = TokenGroup.DetermineType( character );
        var char_code  = character.charCodeAt( 0 );

        switch ( this.groupType )
        {
        case EnumTokenGroup.SYMBOLIC:
            switch ( secondType )
            {
            case EnumTokenGroup.SYMBOLIC:
                return true;

            default:
                return false;
            }
            break;

        case EnumTokenGroup.STRING:
            // Everything up to (but excluding) the next quote belongs.
            switch ( secondType )
            {
            case EnumTokenGroup.STRING:
                return false;

            default:
                return true;
            }
            break;

        case EnumTokenGroup.CHAR:
            return false;
            break;

        case EnumTokenGroup.ALPHANUMERIC:
            switch ( secondType )
            {
            case EnumTokenGroup.ALPHANUMERIC:
            case EnumTokenGroup.VALUE:
                return true;

            default:
                return false;
            }
            break;

        case EnumTokenGroup.WHITESPACE:
            switch ( secondType )
            {
            case EnumTokenGroup.WHITESPACE:
                return true;

            default:
                return false;
            }
            break;

        case EnumTokenGroup.VALUE:
            switch ( secondType )
            {
            case EnumTokenGroup.VALUE:
                return true;

            case EnumTokenGroup.ALPHANUMERIC:
                // Hex digits A-F (65-70) and a-f (97-102), plus the
                // 'x' of a 0x prefix.
                if ( (65 <= char_code) && (char_code <= 70) )
                {
                    return true;
                }
                else
                if ( (97 <= char_code) && (char_code <= 102) )
                {
                    return true;
                }
                else
                return ("x" == character);

            default:
                return false;
            }
            break;

        case EnumTokenGroup.UNKNOWN:
            switch ( secondType )
            {
            case EnumTokenGroup.UNKNOWN:
                return true;

            default:
                return false;
            }
            break;

        default:
            return false;
        }
    }
}
// Maps a single character to its token group.
// Fixes for parity with the C TokenGroup_DetermineType: TAB (9) is
// now whitespace, and ',' '?' '/' are symbolic.
// NOTE(review): EnumTokenGroup has no ESCAPE member, so backslash
// still falls through to UNKNOWN here, unlike the C version.
TokenGroup.DetermineType
=
function( ch )
{
    switch ( ch )
    {
    case '~':
    case '!':
    case '@':
    case '#':
    case '$':
    case '%':
    case '^':
    case '&':
    case '*':
    case '-':
    case '+':
    case '=':
    case '|':
    case ':':
    case ';':
    case ',':   // added for parity with the C implementation
    case '.':
    case '?':   // added for parity with the C implementation
    case '/':   // added for parity with the C implementation
        return EnumTokenGroup.SYMBOLIC;

    case '(':
    case '{':
    case '[':
    case '<':
        return EnumTokenGroup.OPEN;

    case ')':
    case '}':
    case ']':
    case '>':
        return EnumTokenGroup.CLOSE;

    case '"':
        return EnumTokenGroup.STRING;

    case '\'':
        return EnumTokenGroup.CHAR;

    case '_':
        return EnumTokenGroup.ALPHANUMERIC;

    default:
        var char_code = ch.charCodeAt( 0 );

        switch ( char_code )
        {
        case  9: // TAB - added for parity with the C implementation
        case 10: // LF
        case 11: // VT
        case 12: // FF
        case 13: // CR
        case 14: // SO
        case 15: // SI
        case 32: // SPACE
            return EnumTokenGroup.WHITESPACE;

        default:
            if ( (48 <= char_code) && (char_code <= 57) )
            {
                return EnumTokenGroup.VALUE;
            }
            else
            if ( (65 <= char_code) && (char_code <= 90) ) // uppercase
            {
                return EnumTokenGroup.ALPHANUMERIC;
            }
            else
            if ( (97 <= char_code) && (char_code <= 122) ) // lowercase
            {
                return EnumTokenGroup.ALPHANUMERIC;
            }
            return EnumTokenGroup.UNKNOWN;
        }
    }
}
#ifndef LIBTOKENIZER_TOKENGROUP_H
#define LIBTOKENIZER_TOKENGROUP_H

#include "libtokenizer/Base.h"

//  Character classes used while accumulating a token; a token's group
//  is fixed by its first character (see TokenGroup_DetermineType).
typedef enum _TokenGroup
{
    UNKNOWN_GROUP,
    WHITESPACE,
    OPEN,          //  { ( [ <
    CLOSE,         //  } ) ] >
    SYMBOLIC,
    ESCAPE,        //  backslash
    ALPHANUMERIC,  //  _ A-Z a-z
    STRING,        //  "
    CHAR,          //  '
    VALUE,         //  0-9
    HEX_VALUE

} TokenGroup;

TokenGroup TokenGroup_DetermineType( char ch );
bool       TokenGroup_Matches( TokenGroup self, char ch );

#endif
#include "libtokenizer/TokenGroup.h"

/*
 *  Maps a single character to its TokenGroup. The membership sets
 *  below reproduce the original case lists exactly.
 */
TokenGroup TokenGroup_DetermineType( char ch )
{
    const char* symbolic = "~!@#$%^&*-+=|:;,.?/";
    const char* open     = "({[<";
    const char* close    = ")}]>";
    const char* s;

    for ( s = symbolic; *s; s++ ) if ( ch == *s ) return SYMBOLIC;
    for ( s = open;     *s; s++ ) if ( ch == *s ) return OPEN;
    for ( s = close;    *s; s++ ) if ( ch == *s ) return CLOSE;

    if ( '"'  == ch ) return STRING;
    if ( '\'' == ch ) return CHAR;
    if ( '\\' == ch ) return ESCAPE;
    if ( '_'  == ch ) return ALPHANUMERIC;

    /*  TAB(9) through SI(15), plus SPACE(32). */
    if ( ((9 <= ch) && (ch <= 15)) || (32 == ch) ) return WHITESPACE;

    if ( (48 <= ch) && (ch <= 57)  ) return VALUE;        /* 0-9 */
    if ( (65 <= ch) && (ch <= 90)  ) return ALPHANUMERIC; /* A-Z */
    if ( (97 <= ch) && (ch <= 122) ) return ALPHANUMERIC; /* a-z */

    return UNKNOWN_GROUP;
}

//  TRUE when 'ch' may continue a token of group 'self'.
//  STRING and CHAR tokens accept everything except their own closing
//  quote; VALUE additionally accepts hex digits and the 'x' of "0x".
bool TokenGroup_Matches( TokenGroup self, char ch )
{
    if ( '\0' == ch )
    {
        return FALSE;
    }

    TokenGroup other = TokenGroup_DetermineType( ch );

    switch ( self )
    {
    case SYMBOLIC:
    case WHITESPACE:
    case UNKNOWN_GROUP:
        //  These groups only continue with a character of the same group.
        return (other == self) ? TRUE : FALSE;

    case STRING:
    case CHAR:
        //  Quoted tokens continue until the matching quote type appears.
        return (other == self) ? FALSE : TRUE;

    case ALPHANUMERIC:
        //  Identifiers may contain trailing digits.
        return ((ALPHANUMERIC == other) || (VALUE == other)) ? TRUE : FALSE;

    case VALUE:
        if ( VALUE == other )
        {
            return TRUE;
        }
        if ( ALPHANUMERIC == other )
        {
            //  Hex digits A-F / a-f, plus the 'x' of a "0x" prefix.
            if ( ('A' <= ch) && (ch <= 'F') ) return TRUE;
            if ( ('a' <= ch) && (ch <= 'f') ) return TRUE;

            return ('x' == ch);
        }
        return FALSE;

    default:
        return FALSE;
    }
}
//  Coarse character/token classification used by the tokenizer.
public enum TokenGroup
{
    UNKNOWN,        //  unclassified character
    WHITESPACE,
    OPEN,           //  ( { [ <
    CLOSE,          //  ) } ] >
    SYMBOLIC,       //  operator / punctuation characters
    ALPHANUMERIC,   //  letters, underscore, digits within identifiers
    VALUE,          //  numeric literals
    HEX_VALUE
}
public enum TokenType
{
    UNKNOWN_TYPE
    WORD            # Alphanumeric
    FILEPATH
    PACKAGE
    IMPORT
    INCLUDE
    CLASS
    CLASSNAME
    INTERFACE
    ENUM
    ENUMNAME
    GENERIC
    ANNOTATION
    IMETHOD
    METHOD
    BLOCK
    STARTBLOCK      # Open
    ENDBLOCK
    MEMBER
    EXPRESSION
    STARTEXPRESSION
    ENDEXPRESSION
    CLAUSE
    PARAMETERS
    PARAMETER
    ARGUMENTS
    ARGUMENT
    STATEMENT
    DECLARATION
    COMMENT
    JAVADOC
    BLANKLINE
    TOKEN
    SYMBOL
    KEYWORD
    MODIFIED
    PRIMITIVE
    TYPE
    METHODNAME
    VARIABLE
    NAME
    METHODCALL
    CONSTRUCTOR
    OPERATOR
    ASSIGNMENTOP
    PREFIXOP
    INFIXOP
    POSTFIXOP
    PREINFIXOP
    PREPOSTFIXOP
    SELECTOR
    VALUE
    FLOAT
    INTEGER
    NUMBER
    HEX
    OCTAL
    DOUBLEQUOTE
    QUOTE
    STOP
    TAB
    SPACE
    WHITESPACE
    NEWLINE
    LINECOMMENT
    ESCAPED
    OTHER
}
//  Enumeration of all token types produced by the tokenizer.
//  Fixed: "srcERIC" was a corrupted identifier — restored to "GENERIC",
//  matching the GENERIC member in the project's other TokenType listings.
//  NOTE(review): "FILE" is "FILEPATH" in the C enum — confirm which is intended.
TokenType
=
Enum
([
    "UNKNOWN_TYPE",
    "WORD",
    "FILE",
    "PACKAGE",
    "IMPORT",
    "INCLUDE",
    "CLASS",
    "CLASSNAME",
    "INTERFACE",
    "ENUM",
    "ENUMNAME",
    "GENERIC",
    "ANNOTATION",
    "IMETHOD",
    "METHOD",
    "BLOCK",
    "STARTBLOCK",
    "ENDBLOCK",
    "MEMBER",
    "EXPRESSION",
    "STARTEXPRESSION",
    "ENDEXPRESSION",
    "CLAUSE",
    "PARAMETERS",
    "PARAMETER",
    "ARGUMENTS",
    "ARGUMENT",
    "STATEMENT",
    "DECLARATION",
    "COMMENT",
    "JAVADOC",
    "BLANKLINE",
    "TOKEN",
    "SYMBOL",
    "KEYWORD",
    "MODIFIED",
    "PRIMITIVE",
    "TYPE",
    "METHODNAME",
    "VARIABLE",
    "NAME",
    "METHODCALL",
    "CONSTRUCTOR",
    "OPERATOR",
    "ASSIGNMENTOP",
    "PREFIXOP",
    "INFIXOP",
    "POSTFIXOP",
    "PREINFIXOP",
    "PREPOSTFIXOP",
    "SELECTOR",
    "VALUE",
    "FLOAT",
    "INTEGER",
    "NUMBER",
    "HEX",
    "OCTAL",
    "DOUBLEQUOTE",
    "QUOTE",
    "STOP",
    "TAB",
    "SPACE",
    "WHITESPACE",
    "NEWLINE",
    "LINECOMMENT",
    "ESCAPED",
    "OTHER"
]);
//  All token types recognised by the tokenizer, grouped by the
//  TokenGroup they refine. Normalised to consistent indentation
//  (the original mixed tabs and spaces); member names and order
//  are unchanged so existing values stay stable.
typedef enum _TokenType
{
    UNKNOWN_TYPE,
    UNKNOWN_WHITESPACE,
    UNKNOWN_OPEN,
    UNKNOWN_CLOSE,

    //  Whitespace
    SPACE,
    TAB,
    NEWLINE,

    //  Open
    STARTBLOCK,
    STARTEXPRESSION,
    STARTSUBSCRIPT,
    STARTTAG,

    //  Close
    ENDBLOCK,
    ENDEXPRESSION,
    ENDSUBSCRIPT,
    ENDTAG,

    //  Symbolic
    OPERATOR,
    ASSIGNMENTOP,
    PREFIXOP,
    INFIXOP,
    POSTFIXOP,
    PREINFIXOP,
    PREPOSTFIXOP,
    STOP,
    LINECOMMENT,
    COMMENT,

    //  Words / Composite
    WORD,
    FILEPATH,
    PACKAGE,
    IMPORT,
    INCLUDE,
    CLASS,
    CLASSNAME,
    INTERFACE,
    ENUM,
    ENUMNAME,
    GENERIC,
    ANNOTATION,
    IMETHOD,
    METHOD,
    BLOCK,
    MEMBER,
    MEMBERNAME,
    EXPRESSION,
    CLAUSE,
    PARAMETERS,
    PARAMETER,
    ARGUMENTS,
    ARGUMENT,
    STATEMENT,
    DECLARATION,
    JAVADOC,
    BLANKLINE,
    TOKEN,
    SYMBOL,
    KEYWORD,
    MODIFIER,
    PRIMITIVE,
    TYPE,
    METHODNAME,
    VARIABLE,
    NAME,
    METHODCALL,
    CONSTRUCTOR,
    SELECTOR,
    FLOAT,
    INTEGER,
    NUMBER,
    HEX,
    OCTAL,
    DOUBLEQUOTE,
    QUOTE,
    ESCAPED,
    OTHER
} TokenType;
//  Enumeration of all token types produced by the tokenizer.
//  Fixed: "srcERIC" was a corrupted identifier — restored to GENERIC,
//  matching the GENERIC member in the project's other TokenType listings.
public enum TokenType
{
    UNKNOWN_TYPE,
    WORD,
    FILE,
    PACKAGE,
    IMPORT,
    INCLUDE,
    CLASS,
    CLASSNAME,
    INTERFACE,
    ENUM,
    ENUMNAME,
    GENERIC,
    ANNOTATION,
    IMETHOD,
    METHOD,
    BLOCK,
    STARTBLOCK,
    ENDBLOCK,
    MEMBER,
    EXPRESSION,
    STARTEXPRESSION,
    ENDEXPRESSION,
    CLAUSE,
    PARAMETERS,
    PARAMETER,
    ARGUMENTS,
    ARGUMENT,
    STATEMENT,
    DECLARATION,
    COMMENT,
    JAVADOC,
    BLANKLINE,
    TOKEN,
    SYMBOL,
    KEYWORD,
    MODIFIED,
    PRIMITIVE,
    TYPE,
    METHODNAME,
    VARIABLE,
    NAME,
    METHODCALL,
    CONSTRUCTOR,
    OPERATOR,
    ASSIGNMENTOP,
    PREFIXOP,
    INFIXOP,
    POSTFIXOP,
    PREINFIXOP,
    PREPOSTFIXOP,
    SELECTOR,
    VALUE,
    FLOAT,
    INTEGER,
    NUMBER,
    HEX,
    OCTAL,
    DOUBLEQUOTE,
    QUOTE,
    STOP,
    TAB,
    SPACE,
    WHITESPACE,
    NEWLINE,
    LINECOMMENT,
    ESCAPED,
    OTHER
}

Array

C

#ifndef LIBTOKENIZER_ARRAY_H
#define LIBTOKENIZER_ARRAY_H

//  A growable array of opaque pointers (capacity doubles on demand).
typedef struct _Array
{
    void** objects;  //  backing store; NULL until first push/unshift
    int    length;   //  number of stored elements
    int    size;     //  capacity of 'objects'

} Array;

Array* new_Array();
Array* free_Array( Array* self );
Array* Array_push   ( Array* self, void* object );  //  append at tail
void*  Array_shift  ( Array* self );                //  remove head; NULL when empty
Array* Array_unshift( Array* self, void* object );  //  insert at head
int    Array_length ( Array* self );

#endif
#include <stdlib.h>
#include "libtokenizer/Array.h"
#include "libtokenizer/Runtime.h"

//  Grow the backing store: first allocation is one slot, after that
//  the capacity doubles. Existing elements are carried across.
void Array_expand( Array* self )
{
    int    grown_size = self->size ? (self->size * 2) : 1;
    void** grown      = (void**) Runtime_calloc( grown_size, sizeof( void* ) );

    for ( int i=0; i < self->length; i++ )
    {
        grown[i] = self->objects[i];
    }

    if ( self->objects )
    {
        Runtime_free( self->objects );
    }

    self->objects = grown;
    self->size    = grown_size;
}

//  Allocate an empty Array; storage is deferred until the first push.
Array* new_Array()
{
    Array* self = (Array*) Runtime_calloc( 1, sizeof( Array ) );

    return self;
}

//  Free the elements (which callers allocate with plain calloc — see
//  StringCopy), then the backing store, then the Array itself.
//  Fixed: now NULL-safe, and Runtime_free is no longer called on a
//  NULL 'objects' pointer — that decremented Runtime's allocation
//  counter for an Array that was never pushed to.
//  Always returns NULL so callers can clear their pointer.
Array* free_Array( Array* self )
{
    if ( self )
    {
        for ( int i=0; i < self->length; i++ )
        {
            if ( self->objects[i] )
            {
                free( self->objects[i] );
                self->objects[i] = 0;
            }
        }

        if ( self->objects )
        {
            Runtime_free( self->objects );
        }

        self->length  = 0;
        self->size    = 0;
        self->objects = 0;

        self = Runtime_free( self );
    }

    return self;
}

//  Append 'object' at the tail, growing the store when full.
Array* Array_push( Array* self, void* object )
{
    if ( self->size == self->length )
    {
        Array_expand( self );
    }

    self->objects[self->length] = object;
    self->length += 1;

    return self;
}

//  Remove and return the first element, compacting the rest forward.
//  Returns NULL when the array is empty.
void* Array_shift( Array* self )
{
    if ( 0 == self->length )
    {
        return NULL;
    }

    void* head = self->objects[0];
    int   last = self->length - 1;

    for ( int i=0; i < last; i++ )
    {
        self->objects[i] = self->objects[i+1];
    }

    self->objects[last] = 0;
    self->length        = last;

    return head;
}

//  Insert 'object' at the head, shifting existing elements one slot
//  to the right (growing the store when full).
Array* Array_unshift( Array* self, void* object )
{
    if ( self->length == self->size )
    {
        Array_expand( self );
    }

    for ( int i=self->length; i > 0; i-- )
    {
        self->objects[i] = self->objects[i-1];
    }

    self->objects[0] = object;
    self->length    += 1;

    return self;
}

//  Number of elements currently stored.
int Array_length( Array* self )
{
    return self->length;
}
#include <stdlib.h>
#include <stdio.h>
#include "libtokenizer/Array.h"
#include "libtokenizer/String.h"

//  Exercises Array: push 100 heap strings, shift them into 'target',
//  then drain and free everything, checking emptiness along the way.
//  Fixed: 'target' itself was leaked (only its elements were freed),
//  and main now returns an explicit status.
int main( int argc, char** argv )
{
    Array* array  = new_Array();
    Array* target = new_Array();

    for ( int i=0; i < 100; i++ )
    {
        char* test = StringCopy( "test" );

        Array_push( array, test );
    }

    int len = Array_length( array );

    //  Move every element from 'array' into 'target'.
    for ( int i=0; i < len; i++ )
    {
        char* test = (char*) Array_shift( array );

        Array_unshift( target, test );
    }
    len = Array_length( array );

    if ( 0 != len )
    {
        fprintf( stderr, "Unusual circumstance\n" );
        exit( -1 );
    }

    //  Shifting an empty array must yield NULL.
    if ( (char*) Array_shift( array ) )
    {
        fprintf( stderr, "Unusual circumstance\n" );
        exit( -1 );
    }
    free_Array( array );

    len = Array_length( target );

    for ( int i=0; i < len; i++ )
    {
        char* test = (char*) Array_shift( target );

        free( test );
    }
    len = Array_length( target );

    if ( 0 != len )
    {
        fprintf( stderr, "Unusual circumstance\n" );
        exit( -1 );
    }

    if ( (char*) Array_shift( target ) )
    {
        fprintf( stderr, "Unusual circumstance\n" );
        exit( -1 );
    }

    free_Array( target );   //  was leaked

    return 0;
}
//  A lexical token: raw text plus its TokenGroup and derived TokenType.
//  NOTE(review): the 'length' parameter is ignored — the stored length
//  is always recomputed from content.length; confirm callers expect this.
function Token( t, content, length, aGroup )
{
    this.t       = t;                 // passed through from the caller
    this.content = content;           // raw token text
    this.length  = content.length;    // derived, not taken from 'length'
    this.group   = aGroup;            // TokenGroup classification
    this.type    = Token.DetermineTokenType( aGroup, content );
}

//  No-op: mirrors the C API's free_Token so callers are portable.
Token.prototype.free
=
function()
{}

//  The raw text of this token.
Token.prototype.getContent
=
function()
{
    return this.content;

}

//  Number of characters in the token's content.
Token.prototype.getLength
=
function()
{
    return this.length;
}

//  The TokenGroup this token was classified into.
Token.prototype.getTokenGroup
=
function()
{
    return this.group;
}

//  The TokenType derived from the group and content at construction.
Token.prototype.getTokenType
=
function()
{
    return this.type;
}

//  Map a TokenGroup (plus the token text) to a TokenType.
//  Fixed: the computed 'type' was never returned — the function fell
//  off the end and always yielded undefined.
Token.DetermineTokenType
=
function( group, content )
{
    var type = TokenType.UNKNOWN;

    switch( group )
    {
    case TokenGroup.WHITESPACE:
        type = Token.DetermineWhitespaceType( content );
        break;

    case TokenGroup.SYMBOLIC:
        type = Token.DetermineSymbolicType( content );
        break;

    case TokenGroup.ALPHANUMERIC:
        //  NOTE(review): helper name is misspelled ("Alhanumeric") —
        //  kept as-is to match its definition elsewhere; confirm.
        type = Token.DetermineAlhanumericType( content );
        break;

    case TokenGroup.VALUE:
        type = TokenType.VALUE;
        break;

    case TokenGroup.HEX_VALUE:
        type = TokenType.HEX;
        break;
    }

    return type;
}

Base

#ifndef LIBTOKENIZER_BASE_H
#define LIBTOKENIZER_BASE_H

//  Minimal boolean support for pre-C99 toolchains, shared library-wide.

#ifndef bool
#define bool int
#endif

#ifndef TRUE
#define TRUE 1
#endif

#ifndef FALSE
#define FALSE 0
#endif

#endif
#ifndef LIBTOKENIZER_STRING_H
#define LIBTOKENIZER_STRING_H

#include "libtokenizer/Base.h"

//  A heap string object, plus bare char* helper functions.
typedef struct _String
{
    char* content;  //  private heap copy, NUL-terminated
    int   length;   //  set from StringLength at construction

} String;

String* new_String( const char* content );
String* free_String( String* self );

const char* String_content( const String* self );  //  borrowed pointer
int         String_length ( const String* self );
String*     String_copy   ( const String* self );  //  deep copy
String*     String_cat    ( const String* self, const String* other );  //  new concatenation
bool        String_equals ( const String* self, const String* other );

//  char* helpers: StringCopy/StringCat return calloc'd buffers the
//  caller must free().
int   StringLength( const char* s                  );
char* StringCopy  ( const char* s                  );
char* StringCat   ( const char* s1, const char* s2 );
bool  StringEquals( const char* s1, const char* s2 );

#endif
#include <stdlib.h>
#include <string.h>

#include "libtokenizer/Runtime.h"
#include "libtokenizer/String.h"

//  Allocate a String holding a private copy of 'content'.
//  Returns NULL when the allocation fails.
String* new_String( const char* content )
{
    String* self = Runtime_calloc( 1, sizeof( String ) );

    if ( !self ) return self;

    self->content = StringCopy  ( content );
    self->length  = StringLength( content );

    return self;
}

//  Release the copied content, then the String itself; NULL-safe.
//  Returns NULL so callers can clear their pointer.
String* free_String( String* self )
{
    if ( !self ) return self;

    free( self->content );
    self->content = 0;
    self->length  = 0;

    return Runtime_free( self );
}

//  Borrowed pointer to the internal buffer; do not free.
const char* String_content( const String* self )
{
    return self->content;
}

//  Length cached at construction time.
int String_length( const String* self )
{
    return self->length;
}

//  New String duplicating this one's content.
String* String_copy( const String* self )
{
    return new_String( self->content );
}

//  New String holding self's content followed by other's.
String* String_cat( const String* self, const String* other )
{
    char*   joined = StringCat( self->content, other->content );
    String* result = new_String( joined );

    free( joined );  //  new_String made its own copy

    return result;
}

//  TRUE when both Strings hold identical text.
bool String_equals( const String* self, const String* other )
{
    return StringEquals( self->content, other->content );
}

//  strlen(), narrowed to the int width used throughout this library.
int StringLength( const char* s )
{
    return (int) strlen( s );
}

//  Duplicate 's' into a freshly calloc'd buffer the caller must free().
//  Fixed: allocates exactly length+1 bytes (was +2 for no visible
//  reason), and guards against a failed allocation before strcpy
//  (previously crashed on out-of-memory). Returns NULL on failure.
char* StringCopy( const char* s )
{
    size_t len  = strlen( s ) + 1;
    char*  copy = calloc( len, sizeof( char ) );

    return copy ? strcpy( copy, s ) : NULL;
}

//  Join two C strings into a freshly calloc'd buffer the caller
//  must free().
char* StringCat( const char* s1, const char* s2 )
{
    size_t len1 = strlen( s1 );
    size_t len2 = strlen( s2 );

    char* joined = calloc( len1 + len2 + 1, sizeof( char ) );

    memcpy( joined,        s1, len1 );
    memcpy( joined + len1, s2, len2 );

    joined[len1 + len2] = '\0';

    return joined;
}

//  TRUE when both C strings hold identical bytes.
bool StringEquals( const char* s1, const char* s2 )
{
    return !strcmp( s1, s2 );
}
public class
{




}
#ifndef LIBTOKENIZER_INPUTSTREAM_H
#define LIBTOKENIZER_INPUTSTREAM_H

typedef struct _InputStream
{
    const char* filepath;
    void* f;

} InputStream;

InputStream* new_InputStream ( const char*  filepath );
InputStream* free_InputStream( InputStream* self     );
int          InputStream_read( InputStream* self     );

#endif
#include <stdio.h>
#include <stdlib.h>

#include "libtokenizer/InputStream.h"
#include "libtokenizer/String.h"

//  Allocate an InputStream remembering a private copy of 'filepath'.
//  NOTE(review): 'f' is left NULL — no fopen appears in the visible
//  code. calloc requires <stdlib.h>; verify this translation unit
//  includes it.
InputStream* new_InputStream( const char* filepath )
{
    InputStream* self = calloc( 1, sizeof( InputStream ) );

    if ( self )
    {
        self->filepath = StringCopy( filepath );
    }
    return self;
}
//  Build an enum-like object: each name in 'array' becomes a
//  property whose value is the name itself.
function Enum( array )
{
    var self = this;

    array.forEach( function( name ) { self[name] = name; } );
}

File

C

#ifndef LIBTOKENIZER_FILE_H
#define LIBTOKENIZER_FILE_H

#include "libtokenizer/Base.h"

//  TRUE when 'filepath' exists (lstat succeeds).
bool  File_Exists      ( const char* filepath );
//  Whole file as a NUL-terminated heap string; NULL if unreadable.
char* File_Get_Contents( const char* filepath );

#endif
#include <stdlib.h>
#include <stdio.h>
#include <sys/stat.h>

#include "libtokenizer/File.h"

char* readline( FILE* stream );

//  TRUE when lstat() succeeds for the path (file, directory, or symlink).
bool File_Exists( const char* filepath )
{
    struct stat info;
    int         status = lstat( filepath, &info );

    return (0 == status);
}

//  Read the whole file into a NUL-terminated heap buffer the caller
//  must free(). Returns NULL when the file cannot be opened or stat'd.
//  Fixed: the FILE* was leaked on every call (no fclose), and the
//  fread result was captured into an unused variable.
char* File_Get_Contents( const char* filepath )
{
    char* content = NULL;
    FILE* fp      = fopen( filepath, "r" );

    if ( fp )
    {
        struct stat buf;

        if ( 0 == lstat( filepath, &buf ) )
        {
            int size = buf.st_size;

            content = calloc( size + 1, sizeof( char ) );

            if ( content )
            {
                //  Short reads leave the zero-filled tail in place,
                //  so the buffer stays NUL-terminated either way.
                size_t red = fread( content, sizeof( char ), size, fp );
                (void) red;
            }
        }
        fclose( fp );  //  was leaked
    }
    return content;
}
#include <stdlib.h>
#include <stdio.h>

#include "libtokenizer/File.h"

//  Smoke test: print the contents of a known file, or report a
//  missing file on stderr.
int main( int argc, char** argv )
{
    const char* filepath = "./source/mt/1-Overview.txt";

    if ( ! File_Exists( filepath ) )
    {
        //  Fixed message: previously read "Could not file: %s".
        fprintf( stderr, "Could not find file: %s\n", filepath );
        fflush( stderr );
    }
    else
    {
        char* content = File_Get_Contents( filepath );

        fprintf( stdout, "%s\n", content );

        free( content );
    }

    return 0;
}

Pushback Reader

#include <stdlib.h>
#include <stdio.h>

#include "libtokenizer/File.h"
#include "libtokenizer/PushbackReader.h"
#include "libtokenizer/String.h"

//  Round-trip test: read the file through PushbackReader and verify
//  every character matches File_Get_Contents, randomly exercising
//  pushback along the way.
//  Fixed: the File_Get_Contents buffer was leaked, and a NULL buffer
//  (missing file) previously crashed StringLength.
int main( int argc, char** argv )
{
    const char* filepath = "./source/mt/1-Overview.txt";

    char* c = File_Get_Contents( filepath );

    if ( !c )
    {
        fprintf( stderr, "Could not read: %s\n", filepath );
        return -1;
    }

    PushbackReader* r = new_PushbackReader( filepath );
    {
        int  len = StringLength( c );
        int  i   = 0;
        char ch;

        while ( 0 != (ch = PushbackReader_read( r )) )
        {
            if ( i == len )
            {
                fprintf( stderr, "Exceeded filelength!!!" );
                exit( -1 );
            }

            if ( c[i] != ch )
            {
                fprintf( stderr, "Character mismatch: %x != %x\n", c[i], ch );
                exit( -1 );
            }

            fprintf( stdout, "#" );

            int rnum = rand();

            if ( rnum < (RAND_MAX/2) )
            {
                fprintf( stdout, "<>" );

                //  NOTE(review): three pushbacks after a single read can
                //  try to rewind past the start of the buffer on the
                //  first iteration — relies on pushback clamping; confirm.
                PushbackReader_pushback( r );
                PushbackReader_pushback( r );
                PushbackReader_pushback( r );

                PushbackReader_read( r );
                PushbackReader_read( r );
                PushbackReader_read( r );
            }
            i++;
        }
        fprintf( stdout, "\n" );
    }
    free_PushbackReader( r );

    free( c );  //  was leaked

    return 0;
}
#ifndef LIBTOKENIZER_PUSHBACKREADER_H
#define LIBTOKENIZER_PUSHBACKREADER_H

//  Loads a whole file into memory and serves it one character at a
//  time, with single-character pushback.
typedef struct _PushbackReader
{
    char* content;  //  entire file contents (empty string when file missing)
    int   head;     //  index of the next character to return
    int   length;   //  length of 'content'

} PushbackReader;

PushbackReader* new_PushbackReader     ( const char*     filepath );
PushbackReader* free_PushbackReader    ( PushbackReader* self     );  //  returns NULL
int             PushbackReader_read    ( PushbackReader* self     );  //  0 at end of input
PushbackReader* PushbackReader_pushback( PushbackReader* self     );  //  rewind one character

#endif
#include <stdlib.h>
#include <stdio.h>
#include "libtokenizer/File.h"
#include "libtokenizer/PushbackReader.h"
#include "libtokenizer/Runtime.h"
#include "libtokenizer/String.h"

//  Create a reader over the whole contents of 'filepath'; a missing
//  file yields an empty (but still valid) reader.
PushbackReader* new_PushbackReader( const char* filepath )
{
    PushbackReader* self = Runtime_calloc( 1, sizeof( PushbackReader ) );

    if ( !self ) return self;

    self->head = 0;

    if ( File_Exists( filepath ) )
    {
        self->content = File_Get_Contents( filepath );
        self->length  = StringLength( self->content );
    }
    else
    {
        self->content = StringCopy( "" );
        self->length  = 0;
    }

    return self;
}

//  Release the buffered content and the reader itself; NULL-safe.
//  Always returns NULL so callers can clear their pointer.
PushbackReader* free_PushbackReader( PushbackReader* self )
{
    if ( self )
    {
        free( self->content );
        self->content = 0;
        self->length  = 0;

        Runtime_free( self );
    }
    return 0;
}

//  Next character, or 0 at end of input (or for a NULL reader).
int PushbackReader_read( PushbackReader* self )
{
    if ( !self )
    {
        return 0;
    }

    if ( self->head >= self->length )
    {
        return 0;
    }

    return self->content[self->head++];
}

//  Step the read position back one character.
//  Fixed: the decrement is now clamped at the start of the buffer —
//  the unchecked version could drive 'head' negative, making the next
//  read index before content[0]. Also NULL-safe.
PushbackReader* PushbackReader_pushback( PushbackReader* self )
{
    if ( self && (0 < self->head) )
    {
        self->head--;
    }
    return self;
}
//  In-memory reader over 'content' with multi-character pushback.
function PushbackReader( content )
{
    this.content  = content;  //  full text being tokenized
    this.head     = 0;        //  index of the next character
    this.delta    = 0;        //  width of the last read, for pushback()
}
//  Return the next "character", which may be a multi-character cluster:
//  a backslash escape ("\x") or a quoted character ("'x'"). 'delta'
//  records how many source characters were consumed so pushback() can
//  rewind the whole cluster.
PushbackReader.prototype.read
=
function()
{
    var ch = this.content[this.head++];

    this.delta = 1;

    if ( "\\" == ch )
    {
        //  NOTE(review): at end of input this appends undefined to ch —
        //  confirm the input is always well formed.
        ch += this.content[this.head++];
        this.delta = 2;
    }
    else
    if ( "'" == ch )
    {
        //  Consume up to two more characters: the quoted char and the
        //  closing quote, each only if present.
        if ( this.content[this.head] )
        {
            ch += this.content[this.head++];
            this.delta++;
        }

        if ( this.content[this.head] )
        {
            ch += this.content[this.head++];
            this.delta++;
        }
    }

    return ch;
}
//  Rewind the last read() — which may have consumed several characters.
//  'delta' is cleared, so a second consecutive pushback is a no-op.
PushbackReader.prototype.pushback
=
function()
{
    this.head -= this.delta;

    this.delta = 0;
}

Queue

C

#ifndef LIBTOKENIZER_QUEUE_H
#define LIBTOKENIZER_QUEUE_H

#include "libtokenizer/Array.h"

//  FIFO queue; a thin wrapper around Array (head is index 0).
typedef struct _Queue
{
    Array* inner;

} Queue;

Queue* new_Queue       ();
Queue* free_Queue      ( Queue* self );                //  returns NULL
Queue* Queue_addHead   ( Queue* self, void* object );
Queue* Queue_addTail   ( Queue* self, void* object );
void*  Queue_removeHead( Queue* self );                //  NULL when empty
int    Queue_getLength ( Queue* self );

#endif
#include <stdlib.h>
#include "libtokenizer/Queue.h"
#include "libtokenizer/Runtime.h"

//  Allocate an empty queue backed by a new Array.
Queue* new_Queue()
{
    Queue* self = Runtime_calloc( 1, sizeof( Queue ) );

    if ( !self ) return self;

    self->inner = new_Array();

    return self;
}

//  Release the backing Array and the Queue itself; always returns NULL.
//  Fixed: Runtime_free was previously called even for a NULL 'self',
//  which decremented Runtime's live-allocation counter spuriously.
Queue* free_Queue( Queue* self )
{
    if ( self )
    {
        free_Array( self->inner );

        Runtime_free( self );
    }

    return 0;
}

//  Insert 'object' at the front of the queue.
Queue* Queue_addHead( Queue* self, void* object )
{
    Array_unshift( self->inner, object );

    return self;
}

//  Append 'object' at the back of the queue.
Queue* Queue_addTail( Queue* self, void* object )
{
    Array_push( self->inner, object );

    return self;
}

//  Remove and return the front element; NULL when the queue is empty.
void* Queue_removeHead( Queue* self )
{
    return Array_shift( self->inner );
}

//  Number of elements currently queued.
int Queue_getLength( Queue* self )
{
    return Array_length( self->inner );
}
#include <stdlib.h>
#include <stdio.h>
#include "libtokenizer/Queue.h"
#include "libtokenizer/String.h"

//  Exercises Queue: enqueue 100 heap strings, move them (reversing
//  order via addHead) into q2, then drain q2 and free everything.
int main( int argc, char** argv )
{
    Queue* q1 = new_Queue();
    Queue* q2 = new_Queue();

    for ( int i=0; i < 100; i++ )
    {
        char* test = StringCopy( "test" );

        Queue_addTail( q1, test );
    }

    int len = Queue_getLength( q1 );

    fprintf( stdout, "Removing %i items\n", len );
    for ( int i=0; i < len; i++ )
    {
        char* test = (char*) Queue_removeHead( q1 );

        fprintf( stdout, "%2i: %s\n", i, test );

        //  addHead reverses the order relative to q1.
        Queue_addHead( q2, test );
    }
    len = Queue_getLength( q1 );

    fprintf( stdout, "%i items left\n", len );

    q1 = free_Queue( q1 );

    len = Queue_getLength( q2 );
    fprintf( stdout, "Removing %i items from target\n", len );
    for ( int i=0; i < len; i++ )
    {
        char* test = (char*) Queue_removeHead( q2 );

        fprintf( stdout, "%2i: %s\n", i, test );

        free( test );
    }
    len = Queue_getLength( q2 );

    fprintf( stdout, "%i items left\n", len );

    if ( 0 != len )
    {
        fprintf( stderr, "Unusual circumstance: length\n" );
        exit( -1 );
    }

    //  Removing from an empty queue must yield NULL.
    if ( (char*) Queue_removeHead( q2 ) )
    {
        fprintf( stderr, "Unusual circumstance: head\n" );
        exit( -1 );
    }

    fprintf( stdout, "%i items left\n", len );

    q2 = free_Queue( q2 );
}

Javascript

//  A FIFO queue backed by a native array (head is index 0).
function Queue()
{
    this.inner = [];
}

//  Append 'object' at the back of the queue.
Queue.prototype.addTail
=
function( object )
{
    this.inner.push( object );
}

//  Remove and return the front element; undefined when empty.
Queue.prototype.removeHead
=
function()
{
    return this.inner.shift();
}

//  Insert 'object' at the front of the queue.
Queue.prototype.addHead
=
function( object )
{
    this.inner.unshift( object );
}

//  Number of elements currently queued.
Queue.prototype.getLength
=
function()
{
    return this.inner.length;
}
#ifndef LIBTOKENIZER_RUNTIME_H
#define LIBTOKENIZER_RUNTIME_H

//  Fixed: declare size_t before using it — previously this header
//  failed to compile when included before <stdlib.h>.
#include <stddef.h>

//  Counting wrappers around calloc/free, used for leak detection.
void* Runtime_calloc( size_t count, size_t size );
void* Runtime_free  ( void* ptr  );  //  always returns NULL
//  Number of live Runtime_calloc allocations (0 == no leaks).
int   Runtime_allocated();

#endif
#include <stdlib.h>
#include "libtokenizer/Base.h"
#include "libtokenizer/Runtime.h"

static int allocated = 0;

//  calloc that counts live allocations for leak reporting.
//  Fixed: the counter is only incremented when the allocation
//  actually succeeded (previously counted failed allocations too).
void* Runtime_calloc( size_t count, size_t size )
{
    void* ptr = calloc( count, size );

    if ( ptr )
    {
        allocated++;
    }
    return ptr;
}

//  Counted free; always returns NULL so callers can clear pointers.
//  Fixed: freeing NULL is a no-op and must not decrement the
//  live-allocation counter (previously skewed the leak report).
void* Runtime_free( void* ptr  )
{
    if ( ptr )
    {
        allocated--;

        free( ptr );
    }

    return NULL;
}

//  Live allocation count; non-zero at exit indicates a leak.
int Runtime_allocated()
{
    return allocated;
}

String Buffer

public class
{
    @content: string*
}

public append( suffix: string& )
{
    @content = @content.concat( suffix );
}
#ifndef LIBTOKENIZER_STRINGBUFFER_H
#define LIBTOKENIZER_STRINGBUFFER_H

#include "libtokenizer/Base.h"

//  Growable character buffer (reallocates on every append).
typedef struct _StringBuffer
{
    char* content;  //  heap string, always NUL-terminated
    int   length;   //  running total of appended characters

} StringBuffer;

StringBuffer* new_StringBuffer        ();
StringBuffer* free_StringBuffer       ( StringBuffer* self                     );  //  returns NULL
StringBuffer* StringBuffer_append     ( StringBuffer* self, const char* suffix );
StringBuffer* StringBuffer_append_char( StringBuffer* self, char        ch     );
const char*   StringBuffer_content    ( StringBuffer* self                     );  //  borrowed pointer
bool          StringBuffer_isEmpty    ( StringBuffer* self                     );

#endif
#include <stdlib.h>
#include "libtokenizer/Runtime.h"
#include "libtokenizer/String.h"
#include "libtokenizer/StringBuffer.h"

//  Allocate an empty buffer holding a heap copy of "".
StringBuffer* new_StringBuffer()
{
    StringBuffer* self = Runtime_calloc( 1, sizeof( StringBuffer ) );

    if ( !self ) return self;

    self->content = StringCopy( "" );
    self->length  = 0;

    return self;
}

//  Release the buffered text and the StringBuffer itself.
//  Fixed: now NULL-safe — previously dereferenced 'self'
//  unconditionally and called Runtime_free on a NULL pointer,
//  skewing Runtime's allocation counter. Always returns NULL.
StringBuffer* free_StringBuffer( StringBuffer* self )
{
    if ( self )
    {
        free( self->content );
        self->content = 0;
        self->length  = 0;

        Runtime_free( self );
    }

    return 0;
}

//  Append 'suffix' by rebuilding the buffer as a fresh concatenation.
StringBuffer* StringBuffer_append( StringBuffer* self, const char* suffix )
{
    char* joined = StringCat( self->content, suffix );

    free( self->content );

    self->content  = joined;
    self->length  += StringLength( suffix );

    return self;
}

//  Append a single character (wrapped as a two-byte C string).
StringBuffer* StringBuffer_append_char( StringBuffer* self, char ch )
{
    char suffix[2] = { ch , '\0' };

    return StringBuffer_append( self, suffix );
}

//  Borrowed pointer to the buffered text; do not free.
const char* StringBuffer_content( StringBuffer* self )
{
    return self->content;
}

//  TRUE when the buffered text is the empty string.
bool StringBuffer_isEmpty( StringBuffer* self )
{
    return (0 == StringLength( self->content ));
}
#include <stdio.h>
#include "libtokenizer/StringBuffer.h"

//  Smoke test: append "test" ten times, printing the buffer as it grows.
int main( int argc, char** argv )
{
    StringBuffer* sb = new_StringBuffer();

    for ( int i=0; i < 10; i++ )
    {
        StringBuffer_append( sb, "test" );

        const char* content = StringBuffer_content( sb );

        fprintf( stdout, "%2i: %s\n", i, content );
    }

    free_StringBuffer( sb );
}
public class StringBuffer {

    java.lang.StringBuffer inner;
//  Growable text buffer backed by a native string.
function StringBuffer()
{
    this.inner = "";
}

//  Concatenate 'string' onto the buffered text.
StringBuffer.prototype.append
=
function( string )
{
    this.inner = this.inner.concat( string );
}
#ifndef LIBTOKENIZER_TERM_H
#define LIBTOKENIZER_TERM_H

//  ANSI terminal escape sequences for syntax-highlighted output.

#define COLOR_NORMAL   "\033[00m"
#define COLOR_BOLD     "\033[01m"
#define COLOR_LIGHT    "\033[02m"
#define COLOR_STRING   "\033[33m"
#define COLOR_TYPE     "\033[36m"
#define COLOR_MODIFIER "\033[94m"
#define COLOR_VALUE    "\033[33m"
#define COLOR_CHAR     "\033[33m"
#define COLOR_COMMENT  "\033[32m"
#define COLOR_UNKNOWN  "\033[41m"

//  Write 'color' (one of the COLOR_* sequences) to 'stream' (a FILE*).
void Term_Colour( void* stream, const char* color );

#endif
#include <stdio.h>

//  Emit an ANSI colour escape sequence to 'stream' (a FILE*, typed
//  void* in the header to avoid the <stdio.h> dependency there).
void Term_Colour( void* stream, const char* color )
{
    fputs( color, (FILE*) stream );
}