__file__ = 'ky_token.py'

__usage__ = '''t = Token(token type, token value, column index, line number,
                         previous token, next token)

token type  - eg. Token.NUMBER, Token.IDENT, etc.
token value - eg. 12, 'test', ie. any arbitrary value
column idx  - eg. 3, ie. lexical position of the token
line num    - eg. 2, ie. line number of the token
previous    - previous token in the stream
next        - next token in the stream'''

__description__ = '''This class is the basic token class returned by the
lexical scanner. It may become a base class from which all tokens are
subclassed. Basically just a data storage class.'''

__author__ = 'Timothy Wakeham (timmeh)'
__contact__ = 'timmeh@hiddenworlds.org'
__updated__ = '28-03-2008 @ 14:57:40'

__todo__ = None

#start

class Token:

    ID = 'KY_TOKEN'

    # token type constants; each value indexes into MAPPING below
    END      = 0
    NUMBER   = 1
    LBRACKET = 2
    RBRACKET = 3
    OPERATOR = 4
    IDENT    = 5
    KEYWORD  = 6
    UNKNOWN  = 7
    EOF      = 8
    UNOP     = 9

    MAPPING = ['End', 'Number', 'Left Bracket', 'Right Bracket', 'Operator',
               'Identifier', 'Keyword', 'Unknown', 'EOF', 'Unary Op']

    def __init__(self, tok_type, tok_val, idx=None, line=None, prev=None, next=None):
        # token details
        self.token_type = tok_type
        self.token_value = tok_val

        # lexer state
        self.idx = idx
        self.line = line

        # pointers to context tokens
        self.prev = prev
        self.next = next

    # accessors are named get_next/get_previous so they are not shadowed by
    # the self.next/self.prev instance attributes assigned in __init__
    def get_next(self):
        return self.next

    def get_previous(self):
        return self.prev

    def __str__(self):
        return '<%s: %s>' % (Token.MAPPING[self.token_type], self.token_value)
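
# Minimal usage sketch (illustrative, not part of the original module): builds
# a tiny hand-linked token stream the way the lexical scanner presumably
# would, then walks the prev/next pointers.
if __name__ == '__main__':
    num = Token(Token.NUMBER, 12, idx=0, line=1)
    op  = Token(Token.OPERATOR, '+', idx=3, line=1, prev=num)
    num.next = op

    print(num)                # <Number: 12>
    print(num.get_next())     # <Operator: +>
    print(op.get_previous())  # <Number: 12>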