PEP8 compliance, reduce munging on tagged names

Ryan Hitchman 2010-03-27 02:42:27 -06:00
parent 9a9922a3ca
commit 2a11825779
18 changed files with 2652 additions and 2652 deletions
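
Nearly every hunk below is whitespace-only PEP8 cleanup: inline-comment spacing, trailing whitespace, and wrapped long lines. The one behavioural change is the tag plugin's munge() call near the end. A hedged illustration of the commonest fix, PEP8's inline-comment spacing (names adapted from the bot.py hunk below):

chan = nick = 'skybot'
if chan == nick: # before: one space ahead of the comment
    pass
if chan == nick:  # after: at least two spaces, per PEP8
    pass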

bot.py
View File

@@ -41,7 +41,7 @@ try:
except Exception, e:
print 'ERROR: malformed config file', Exception, e
sys.exit()
bot.persist_dir = os.path.abspath('persist')
if not os.path.exists(bot.persist_dir):
os.mkdir(bot.persist_dir)

View File

@@ -17,7 +17,7 @@ class Input(dict):
conn.msg(chan, msg)
def reply(msg):
-if chan == nick: # PMs don't need prefixes
+if chan == nick:  # PMs don't need prefixes
conn.msg(chan, msg)
else:
conn.msg(chan, nick + ': ' + msg)
@@ -68,7 +68,7 @@ def do_sieve(sieve, bot, input, func, type, args):
traceback.print_exc()
return None
class Handler(object):
'''Runs plugins in their own threads (ensures order)'''
def __init__(self, func):
@@ -106,12 +106,12 @@ def dispatch(input, kind, func, args):
input = do_sieve(sieve, bot, input, func, kind, args)
if input == None:
return
if func._thread:
bot.threads[func].put(input)
else:
thread.start_new_thread(run, (func, input))
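
The dispatch above hands threaded plugins to a per-function Handler so their inputs stay ordered. A minimal sketch of that pattern, assumed from the Handler docstring earlier (not code from this commit):

import Queue
import thread

class Handler(object):
    '''Runs a plugin in its own thread, feeding it inputs in order.'''
    def __init__(self, func):
        self.func = func
        self.input_queue = Queue.Queue()
        thread.start_new_thread(self.start, ())

    def start(self):
        while True:
            input = self.input_queue.get()
            self.func(input)  # one input at a time, in arrival order

    def put(self, value):
        self.input_queue.put(value)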
def main(conn, out):
inp = Input(conn, *out)
@@ -120,14 +120,13 @@ def main(conn, out):
for func, args in bot.events[inp.command] + bot.events['*']:
dispatch(Input(conn, *out), "event", func, args)
if inp.command == 'PRIVMSG':
# COMMANDS
-if inp.chan == inp.nick: # private message, no command prefix
+if inp.chan == inp.nick:  # private message, no command prefix
prefix = r'^(?:[.!]?|'
else:
prefix = r'^(?:[.!]|'
command_re = prefix + inp.conn.nick
command_re += r'[:,]*\s+)(\w+)(?:$|\s+)(.*)'
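
The branch above makes the '.'/'!' sigil optional in private messages. A quick demonstration of the assembled channel-form regex (bot nick assumed):

import re

nick = 'skybot'  # assumed nick
command_re = r'^(?:[.!]|' + nick + r'[:,]*\s+)(\w+)(?:$|\s+)(.*)'
print re.match(command_re, '.weather 12345').groups()
# ('weather', '12345')
print re.match(command_re, 'skybot: weather 12345').groups()
# ('weather', '12345')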
@@ -139,10 +138,10 @@
input = Input(conn, *out)
input.inp_unstripped = m.group(2)
input.inp = m.group(2).strip()
func, args = bot.commands[command]
dispatch(input, "command", func, args)
# REGEXES
for func, args in bot.plugs['regex']:
m = args['re'].search(inp.lastparam)

View File

@@ -28,11 +28,9 @@ def format_plug(plug, kind='', lpad=0, width=40):
if kind == 'regex':
out += ' ' * (50 - len(out)) + plug[1]['regex']
return out
def reload(init=False):
changed = False
@@ -41,12 +39,12 @@ def reload(init=False):
bot.threads = {}
core_fileset = set(glob.glob(os.path.join("core", "*.py")))
for filename in core_fileset:
mtime = os.stat(filename).st_mtime
if mtime != mtimes.get(filename):
mtimes[filename] = mtime
changed = True
try:
@@ -82,7 +80,7 @@ def reload(init=False):
mtime = os.stat(filename).st_mtime
if mtime != mtimes.get(filename):
mtimes[filename] = mtime
changed = True
try:
@@ -95,7 +93,7 @@ def reload(init=False):
# remove plugins already loaded from this filename
for name, data in bot.plugs.iteritems():
bot.plugs[name] = [x for x in data
if x[0]._filename != filename]
for func, handler in list(bot.threads.iteritems()):
@@ -107,7 +105,7 @@ def reload(init=False):
if hasattr(obj, '_hook'): # check for magic
if obj._thread:
bot.threads[obj] = Handler(obj)
for type, data in obj._hook:
bot.plugs[type] += [data]
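
reload() re-executes a plugin file only when its modification time changes, then re-registers whatever functions carry the _hook magic. The mtime half of that, as a self-contained sketch (assumed, not the commit's code):

import glob
import os

mtimes = {}

def changed_files(pattern):
    """Return files matching `pattern` whose mtime moved since last call."""
    changed = []
    for filename in glob.glob(pattern):
        mtime = os.stat(filename).st_mtime
        if mtime != mtimes.get(filename):  # new file or newer timestamp
            mtimes[filename] = mtime
            changed.append(filename)
    return changed

print changed_files(os.path.join('plugins', '*.py'))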
@@ -141,7 +139,7 @@ def reload(init=False):
if bot.commands:
# hack to make commands with multiple aliases
# print nicely
print ' command:'
commands = collections.defaultdict(list)
@@ -149,11 +147,11 @@ def reload(init=False):
commands[make_signature(func)].append(name)
for sig, names in sorted(commands.iteritems()):
-names.sort(key=lambda x: (-len(x), x)) # long names first
+names.sort(key=lambda x: (-len(x), x))  # long names first
out = ' ' * 6 + '%s:%s:%s' % sig
out += ' ' * (50 - len(out)) + ', '.join(names)
print out
for kind, plugs in sorted(bot.plugs.iteritems()):
if kind == 'command':
continue

View File

@@ -10,7 +10,7 @@ from util import hook
api_url = "http://encyclopediadramatica.com/api.php?action=opensearch&search="
ed_url = "http://encyclopediadramatica.com/"
-ua_header = ('User-Agent','Skybot/1.0 http://bitbucket.org/Scaevolus/skybot/')
+ua_header = ('User-Agent', 'Skybot/1.0 http://bitbucket.org/Scaevolus/skybot/')
@hook.command('ed')
@@ -20,7 +20,7 @@ def drama(inp):
'''article on <phrase>'''
if not inp:
return drama.__doc__
q = api_url + (urllib2.quote(inp, safe=''))
request = urllib2.Request(q)
request.add_header(*ua_header)
@@ -28,12 +28,12 @@ def drama(inp):
if not j[1]:
return 'no results found'
article_name = j[1][0].replace(' ', '_')
url = ed_url + (urllib2.quote(article_name))
request = urllib2.Request(url)
request.add_header(*ua_header)
page = html.fromstring(urllib2.build_opener().open(request).read())
for p in page.xpath('//div[@id="bodyContent"]/p'):
if p.text_content():
summary = ' '.join(p.text_content().splitlines())
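
The opensearch endpoint queried above appears to return a JSON pair of the query and a list of matching titles, which is why j[1][0] is the first hit. The response shape here is assumed:

import json

j = json.loads('["lol", ["LOL", "Lolcat"]]')  # assumed response shape
print j[1][0].replace(' ', '_')  # LOL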

View File

@@ -11,11 +11,12 @@ from util import hook
# say('(--[. ]-[ .] /')
# say('(_______o__)')
@hook.command
@hook.command('gs')
def goonsay(inp):
-".gs/.goonsay <id|add [message]> -- Get's the goonsay.com result for <id> or "
-"add a new :goonsay: to the database. If no arg it will get a random result."
+".gs/.goonsay <id|add [message]> -- Get's the goonsay.com result for <id> "
+" or add a new :goonsay: to the database. With no args, random result."
url = "http://goonsay.com/api/goonsays"
@@ -44,7 +45,8 @@ def goonsay(inp):
if len(inp):
try:
-req = urllib2.Request('%s/%d/' % (url, int(inp)), None, req_headers)
+req = urllib2.Request('%s/%d/' % (url, int(inp)), None,
+                      req_headers)
j = json.loads(urllib2.urlopen(req).read())
except urllib2.HTTPError, e:
if e.code == 410 or e.code == 404:

View File

@@ -14,7 +14,7 @@ def mem(inp):
status = dict(line_pairs)
keys = 'VmSize VmLib VmData VmExe VmRSS VmStk'.split()
return ', '.join(key + ':' + status[key] for key in keys)
elif os.name == 'nt':
cmd = "tasklist /FI \"PID eq %s\" /FO CSV /NH" % os.getpid()
out = os.popen(cmd).read()
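
The POSIX branch above reads the kernel's per-process accounting from /proc. The same idea as a self-contained sketch (assumed, Linux-only):

import os

def mem_posix():
    # /proc/<pid>/status holds lines like 'VmRSS:  1234 kB' on Linux
    status = dict(line.split(':', 1)
                  for line in open('/proc/%d/status' % os.getpid()))
    keys = 'VmSize VmRSS VmStk'.split()
    return ', '.join(key + ':' + status[key].strip() for key in keys)

if os.name == 'posix':
    print mem_posix()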

View File

@@ -28,7 +28,6 @@ def mtg(inp):
text = re.sub(r'\(.*?\)', '', text) # strip parenthetical explanations
text = re.sub(r'\.(\S)', r'. \1', text) # fix spacing
printings = card.find('td/small').text_content()
printings = re.search(r'Editions:(.*)Languages:', printings).group(1)
printings = re.findall(r'\s*(.+?(?: \([^)]+\))*) \((.*?)\)',

View File

@@ -1,98 +1,98 @@
#-----------------------------------------------------------------
# pycparser: cdecl.py
#
# Example of the CDECL tool using pycparser. CDECL "explains"
# C type declarations in plain English.
#
# The AST generated by pycparser from the given declaration is
# traversed recursively to build the explanation.
# Note that the declaration must be a valid external declaration
# in C. All the types used in it must be defined with typedef,
# or parsing will fail. The definition can be arbitrary, it isn't
# really used - by pycparser must know which tokens are types.
#
# For example:
#
# 'typedef int Node; const Node* (*ar)[10];'
# =>
# ar is a pointer to array[10] of pointer to const Node
#
# Copyright (C) 2008, Eli Bendersky
# License: LGPL
#-----------------------------------------------------------------
import sys
from pycparser import c_parser, c_ast
def explain_c_declaration(c_decl):
""" Parses the declaration in c_decl and returns a text
explanation as a string.
The last external node of the string is used, to allow
earlier typedefs for used types.
"""
parser = c_parser.CParser()
node = parser.parse(c_decl, filename='<stdin>')
if ( not isinstance(node, c_ast.FileAST) or
not isinstance(node.ext[-1], c_ast.Decl)):
return "Last external node is invalid type"
return _explain_decl_node(node.ext[-1])
def _explain_decl_node(decl_node):
""" Receives a c_ast.Decl note and returns its explanation in
English.
"""
#~ print decl_node.show()
storage = ' '.join(decl_node.storage) + ' ' if decl_node.storage else ''
return (decl_node.name +
" is a " +
storage +
_explain_type(decl_node.type))
def _explain_type(decl):
""" Recursively explains a type decl node
"""
typ = type(decl)
if typ == c_ast.TypeDecl:
quals = ' '.join(decl.quals) + ' ' if decl.quals else ''
return quals + _explain_type(decl.type)
elif typ == c_ast.Typename or typ == c_ast.Decl:
return _explain_type(decl.type)
elif typ == c_ast.IdentifierType:
return ' '.join(decl.names)
elif typ == c_ast.PtrDecl:
quals = ' '.join(decl.quals) + ' ' if decl.quals else ''
return quals + 'pointer to ' + _explain_type(decl.type)
elif typ == c_ast.ArrayDecl:
arr = 'array'
if decl.dim: arr += '[%s]' % decl.dim.value
return arr + " of " + _explain_type(decl.type)
elif typ == c_ast.FuncDecl:
if decl.args:
params = [_explain_type(param) for param in decl.args.params]
args = ', '.join(params)
else:
args = ''
return ('function(%s) returning ' % (args) +
_explain_type(decl.type))
if __name__ == "__main__":
if len(sys.argv) > 1:
c_decl = sys.argv[1]
else:
c_decl = "char *(*(**foo[][8])())[];"
print "Explaining the declaration:", c_decl
print "\n", explain_c_declaration(c_decl)
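
A usage sketch for the example above; the leading typedef is required because pycparser only recognises names declared via typedef as types:

print explain_c_declaration('typedef int Node; const Node* (*ar)[10];')
# expected: ar is a pointer to array[10] of pointer to const Node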

View File

@@ -1,75 +1,75 @@
#-----------------------------------------------------------------
# pycparser: __init__.py
#
# This package file exports some convenience functions for
# interacting with pycparser
#
# Copyright (C) 2008-2009, Eli Bendersky
# License: LGPL
#-----------------------------------------------------------------
__all__ = ['c_lexer', 'c_parser', 'c_ast']
__version__ = '1.05'
from subprocess import Popen, PIPE
from types import ListType
from c_parser import CParser
def parse_file( filename, use_cpp=False,
cpp_path='cpp', cpp_args=''):
""" Parse a C file using pycparser.
filename:
Name of the file you want to parse.
use_cpp:
Set to True if you want to execute the C pre-processor
on the file prior to parsing it.
cpp_path:
If use_cpp is True, this is the path to 'cpp' on your
system. If no path is provided, it attempts to just
execute 'cpp', so it must be in your PATH.
cpp_args:
If use_cpp is True, set this to the command line
arguments strings to cpp. Be careful with quotes -
it's best to pass a raw string (r'') here.
For example:
r'-I../utils/fake_libc_include'
If several arguments are required, pass a list of
strings.
When successful, an AST is returned. ParseError can be
thrown if the file doesn't parse successfully.
Errors from cpp will be printed out.
"""
if use_cpp:
path_list = [cpp_path]
if isinstance(cpp_args, ListType):
path_list += cpp_args
elif cpp_args != '':
path_list += [cpp_args]
path_list += [filename]
# Note the use of universal_newlines to treat all newlines
# as \n for Python's purpose
#
pipe = Popen( path_list,
stdout=PIPE,
universal_newlines=True)
text = pipe.communicate()[0]
else:
text = open(filename).read()
parser = CParser()
return parser.parse(text, filename)
if __name__ == "__main__":
pass
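
And a hedged usage sketch of parse_file(); the file name and include path are illustrative, not part of this commit:

# assumes a main.c on disk and 'cpp' on the PATH
ast = parse_file('main.c', use_cpp=True,
                 cpp_args=r'-I../utils/fake_libc_include')
ast.show()  # pycparser AST nodes can dump themselves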

File diff suppressed because it is too large

View File

@@ -16,21 +16,21 @@ from ply.lex import TOKEN
class CLexer(object):
""" A lexer for the C language. After building it, set the
input text with input(), and call token() to get new
tokens.
The public attribute filename can be set to an initial
filaneme, but the lexer will update it upon #line
directives.
"""
def __init__(self, error_func, type_lookup_func):
""" Create a new Lexer.
error_func:
An error function. Will be called with an error
message, line and column as arguments, in case of
an error during lexing.
type_lookup_func:
A type lookup function. Given a string, it must
return True IFF this string is a name of a type
@@ -39,7 +39,7 @@ class CLexer(object):
self.error_func = error_func
self.type_lookup_func = type_lookup_func
self.filename = ''
# Allow either "# line" or "# <num>" to support GCC's
# cpp output
#
@@ -47,8 +47,8 @@ class CLexer(object):
def build(self, **kwargs):
""" Builds the lexer from the specification. Must be
called after the lexer object is created.
This method exists separately, because the PLY
manual warns against calling lex.lex inside
__init__
@@ -62,13 +62,13 @@ class CLexer(object):
def input(self, text):
self.lexer.input(text)
def token(self):
g = self.lexer.token()
return g
######################-- PRIVATE --######################
##
## Internal auxiliary methods
##
@@ -76,26 +76,26 @@ class CLexer(object):
location = self._make_tok_location(token)
self.error_func(msg, location[0], location[1])
self.lexer.skip(1)
def _find_tok_column(self, token):
i = token.lexpos
while i > 0:
if self.lexer.lexdata[i] == '\n': break
i -= 1
return (token.lexpos - i) + 1
def _make_tok_location(self, token):
return (token.lineno, self._find_tok_column(token))
##
## Reserved keywords
##
keywords = (
'AUTO', 'BREAK', 'CASE', 'CHAR', 'CONST', 'CONTINUE',
'DEFAULT', 'DO', 'DOUBLE', 'ELSE', 'ENUM', 'EXTERN',
'FLOAT', 'FOR', 'GOTO', 'IF', 'INT', 'LONG', 'REGISTER',
'RETURN', 'SHORT', 'SIGNED', 'SIZEOF', 'STATIC', 'STRUCT',
'SWITCH', 'TYPEDEF', 'UNION', 'UNSIGNED', 'VOID',
'VOLATILE', 'WHILE',
)
@@ -108,35 +108,35 @@ class CLexer(object):
##
tokens = keywords + (
# Identifiers
'ID',
# Type identifiers (identifiers previously defined as
# types with typedef)
'TYPEID',
# constants
'INT_CONST_DEC', 'INT_CONST_OCT', 'INT_CONST_HEX',
'FLOAT_CONST',
'CHAR_CONST',
'WCHAR_CONST',
# String literals
'STRING_LITERAL',
'WSTRING_LITERAL',
# Operators
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD',
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
# Assignment
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL',
'PLUSEQUAL', 'MINUSEQUAL',
'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL',
'OREQUAL',
# Increment/decrement
'PLUSPLUS', 'MINUSMINUS',
# Structure dereference (->)
@@ -144,18 +144,18 @@ class CLexer(object):
# Conditional operator (?)
'CONDOP',
# Delimeters
'LPAREN', 'RPAREN', # ( )
'LBRACKET', 'RBRACKET', # [ ]
'LBRACE', 'RBRACE', # { }
'COMMA', 'PERIOD', # . ,
'SEMI', 'COLON', # ; :
# Ellipsis (...)
'ELLIPSIS',
# pre-processor
'PPHASH', # '#'
)
@@ -172,7 +172,7 @@ class CLexer(object):
decimal_constant = '(0'+integer_suffix_opt+')|([1-9][0-9]*'+integer_suffix_opt+')'
octal_constant = '0[0-7]*'+integer_suffix_opt
hex_constant = '0[xX][0-9a-fA-F]+'+integer_suffix_opt
bad_octal_constant = '0[0-7]*[89]'
# character constants (K&R2: A.2.5.2)
@@ -185,14 +185,14 @@ class CLexer(object):
bad_escape = r"""([\\][^a-zA-Z\\?'"x0-7])"""
escape_sequence = r"""(\\("""+simple_escape+'|'+octal_escape+'|'+hex_escape+'))'
cconst_char = r"""([^'\\\n]|"""+escape_sequence+')'
char_const = "'"+cconst_char+"'"
wchar_const = 'L'+char_const
unmatched_quote = "('"+cconst_char+"*\\n)|('"+cconst_char+"*$)"
bad_char_const = r"""('"""+cconst_char+"""[^'\n]+')|('')|('"""+bad_escape+r"""[^'\n]*')"""
# string literals (K&R2: A.2.6)
string_char = r"""([^"\\\n]|"""+escape_sequence+')'
string_literal = '"'+string_char+'*"'
wstring_literal = 'L'+string_literal
bad_string_literal = '"'+string_char+'*'+bad_escape+string_char+'*"'
@@ -207,15 +207,15 @@ class CLexer(object):
##
states = (
# ppline: preprocessor line directives
#
('ppline', 'exclusive'),
)
def t_PPHASH(self, t):
r'[ \t]*\#'
m = self.line_pattern.match(
t.lexer.lexdata, pos=t.lexer.lexpos)
if m:
t.lexer.begin('ppline')
self.pp_line = self.pp_filename = None
@@ -223,7 +223,7 @@ class CLexer(object):
else:
t.type = 'PPHASH'
return t
##
## Rules for the ppline state
##
@@ -246,21 +246,21 @@ class CLexer(object):
def t_ppline_NEWLINE(self, t):
r'\n'
if self.pp_line is None:
self._error('line number missing in #line', t)
else:
self.lexer.lineno = int(self.pp_line)
if self.pp_filename is not None:
self.filename = self.pp_filename
t.lexer.begin('INITIAL')
def t_ppline_PPLINE(self, t):
r'line'
pass
t_ppline_ignore = ' \t'
def t_ppline_error(self, t):
@@ -336,8 +336,8 @@ class CLexer(object):
t_ELLIPSIS = r'\.\.\.'
t_STRING_LITERAL = string_literal
# The following floating and integer constants are defined as
# functions to impose a strict order (otherwise, decimal
# is placed before the others because its regex is longer,
# and this is bad)
@@ -363,17 +363,17 @@ class CLexer(object):
def t_INT_CONST_DEC(self, t):
return t
# Must come before bad_char_const, to prevent it from
# catching valid char constants as invalid
#
@TOKEN(char_const)
def t_CHAR_CONST(self, t):
return t
@TOKEN(wchar_const)
def t_WCHAR_CONST(self, t):
return t
@TOKEN(unmatched_quote)
def t_UNMATCHED_QUOTE(self, t):
msg = "Unmatched '"
@@ -387,23 +387,23 @@ class CLexer(object):
@TOKEN(wstring_literal)
def t_WSTRING_LITERAL(self, t):
return t
# unmatched string literals are caught by the preprocessor
@TOKEN(bad_string_literal)
def t_BAD_STRING_LITERAL(self, t):
msg = "String contains invalid escape code"
self._error(msg, t)
@TOKEN(identifier)
def t_ID(self, t):
t.type = self.keyword_map.get(t.value, "ID")
if t.type == 'ID' and self.type_lookup_func(t.value):
t.type = "TYPEID"
return t
def t_error(self, t):
msg = 'Illegal character %s' % repr(t.value[0])
self._error(msg, t)
@@ -412,32 +412,32 @@ class CLexer(object):
if __name__ == "__main__":
filename = '../zp.c'
text = open(filename).read()
#~ text = '"'+r"""ka \p ka"""+'"'
text = r"""
546
#line 66 "kwas\df.h"
id 4
# 5
dsf
"""
def errfoo(msg, a, b):
print msg
sys.exit()
def typelookup(namd):
return False
clex = CLexer(errfoo, typelookup)
clex.build()
clex.input(text)
while 1:
tok = clex.token()
if not tok: break
#~ print type(tok)
print "-", tok.value, tok.type, tok.lineno, clex.filename, tok.lexpos

File diff suppressed because it is too large

View File

@@ -1,67 +1,67 @@
#-----------------------------------------------------------------
# plyparser.py
#
# PLYParser class and other utilites for simplifying programming
# parsers with PLY
#
# Copyright (C) 2008-2009, Eli Bendersky
# License: LGPL
#-----------------------------------------------------------------
class Coord(object):
""" Coordinates of a syntactic element. Consists of:
- File name
- Line number
- (optional) column number, for the Lexer
"""
def __init__(self, file, line, column=None):
self.file = file
self.line = line
self.column = column
def __str__(self):
str = "%s:%s" % (self.file, self.line)
if self.column: str += ":%s" % self.column
return str
class ParseError(Exception): pass
class PLYParser(object):
def _create_opt_rule(self, rulename):
""" Given a rule name, creates an optional ply.yacc rule
for it. The name of the optional rule is
<rulename>_opt
"""
optname = rulename + '_opt'
def optrule(self, p):
p[0] = p[1]
optrule.__doc__ = '%s : empty\n| %s' % (optname, rulename)
optrule.__name__ = 'p_%s' % optname
setattr(self.__class__, optrule.__name__, optrule)
def _coord(self, lineno, column=None):
return Coord(
file=self.clex.filename,
line=lineno,
column=column)
def _parse_error(self, msg, coord):
raise ParseError("%s: %s" % (coord, msg))
if __name__ == '__main__':
pp = PLYParser()
pp._create_opt_rule('java')
ar = [4, 6]
pp.p_java_opt(ar)
print ar
print pp.p_java_opt.__doc__
print dir(pp)

File diff suppressed because one or more lines are too long

View File

@@ -2,7 +2,7 @@ from util import hook
@hook.sieve
def sieve_suite(bot, input, func, kind, args):
if input.command == 'PRIVMSG' and input.nick.lower()[-3:] == 'bot' \
and args.get('ignorebots', True):
return None
@@ -17,5 +17,5 @@ def sieve_suite(bot, input, func, kind, args):
denied_channels = map(unicode.lower, acl['allow-except'])
if input.chan.lower() in denied_channels:
return None
return input
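
The acl consulted above comes from the per-command section of the bot config. An illustration of the shape implied by the hunk ('allow-except' appears above; 'deny-except' is an assumed counterpart):

acl = {'allow-except': ['#serious-business']}  # run everywhere but here
acl = {'deny-except': ['#bot-test']}           # assumed: run only here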

View File

@@ -24,20 +24,20 @@ def add_tag(db, chan, nick, subject):
(nick, chan, subject)).fetchall()
if match:
return 'already tagged'
db.execute('replace into tag(chan, subject, nick) values(?,?,?)',
(chan, subject, nick))
db.commit()
return 'tag added'
def delete_tag(db, chan, nick, del_tag):
count = db.execute('delete from tag where lower(nick)=lower(?) and'
' chan=? and lower(subject)=lower(?)',
(nick, chan, del_tag)).rowcount
db.commit()
if count:
return 'deleted'
else:
@@ -69,7 +69,7 @@ def get_nicks_by_tag(db, chan, subject):
nicks = db.execute("select nick from tag where lower(subject)=lower(?)"
" and chan=?"
" order by lower(nick)", (subject, chan)).fetchall()
nicks = [munge(x[0], 3) for x in nicks]
if not nicks:
return 'tag not found'
@@ -108,9 +108,9 @@ def tag(inp, chan='', db=None):
if not tags:
return get_nicks_by_tag(db, chan, inp)
else:
-return 'tags for "%s": ' % munge(inp, 3) + ', '.join(
+return 'tags for "%s": ' % munge(inp, 1) + ', '.join(
tag[0] for tag in tags)
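
This is the commit's one behavioural change: tag listings now munge a single character of each name instead of three. A hedged sketch of what munge() presumably does with the character_replacements table that follows:

character_replacements = {'a': 'ä'}  # one entry of the table below

def munge(text, munge_count=0):
    # swap up to munge_count characters for look-alikes so IRC
    # clients don't highlight the tagged nick (assumed behaviour)
    out = ''
    for c in text:
        if munge_count and c in character_replacements:
            out += character_replacements[c]
            munge_count -= 1
        else:
            out += c
    return out

print munge('barney', 1)  # bärney -- only the first match is swapped now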
character_replacements = {
'a': 'ä',

View File

@@ -36,7 +36,8 @@ def tellinput(paraml, input=None, db=None, bot=None):
user_from, message, time, chan = tells[0]
reltime = timesince.timesince(time)
-reply = "%s said %s ago in %s: %s" % (user_from, reltime, chan, message)
+reply = "%s said %s ago in %s: %s" % (user_from, reltime, chan,
+                                      message)
if len(tells) > 1:
reply += " (+%d more, .showtells to view)" % (len(tells) - 1)

View File

@@ -34,9 +34,10 @@ def _hook_add(func, add, name=''):
args.append(0) # means kwargs present
func._args = args
-if not hasattr(func, '_thread'): # does function run in its own thread?
+if not hasattr(func, '_thread'):  # does function run in its own thread?
func._thread = False
def sieve(func):
if func.func_code.co_argcount != 5:
raise ValueError(
@@ -86,7 +87,7 @@ def singlethread(func):
def regex(regex, flags=0, **kwargs):
args = kwargs
def regex_wrapper(func):
args['name'] = func.func_name
args['regex'] = regex
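
For context, plugins consume these decorators roughly like so; that regex hooks receive the match object is an assumption based on the m = args['re'].search(...) dispatch line earlier:

from util import hook

@hook.regex(r'\bskynet\b')
def skynet_watch(match):
    # assumed: the dispatcher passes in the regex match object
    return 'I saw that.'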