Fix unintentional changes to plugin directory structure.

This commit is contained in:
Hamled 2010-01-16 20:24:36 -08:00
parent f43c5238c2
commit c183032774
24 changed files with 1235 additions and 24 deletions

View File

@ -1 +0,0 @@
../plugins_available/babel.py

93
plugins/babel.py Normal file
View File

@ -0,0 +1,93 @@
import urllib
import htmlentitydefs
import re
import json
from util import hook
########### from http://effbot.org/zone/re-sub.htm#unescape-html #############
def unescape(text):
    """Replace HTML character/entity references in *text* with their
    literal characters (adapted from effbot's unescape recipe)."""
    def replace_ref(match):
        ref = match.group(0)
        if ref.startswith("&#"):
            # numeric character reference, hex (&#x..;) or decimal (&#..;)
            try:
                if ref.startswith("&#x"):
                    return unichr(int(ref[3:-1], 16))
                return unichr(int(ref[2:-1]))
            except ValueError:
                pass
        else:
            # named entity, e.g. &amp;
            try:
                return unichr(htmlentitydefs.name2codepoint[ref[1:-1]])
            except KeyError:
                pass
        return ref  # leave unrecognized references untouched
    return re.sub(r"&#?\w+;", replace_ref, text)
##############################################################################
# Chain of intermediate languages the text is round-tripped through.
languages = 'ja fr de ko ru zh'.split()
# Adjacent (source, target) pairs; appears unused in this module -- TODO confirm.
language_pairs = zip(languages[:-1], languages[1:])
def goog_trans(text, slang, tlang):
    """Translate *text* from language *slang* to *tlang* via the Google
    AJAX language API; returns the unescaped translated unicode text."""
    base = ('http://ajax.googleapis.com/ajax/services/language/translate'
            '?v=1.0&q=%s&langpair=%s')
    url = base % (urllib.quote(text, safe=''), slang + '%7C' + tlang)
    parsed = json.loads(urllib.urlopen(url).read())
    status = parsed['responseStatus']
    if not 200 <= status < 300:
        raise IOError('error with the translation server: %d: %s' % (
            status, ''))
    return unescape(parsed['responseData']['translatedText'])
def babel_gen(inp):
    """Round-trip *inp* through each language in `languages`, yielding a
    (language, translated-text, back-to-english) triple per step."""
    text = inp
    for lang in languages:
        text = text.encode('utf8')
        translated = goog_trans(text, 'en', lang).encode('utf8')
        text = goog_trans(translated, lang, 'en')
        yield lang, translated, text
@hook.command
def babel(inp):
".babel <sentence> -- translates <sentence> through multiple languages"
if not inp:
return babel.__doc__
try:
return list(babel_gen(inp))[-1][2]
except IOError, e:
return e
@hook.command
def babelext(inp):
".babelext <sentence> -- like .babel, but with more detailed output"
if not inp:
return babelext.__doc__
try:
babels = list(babel_gen(inp))
except IOError, e:
return e
out = u''
for lang, trans, text in babels:
out += '%s:"%s", ' % (lang, text.decode('utf8'))
out += 'en:"' + babels[-1][2].decode('utf8') + '"'
if len(out) > 300:
out = out[:150] + ' ... ' + out[-150:]
return out

View File

@ -1 +0,0 @@
../plugins_available/bf.py

87
plugins/bf.py Normal file
View File

@ -0,0 +1,87 @@
'''brainfuck interpreter adapted from (public domain) code at
http://brainfuck.sourceforge.net/brain.py'''
import re
import random
from util import hook
BUFFER_SIZE = 5000  # initial tape size; grown in BUFFER_SIZE increments
MAX_STEPS = 1000000  # hard cap on executed instructions per program
@hook.command
def bf(inp):
    ".bf <prog> -- executes brainfuck program <prog>"
    if not inp:
        return bf.__doc__
    program = re.sub('[^][<>+-.,]', '', inp)

    # create a dict of bracket pairs, for O(1) jumps later on
    brackets = {}
    open_brackets = []
    for pos in range(len(program)):
        if program[pos] == '[':
            open_brackets.append(pos)
        elif program[pos] == ']':
            if len(open_brackets) > 0:
                brackets[pos] = open_brackets[-1]
                brackets[open_brackets[-1]] = pos
                open_brackets.pop()
            else:
                return 'unbalanced brackets'
    if len(open_brackets) != 0:
        return 'unbalanced brackets'

    # now we can start interpreting
    ip = 0  # instruction pointer
    mp = 0  # memory pointer
    steps = 0
    memory = [0] * BUFFER_SIZE  # initial memory area
    rightmost = 0
    output = ""  # we'll save the output here

    # the main program loop:
    while ip < len(program):
        c = program[ip]
        if c == '+':
            # BUG FIX: was `memory[mp] + 1 % 256` -- precedence made that
            # `memory[mp] + 1`, so cells never wrapped and chr() could
            # be handed a value >= 256.
            memory[mp] = (memory[mp] + 1) % 256
        elif c == '-':
            # BUG FIX: same precedence problem as '+'
            memory[mp] = (memory[mp] - 1) % 256
        elif c == '>':
            mp += 1
            if mp > rightmost:
                rightmost = mp
            if mp >= len(memory):
                # no restriction on memory growth!
                memory.extend([0] * BUFFER_SIZE)
        elif c == '<':
            # BUG FIX: was `mp - 1 % len(memory)`, i.e. a plain decrement
            # that relied on Python's negative indexing; wrap explicitly.
            mp = (mp - 1) % len(memory)
        elif c == '.':
            output += chr(memory[mp])
            if len(output) > 500:
                break
        elif c == ',':
            memory[mp] = random.randint(1, 255)
        elif c == '[':
            if memory[mp] == 0:
                ip = brackets[ip]
        elif c == ']':
            if memory[mp] != 0:
                ip = brackets[ip]
        ip += 1
        steps += 1
        if steps > MAX_STEPS:
            output += "Maximum number of steps exceeded"
            break

    output = '/'.join(output.splitlines())
    if output == '':
        return 'no output'
    return unicode(output, 'iso-8859-1')[:430]

View File

@ -1 +0,0 @@
../plugins_available/bible.py

27
plugins/bible.py Normal file
View File

@ -0,0 +1,27 @@
import urllib
from util import hook
@hook.command('god')
@hook.command
def bible(inp):
    ".bible <passage> -- gets <passage> from the Bible (ESV)"
    if not inp:
        return bible.__doc__
    base_url = 'http://www.esvapi.org/v2/rest/passageQuery?key=IP&' \
        'output-format=plain-text&include-heading-horizontal-lines&' \
        'include-headings=false&include-passage-horizontal-lines=false&' \
        'include-passage-references=false&include-short-copyright=false&' \
        'include-footnotes=false&line-length=0&passage='
    response = urllib.urlopen(base_url + urllib.quote(inp)).read()
    # collapse whitespace runs in the API's plain-text response
    passage = ' '.join(response.split())
    if len(passage) > 400:
        # truncate at the last word boundary before 400 characters
        passage = passage[:passage.rfind(' ', 0, 400)] + '...'
    return passage

View File

@ -1 +0,0 @@
../plugins_available/choose.py

19
plugins/choose.py Normal file
View File

@ -0,0 +1,19 @@
import re
import random
from util import hook
@hook.command
def choose(inp):
    ".choose <choice1>, <choice2>, ... <choicen> -- makes a decision"
    if not inp:
        return choose.__doc__
    # split on commas first; fall back to whitespace-separated words
    options = re.findall(r'([^,]+)', inp)
    if len(options) == 1:
        options = re.findall(r'(\S+)', inp)
        if len(options) == 1:
            return 'the decision is up to you'
    return random.choice(options).strip()

View File

@ -1 +0,0 @@
../plugins_available/dice.py

59
plugins/dice.py Normal file
View File

@ -0,0 +1,59 @@
"""
dice.py: written by Scaevolus 2008, updated 2009
simulates dicerolls
"""
import re
import random
from util import hook
# collapse whitespace before validating/parsing a dice spec
whitespace_re = re.compile(r'\s+')
# whole-spec sanity check: signed dice groups and constants, e.g. 2d20-d5+4
valid_diceroll_re = re.compile(r'^[+-]?(\d+|\d*d\d+)([+-](\d+|\d*d\d+))*$')
# pulls out each signed term (dice group or bare constant)
sign_re = re.compile(r'[+-]?(?:\d*d)?\d+')
# splits one term into (count-with-sign, sides)
split_re = re.compile(r'([\d+-]*)d?(\d*)')
def nrolls(count, n):
    "roll an n-sided die count times"
    if n < 2:  # it's a coin
        if count < 5000:
            return sum(random.randint(0, 1) for x in xrange(count))
        # fake large runs with a normal approximation
        return int(random.normalvariate(.5*count, (.75*count)**.5))
    if count < 5000:
        return sum(random.randint(1, n) for x in xrange(count))
    # fake it: normal approximation with the die's mean and variance
    mean = .5*(1+n)*count
    variance = ((n+1)*(2*n+1)/6.-(.5*(1+n))**2)*count
    return int(random.normalvariate(mean, variance**.5))
@hook.command
def dice(inp):
    ".dice <diceroll> -- simulates dicerolls, e.g. .dice 2d20-d5+4 roll 2 " \
        "D20s, subtract 1D5, add 4"
    if not inp.strip():
        return dice.__doc__
    spec = whitespace_re.sub('', inp)
    if not valid_diceroll_re.match(spec):
        return "Invalid diceroll"
    total = 0  # renamed from `sum`, which shadowed the builtin
    for roll in sign_re.findall(spec):
        count, side = split_re.match(roll).groups()
        if side == "":
            # bare constant term
            total += int(count)
        else:
            # BUG FIX: a bare sign used to collapse to +1, so "-d5"
            # *added* a d5; keep the sign when no explicit count is given.
            if count in ("", "+"):
                count = 1
            elif count == "-":
                count = -1
            else:
                count = int(count)
            side = int(side)
            try:
                if count > 0:
                    total += nrolls(count, side)
                else:
                    total -= nrolls(abs(count), side)
            except OverflowError:
                return "Thanks for overflowing a float, jerk >:["
    return str(total)

View File

@ -1 +0,0 @@
../plugins_available/dotnetpad.py

96
plugins/dotnetpad.py Normal file
View File

@ -0,0 +1,96 @@
"dotnetpad.py: by sklnd, because gobiner wouldn't shut up"
import urllib
import httplib
import socket
import json
from util import hook
def dotnetpad(lang, code):
    "Posts a provided snippet of code in a provided language to dotnetpad.net"
    code = code.encode('utf8')
    params = urllib.urlencode({'language': lang, 'code': code})
    headers = {"Content-type": "application/x-www-form-urlencoded",
               "Accept": "text/plain"}
    try:
        conn = httplib.HTTPConnection("dotnetpad.net:80")
        conn.request("POST", "/Skybot", params, headers)
        response = conn.getresponse()
    except httplib.HTTPException:
        conn.close()
        return 'error: dotnetpad is broken somehow'
    except socket.error:
        # BUG FIX: this path used to leak the connection
        conn.close()
        return 'error: unable to connect to dotnetpad'
    try:
        result = json.loads(response.read())
    except ValueError:
        conn.close()
        return 'error: dotnetpad is broken somehow'
    conn.close()
    if result['Errors']:
        return 'First error: %s' % (result['Errors'][0]['ErrorText'])
    elif result['Output']:
        return result['Output'].lstrip()
    else:
        return 'No output'
@hook.command
def fs(inp):
    ".fs -- post a F# code snippet to dotnetpad.net and print the results"
    # fall back to the usage string when called with no code
    return dotnetpad('fsharp', inp) if inp else fs.__doc__
@hook.command
def cs(snippet):
    ".cs -- post a C# code snippet to dotnetpad.net and print the results"
    if not snippet:
        return cs.__doc__
    file_template = ('using System; '
                     'using System.Linq; '
                     'using System.Collections.Generic; '
                     'using System.Text; '
                     '%(class)s')
    class_template = ('public class Default '
                      '{ '
                      ' %(main)s '
                      '}')
    main_template = ('public static void Main(String[] args) '
                     '{ '
                     ' %(snippet)s '
                     '}')
    # Wrap the snippet in whatever boilerplate it is missing:
    # no Main -> wrap in Main, class and file templates
    if 'public static void Main' not in snippet:
        code = main_template % {'snippet': snippet}
        code = class_template % {'main': code}
        code = file_template % {'class': code}
    # Main but no class -> wrap in class and file templates
    elif 'class' not in snippet:
        # BUG FIX: this branch used to build `code` and then
        # unconditionally `return 'Error using dotnetpad'`, so snippets
        # with a Main but no class could never run.
        code = class_template % {'main': snippet}
        code = file_template % {'class': code}
    # both present -> just add the usings
    else:
        code = file_template % {'class': snippet}
    return dotnetpad('csharp', code)

View File

@ -1 +0,0 @@
../plugins_available/down.py

27
plugins/down.py Normal file
View File

@ -0,0 +1,27 @@
import urllib2
import urlparse
from util import hook
@hook.command
def down(inp):
    '''.down <url> -- checks to see if the site is down'''
    inp = inp.strip()
    if not inp:
        return down.__doc__
    # BUG FIX: used a substring test (`'http://' not in inp`), which
    # misfired on inputs merely containing 'http://'; anchor the check.
    if not inp.startswith('http://'):
        inp = 'http://' + inp
    # reduce to scheme + host only
    inp = 'http://' + urlparse.urlparse(inp).netloc
    # http://mail.python.org/pipermail/python-list/2006-December/589854.html
    try:
        request = urllib2.Request(inp)
        request.get_method = lambda: "HEAD"
        http_file = urllib2.urlopen(request)
        http_file.read()  # HEAD response body is empty; drop unused local
        return inp + ' seems to be up'
    except urllib2.URLError:
        return inp + ' seems to be down'

View File

@ -1 +0,0 @@
../plugins_available/explain.py

15
plugins/explain.py Executable file
View File

@ -0,0 +1,15 @@
from util import hook
from pycparser.cdecl import explain_c_declaration
@hook.command('explain')
def explain(inp):
".explain <c expression> -- gives an explanation of C expression"
if not inp:
return explain.__doc__
inp = inp.encode('utf8', 'ignore')
try:
return explain_c_declaration(inp.rstrip())
except Exception, e:
return 'error: %s' % e

View File

@ -1 +0,0 @@
../plugins_available/google.py

59
plugins/google.py Normal file
View File

@ -0,0 +1,59 @@
import urllib
import random
from lxml import html
import json
from util import hook
def api_get(kind, query):
    """Query the Google AJAX search API for *kind* ('web', 'images', ...)
    and return the decoded JSON response."""
    base = ('http://ajax.googleapis.com/ajax/services/search/%s?'
            'v=1.0&safe=off&q=%s')
    url = base % (kind, urllib.quote(query.encode('utf8'), safe=''))
    return json.loads(urllib.urlopen(url).read())
@hook.command
def gis(inp):
    '''.gis <term> -- returns first google image result (safesearch off)'''
    if not inp:
        return gis.__doc__
    parsed = api_get('images', inp)
    status = parsed['responseStatus']
    if not 200 <= status < 300:
        raise IOError('error searching for images: %d: %s' % (status, ''))
    results = parsed['responseData']['results']
    if not results:
        return 'no images found'
    # pick among the top ten to vary the output
    return random.choice(results[:10])['unescapedUrl']  # squares is dumb
@hook.command
@hook.command('g')
def google(inp):
    '''.g/.google <query> -- returns first google search result'''
    if not inp:
        return google.__doc__
    parsed = api_get('web', inp)
    status = parsed['responseStatus']
    if not 200 <= status < 300:
        raise IOError('error searching for pages: %d: %s' % (status, ''))
    if not parsed['responseData']['results']:
        return 'no results found'
    result = parsed['responseData']['results'][0]
    # strip any HTML markup from the title and snippet
    title = html.fromstring(result['titleNoFormatting']).text_content()
    content = html.fromstring(result['content']).text_content()
    out = '%s -- \x02%s\x02: "%s"' % (result['unescapedUrl'], title, content)
    out = ' '.join(out.split())
    if len(out) > 300:
        out = out[:out.rfind(' ')] + '..."'
    return out

View File

@ -1 +0,0 @@
../plugins_available/goonsay.py

8
plugins/goonsay.py Normal file
View File

@ -0,0 +1,8 @@
from util import hook
#Scaevolus: factormystic if you commit a re-enabled goonsay I'm going to revoke your commit access
#@hook.command
def goonsay(bot, input):
    # emit the goon one line at a time (command hook deliberately disabled)
    for line in (' __________ /', '(--[. ]-[ .] /', '(_______o__)'):
        input.say(line)

View File

@ -1 +0,0 @@
../plugins_available/hash.py

20
plugins/hash.py Normal file
View File

@ -0,0 +1,20 @@
import hashlib
from util import hook
@hook.command
def md5(inp):
    # hex MD5 digest of the raw command argument
    return hashlib.md5(inp).hexdigest()
@hook.command
def sha1(inp):
    # hex SHA-1 digest of the raw command argument
    return hashlib.sha1(inp).hexdigest()
@hook.command
def hash(inp):
    ".hash <text> -- returns hashes of <text>"
    # NOTE: shadows the builtin `hash`; kept since it doubles as the command name
    return ', '.join(x + ": " + getattr(hashlib, x)(inp).hexdigest()
                     for x in 'md5 sha1 sha256'.split())

View File

@ -1 +0,0 @@
../plugins_available/help.py

17
plugins/help.py Normal file
View File

@ -0,0 +1,17 @@
from util import hook
@hook.command
def help(bot, input):
    ".help [command] -- gives a list of commands/help for a command"
    # map command name -> function, for documented commands only;
    # the catch-all r'(.*)' hooks (passive watchers) are excluded
    funcs = {}
    for csig, func, args in bot.plugs['command']:
        if args['hook'] != r'(.*)':
            if func.__doc__ is not None:
                funcs[csig[1]] = func  # csig[1] looks like the command name -- TODO confirm
    if not input.inp.strip():
        # no argument: private-message the sorted command list
        input.pm('available commands: ' + ' '.join(sorted(funcs)))
    else:
        if input.inp in funcs:
            input.pm(funcs[input.inp].__doc__)

View File

@ -1 +0,0 @@
../plugins_available/log.py

108
plugins/log.py Normal file
View File

@ -0,0 +1,108 @@
"""
log.py: written by Scaevolus 2009
"""
import os
import thread
import codecs
import time
import re
from util import hook
lock = thread.allocate_lock()
log_fds = {} # '%(net)s %(chan)s' : (filename, fd)
timestamp_format = '%H:%M:%S'
formats = {'PRIVMSG': '<%(nick)s> %(msg)s',
'PART': '-!- %(nick)s [%(user)s@%(host)s] has left %(chan)s',
'JOIN': '-!- %(nick)s [%(user)s@%(host)s] has joined %(param0)s',
'MODE': '-!- mode/%(chan)s [%(param_tail)s] by %(nick)s',
'KICK': '-!- %(param1)s was kicked from %(chan)s by %(nick)s [%(msg)s]',
'TOPIC': '-!- %(nick)s changed the topic of %(chan)s to: %(msg)s',
'QUIT': '-!- %(nick)s has quit [%(msg)s]',
'PING': '',
'NOTICE': ''
}
ctcp_formats = {'ACTION': '* %(nick)s %(ctcpmsg)s'}
irc_color_re = re.compile(r'(\x03(\d+,\d+|\d)|[\x0f\x02\x16\x1f])')
def get_log_filename(dir, server, chan):
    # Build log/<year>/<server>/<chan>.<month>-<day>.log, lowercased.
    # The '%%s' survives the strftime pass so the channel name can be
    # %-substituted afterwards.
    return os.path.join(dir, 'log', gmtime('%Y'), server,
                        gmtime('%%s.%m-%d.log') % chan).lower()
def gmtime(format):
    """Format the current UTC time according to *format* (strftime codes)."""
    now = time.gmtime()
    return time.strftime(format, now)
def beautify(input):
    # Render a raw IRC event as a human-readable log line using the
    # `formats` table; unknown commands fall back to the raw line.
    format = formats.get(input.command, '%(raw)s')
    args = vars(input)
    leng = len(args['paraml'])
    # expose positional params as param0.. and as param_<k> counted from
    # the end (abs(n - leng) yields leng..1, i.e. 1-based from the tail
    # -- presumably intentional, TODO confirm)
    for n, p in enumerate(args['paraml']):
        args['param' + str(n)] = p
        args['param_' + str(abs(n - leng))] = p
    args['param_tail'] = ' '.join(args['paraml'][1:])
    # strip mIRC color/formatting control codes from the message body
    args['msg'] = irc_color_re.sub('', args['msg'])
    if input.command == 'PRIVMSG' and input.msg.count('\x01') >= 2:
        # ctcp: payload sits between the \x01 delimiters
        ctcp = input.msg.split('\x01', 2)[1].split(' ', 1)
        if len(ctcp) == 1:
            ctcp += ['']
        args['ctcpcmd'], args['ctcpmsg'] = ctcp
        format = ctcp_formats.get(args['ctcpcmd'],
            '%(nick)s [%(user)s@%(host)s] requested unknown CTCP '
            '%(ctcpcmd)s from %(chan)s: %(ctcpmsg)s')
    return format % args
def get_log_fd(dir, server, chan):
    # Return a cached utf-8 append-mode file object for (server, chan),
    # rolling over to a fresh file whenever the dated filename changes.
    fn = get_log_filename(dir, server, chan)
    cache_key = '%s %s' % (server, chan)
    filename, fd = log_fds.get(cache_key, ('', 0))
    if fn != filename:  # we need to open a file for writing
        if fd != 0:  # is a valid fd
            fd.flush()
            fd.close()
        dir = os.path.split(fn)[0]
        if not os.path.exists(dir):
            os.makedirs(dir)
        fd = codecs.open(fn, 'a', 'utf-8')
        log_fds[cache_key] = (fn, fd)
    return fd
@hook.tee
def log(bot, input):
    # Append every raw IRC line to the 'raw' log and a beautified version
    # to the per-channel log; the lock serializes the shared fd cache.
    with lock:
        timestamp = gmtime(timestamp_format)
        fd = get_log_fd(bot.persist_dir, input.server, 'raw')
        fd.write(timestamp + ' ' + input.raw + '\n')
        if input.command == 'QUIT':  # these are temporary fixes until proper
            input.chan = 'quit'      # presence tracking is implemented
        if input.command == 'NICK':
            input.chan = 'nick'
        beau = beautify(input)
        if beau == '':  # don't log this
            return
        if input.chan:
            fd = get_log_fd(bot.persist_dir, input.server, input.chan)
            fd.write(timestamp + ' ' + beau + '\n')
        print timestamp, input.chan, beau.encode('utf8', 'ignore')

View File

@ -1 +0,0 @@
../plugins_available/misc.py

30
plugins/misc.py Normal file
View File

@ -0,0 +1,30 @@
from util import hook
import socket
socket.setdefaulttimeout(5) # global setting
#autorejoin channels
@hook.event('KICK')
def rejoin(bot, input):
    # paraml appears to be [channel, kicked_nick, ...] -- TODO confirm;
    # rejoin only when *we* were kicked from one of our channels
    if input.paraml[1] == input.conn.nick:
        if input.paraml[0] in input.conn.channels:
            input.conn.join(input.paraml[0])
#join channels when invited
@hook.event('INVITE')
def invite(bot, input):
    # the command check is redundant with the INVITE event hook, but harmless
    if input.command == 'INVITE':
        input.conn.join(input.inp)
#join channels when server says hello & identify bot
@hook.event('004')
def onjoin(bot, input):
    # 004 arrives once registration completes: join the configured
    # channels, then identify with nickserv if a password is configured
    for channel in input.conn.channels:
        input.conn.join(channel)
    nickserv_password = input.conn.conf.get('nickserv_password', '')
    nickserv_name = input.conn.conf.get('nickserv_name', 'nickserv')
    nickserv_command = input.conn.conf.get('nickserv_command', 'IDENTIFY %s')
    if nickserv_password:
        input.conn.msg(nickserv_name, nickserv_command % nickserv_password)

View File

@ -1 +0,0 @@
../plugins_available/mtg.py

170
plugins/mtg.py Normal file
View File

@ -0,0 +1,170 @@
from lxml import html
import re
import urllib2
import sys
from util import hook
@hook.command
def mtg(inp):
    # Look up a Magic: the Gathering card on magiccards.info and return
    # "name | type | rules text | printings | link". No docstring on
    # purpose? adding one would list it in .help -- TODO confirm.
    url = 'http://magiccards.info/query.php?cardname='
    url += urllib2.quote(inp, safe='')
    h = html.parse(url)
    # the card name heading anchors the rest of the scrape
    name = h.find('/body/table/tr/td/table/tr/td/h1')
    if name is None:
        return "no cards found"
    card = name.getparent()
    text = card.find('p')
    type = text.text
    text = text.find('b').text_content()
    text = re.sub(r'\(.*?\)', '', text)  # strip parenthetical explanations
    text = re.sub(r'\.(\S)', r'. \1', text)  # fix spacing
    # printings rendered as "<set> (<rarity>)", abbreviated via the tables below
    printings = card.find('table/tr/td/img').getparent().text_content()
    printings = re.findall(r'\s*(.+?(?: \([^)]+\))*) \((.*?)\)',
                           ' '.join(printings.split()))
    printing_out = ', '.join('%s (%s)' % (set_abbrevs.get(x[0], x[0]),
                                          rarity_abbrevs.get(x[1], x[1]))
                             for x in printings)
    name.make_links_absolute()
    link = name.find('a').attrib['href']
    name = name.text_content().strip()
    type = type.strip()
    text = ' '.join(text.split())
    return ' | '.join((name, type, text, printing_out, link))
set_abbrevs = {
'15th Anniversary': '15ANN',
'APAC Junior Series': 'AJS',
'Alara Reborn': 'ARB',
'Alliances': 'AI',
'Anthologies': 'AT',
'Antiquities': 'AQ',
'Apocalypse': 'AP',
'Arabian Nights': 'AN',
'Arena League': 'ARENA',
'Asia Pacific Land Program': 'APAC',
'Battle Royale': 'BR',
'Beatdown': 'BD',
'Betrayers of Kamigawa': 'BOK',
'Celebration Cards': 'UQC',
'Champions of Kamigawa': 'CHK',
'Champs': 'CP',
'Chronicles': 'CH',
'Classic Sixth Edition': '6E',
'Coldsnap': 'CS',
'Coldsnap Theme Decks': 'CSTD',
'Conflux': 'CFX',
'Core Set - Eighth Edition': '8E',
'Core Set - Ninth Edition': '9E',
'Darksteel': 'DS',
'Deckmasters': 'DM',
'Dissension': 'DI',
'Dragon Con': 'DRC',
'Duel Decks: Divine vs. Demonic': 'DVD',
'Duel Decks: Elves vs. Goblins': 'EVG',
'Duel Decks: Garruk vs. Liliana': 'GVL',
'Duel Decks: Jace vs. Chandra': 'JVC',
'Eighth Edition Box Set': '8EB',
'European Land Program': 'EURO',
'Eventide': 'EVE',
'Exodus': 'EX',
'Fallen Empires': 'FE',
'Fifth Dawn': '5DN',
'Fifth Edition': '5E',
'Fourth Edition': '4E',
'Friday Night Magic': 'FNMP',
'From the Vault: Dragons': 'FVD',
'From the Vault: Exiled': 'FVE',
'Future Sight': 'FUT',
'Gateway': 'GRC',
'Grand Prix': 'GPX',
'Guildpact': 'GP',
'Guru': 'GURU',
'Happy Holidays': 'HHO',
'Homelands': 'HL',
'Ice Age': 'IA',
'Introductory Two-Player Set': 'ITP',
'Invasion': 'IN',
'Judge Gift Program': 'JR',
'Judgment': 'JU',
'Junior Series': 'JSR',
'Legend Membership': 'DCILM',
'Legends': 'LG',
'Legions': 'LE',
'Limited Edition (Alpha)': 'AL',
'Limited Edition (Beta)': 'BE',
'Lorwyn': 'LW',
'MTGO Masters Edition': 'MED',
'MTGO Masters Edition II': 'ME2',
'MTGO Masters Edition III': 'ME3',
'Magic 2010': 'M10',
'Magic Game Day Cards': 'MGDC',
'Magic Player Rewards': 'MPRP',
'Magic Scholarship Series': 'MSS',
'Magic: The Gathering Launch Parties': 'MLP',
'Media Inserts': 'MBP',
'Mercadian Masques': 'MM',
'Mirage': 'MR',
'Mirrodin': 'MI',
'Morningtide': 'MT',
'Multiverse Gift Box Cards': 'MGBC',
'Nemesis': 'NE',
'Ninth Edition Box Set': '9EB',
'Odyssey': 'OD',
'Onslaught': 'ON',
'Planar Chaos': 'PC',
'Planechase': 'PCH',
'Planeshift': 'PS',
'Portal': 'PO',
'Portal Demogame': 'POT',
'Portal Second Age': 'PO2',
'Portal Three Kingdoms': 'P3K',
'Premium Deck Series: Slivers': 'PDS',
'Prerelease Events': 'PTC',
'Pro Tour': 'PRO',
'Prophecy': 'PR',
'Ravnica: City of Guilds': 'RAV',
'Release Events': 'REP',
'Revised Edition': 'RV',
'Saviors of Kamigawa': 'SOK',
'Scourge': 'SC',
'Seventh Edition': '7E',
'Shadowmoor': 'SHM',
'Shards of Alara': 'ALA',
'Starter': 'ST',
'Starter 2000 Box Set': 'ST2K',
'Stronghold': 'SH',
'Summer of Magic': 'SOM',
'Super Series': 'SUS',
'Tempest': 'TP',
'Tenth Edition': '10E',
'The Dark': 'DK',
'Time Spiral': 'TS',
'Time Spiral Timeshifted': 'TSTS',
'Torment': 'TR',
'Two-Headed Giant Tournament': 'THGT',
'Unglued': 'UG',
'Unhinged': 'UH',
'Unhinged Alternate Foils': 'UHAA',
'Unlimited Edition': 'UN',
"Urza's Destiny": 'UD',
"Urza's Legacy": 'UL',
"Urza's Saga": 'US',
'Visions': 'VI',
'Weatherlight': 'WL',
'Worlds': 'WRL',
'WotC Online Store': 'WOTC',
'Zendikar': 'ZEN'}
rarity_abbrevs = {
'Common': 'C',
'Uncommon': 'UC',
'Rare': 'R',
'Special': 'S',
'Mythic Rare': 'MR'}

View File

@ -1 +0,0 @@
../plugins_available/profile.py

13
plugins/profile.py Normal file
View File

@ -0,0 +1,13 @@
# for crusty old rotor
from util import hook
@hook.command
def profile(inp):
    ".profile <username> -- links to <username>'s profile on SA"
    if not inp:
        return profile.__doc__
    # spaces in forum names become '+' in the query string
    base = 'http://forums.somethingawful.com/member.php?action=getinfo'
    return base + '&username=' + '+'.join(inp.split())

View File

@ -1 +0,0 @@
../plugins_available/pyexec.py

25
plugins/pyexec.py Normal file
View File

@ -0,0 +1,25 @@
import urllib
import re
from util import hook
re_lineends = re.compile(r'[\r\n]*')
@hook.command
def py(inp):
    ".py <prog> -- executes python code <prog>"
    if not inp:
        return py.__doc__
    quoted = urllib.quote(inp.strip(), safe='')
    res = urllib.urlopen("http://eval.appspot.com/eval?statement=%s" %
                         quoted).readlines()
    if len(res) == 0:
        return
    res[0] = re_lineends.split(res[0])[0]
    # show the first line, unless it's a traceback header -- then the
    # last line carries the actual error
    if res[0] == 'Traceback (most recent call last):':
        return res[-1]
    return res[0]

View File

@ -1 +0,0 @@
../plugins_available/sieve.py

29
plugins/sieve.py Normal file
View File

@ -0,0 +1,29 @@
import re
from util import hook
@hook.sieve
def sieve_suite(bot, input, func, args):
    # Decide whether *input* should be dispatched to *func*: returns the
    # (annotated) input on a match, or None to drop it.
    events = args.get('events', ['PRIVMSG'])
    if input.command not in events and events != '*':
        return None
    # skip anything whose nick ends in 'bot', unless the hook opted out
    if input.nick.lower()[-3:] == 'bot' and args.get('ignorebots', True):
        return None
    hook = args.get('hook', r'(.*)')  # NOTE: shadows the imported `hook` module
    if args.get('prefix', True):
        # add a prefix, unless it's a private message
        hook = (r'^(?:[.!]|' if input.chan != input.nick else r'^(?:[.!]?|') \
            + input.conn.nick + r'[:,]*\s*)' + hook
    input.re = re.match(hook, input.msg, flags=re.I)
    if input.re is None:
        return None
    # collapse the pattern's capture groups into the command argument string
    input.inp = ' '.join(input.re.groups())
    return input

View File

@ -1 +0,0 @@
../plugins_available/suggest.py

36
plugins/suggest.py Normal file
View File

@ -0,0 +1,36 @@
import random
import urllib
import urllib2
import re
import json
from util import hook
@hook.command
def suggest(inp):
    ".suggest [#n] <phrase> -- gets a random/the nth suggested google search"
    if not inp.strip():
        return suggest.__doc__
    # optional "#n " prefix selects the nth suggestion instead of a random one
    m = re.match('^#(\d+) (.+)$', inp)
    if m:
        num, inp = m.groups()
        num = int(num)
        if num > 10:
            return 'can only get first ten suggestions'
    else:
        num = 0
    url = 'http://google.com/complete/search?q=' + urllib.quote(inp, safe='')
    page = urllib2.urlopen(url).read()
    # the response is JSONP: strip the wrapping callback parentheses
    page_json = page.split('(', 1)[1][:-1]
    suggestions = json.loads(page_json)[1]
    if not suggestions:
        return 'no suggestions found'
    if num:
        if len(suggestions) + 1 <= num:
            return 'only got %d suggestions' % len(suggestions)
        out = suggestions[num - 1]
    else:
        out = random.choice(suggestions)
    # out looks like [phrase, result-count, ..., [rank]] -- TODO confirm shape
    return '#%d: %s (%s)' % (int(out[2][0]) + 1, out[0], out[1])

View File

@ -1 +0,0 @@
../plugins_available/tinyurl.py

18
plugins/tinyurl.py Normal file
View File

@ -0,0 +1,18 @@
import re
import urllib2
from util import hook
# matches tinyurl links anywhere in a message.
# BUG FIX: the dot in 'tinyurl.com' was an unescaped `.` and matched any
# character (e.g. 'tinyurlXcom'); escape it.
tinyurl_re = re.compile(r'http://(?:www\.)?tinyurl\.com/([A-Za-z0-9\-]+)',
                        flags=re.IGNORECASE)
@hook.command(hook=r'(.*)', prefix=False)
def tinyurl(inp):
    # passively watch all messages and expand the first tinyurl link seen
    match = tinyurl_re.search(inp)
    if not match:
        return
    try:
        # urlopen follows the redirect; .url is the expanded destination
        return urllib2.urlopen(match.group()).url.strip()
    except urllib2.URLError:
        pass

View File

@ -1 +0,0 @@
../plugins_available/twitter.py

137
plugins/twitter.py Normal file
View File

@ -0,0 +1,137 @@
"""
twitter.py: written by Scaevolus 2009
retrieves most recent tweets
"""
import re
import random
import urllib2
from lxml import etree
from time import strptime, strftime
from util import hook
def unescape_xml(string):
    """Unescape the five XML character entities in *string*.

    '&amp;' is replaced last so that e.g. '&amp;lt;' yields '&lt;'.
    """
    # BUG FIX: the double-quote entity is '&quot;', not '&quote;'
    return (string.replace('&gt;', '>')
                  .replace('&lt;', '<')
                  .replace('&apos;', "'")
                  .replace('&quot;', '"')
                  .replace('&amp;', '&'))
history = []
history_max_size = 250
@hook.command
def twitter(inp):
".twitter <user>/<user> <n>/<id>/#<hashtag>/@<user> -- gets last/<n>th tweet from"\
"<user>/gets tweet <id>/gets random tweet with #<hashtag>/gets replied tweet from @<user>"
inp = inp.strip()
if not inp:
return twitter.__doc__
def add_reply(reply_name, reply_id):
if len(history) == history_max_size:
history.pop()
history.insert(0, (reply_name, reply_id))
def find_reply(reply_name):
for name, id in history:
if name == reply_name:
return id
if inp[0] == '@':
reply_id = find_reply(inp[1:])
if reply_id == None:
return 'error: no replies to %s found' % inp
inp = reply_id
url = 'http://twitter.com'
getting_nth = False
getting_id = False
searching_hashtag = False
time = 'status/created_at'
text = 'status/text'
reply_name = 'status/in_reply_to_screen_name'
reply_id = 'status/in_reply_to_status_id'
if re.match(r'^\d+$', inp):
getting_id = True
url += '/statuses/show/%s.xml' % inp
screen_name = 'user/screen_name'
time = 'created_at'
text = 'text'
reply_name = 'in_reply_to_screen_name'
reply_id = 'in_reply_to_status_id'
elif re.match(r'^\w{1,15}$', inp):
url += '/users/show/%s.xml' % inp
screen_name = 'screen_name'
elif re.match(r'^\w{1,15}\s+\d+$', inp):
getting_nth = True
name, num = inp.split()
if int(num) > 3200:
return 'error: only supports up to the 3200th tweet'
url += '/statuses/user_timeline/%s.xml?count=1&page=%s' % (name, num)
screen_name = 'status/user/screen_name'
elif re.match(r'^#\w+$', inp):
url = 'http://search.twitter.com/search.atom?q=%23' + inp[1:]
searching_hashtag = True
else:
return 'error: invalid request'
try:
xml = urllib2.urlopen(url).read()
except urllib2.HTTPError, e:
errors = {400 : 'bad request (ratelimited?)',
401: 'tweet is private',
404: 'invalid user/id',
500: 'twitter is broken',
502: 'twitter is down ("getting upgraded")',
503: 'twitter is overloaded (lol, RoR)'}
if e.code == 404:
return 'error: invalid ' + ['username', 'tweet id'][getting_id]
if e.code in errors:
return 'error: ' + errors[e.code]
return 'error: unknown'
except urllib2.URLerror, e:
return 'error: timeout'
tweet = etree.fromstring(xml)
if searching_hashtag:
ns = '{http://www.w3.org/2005/Atom}'
tweets = tweet.findall(ns + 'entry/' + ns + 'id')
if not tweets:
return 'error: hashtag not found'
id = random.choice(tweets).text
id = id[id.rfind(':') + 1:]
print id
return twitter(id)
if getting_nth:
if tweet.find('status') is None:
return 'error: user does not have that many tweets'
time = tweet.find(time)
if time is None:
return 'error: user has no tweets'
reply_name = tweet.find(reply_name).text
reply_id = tweet.find(reply_id).text
if reply_name is not None and reply_id is not None:
add_reply(reply_name, reply_id)
time = strftime('%Y-%m-%d %H:%M:%S',
strptime(time.text,
'%a %b %d %H:%M:%S +0000 %Y'))
text = unescape_xml(tweet.find(text).text.replace('\n', ''))
screen_name = tweet.find(screen_name).text
return "%s %s: %s" % (time, screen_name, text)

View File

@ -1 +0,0 @@
../plugins_available/urbandictionary.py

View File

@ -0,0 +1,29 @@
from lxml import html
import urllib
from util import hook
@hook.command('u')
@hook.command
def urban(inp):
    '''.u/.urban <phrase> -- looks up <phrase> on urbandictionary.com'''
    if not inp.strip():
        return urban.__doc__
    url = 'http://www.urbandictionary.com/define.php?term=' + \
        urllib.quote(inp.strip(), safe='')
    page = html.parse(url)
    words = page.xpath("//td[@class='word']")
    defs = page.xpath("//div[@class='definition']")
    if not defs:
        return 'no definitions found'
    # "<word>: <first definition>", with whitespace collapsed
    result = words[0].text_content().strip() + ': ' + ' '.join(
        defs[0].text_content().split())
    if len(result) > 400:
        result = result[:result.rfind(' ', 0, 400)] + '...'
    return result

View File

@ -1 +0,0 @@
../plugins_available/weather.py

72
plugins/weather.py Normal file
View File

@ -0,0 +1,72 @@
"weather, thanks to google"
import os
import codecs
import thread
import urllib
from lxml import etree
from util import hook
lock = thread.allocate_lock()
stalk = {}
def load_stalk(filename, mtimes={}):
    # Parse "nick location" lines into a dict. The mutable default
    # `mtimes` is intentional: it persists across calls as an mtime cache.
    if not os.path.exists(filename):
        return {}
    mtime = os.stat(filename).st_mtime
    if mtimes.get(filename, 0) != mtime:
        mtimes[filename] = mtime
        return dict(x.strip().split(None, 1) for x in
                    codecs.open(filename, 'r', 'utf-8'))
    # NOTE(review): falls through and returns None when the file is
    # unchanged; the caller only invokes this when its cache is empty,
    # but an implicit None return looks accidental -- verify.
def save_stalk(filename, houses):
    """Write nick/location pairs to *filename*, one per line, sorted."""
    lines = ('%s %s' % pair for pair in sorted(houses.iteritems()))  # heh
    out = codecs.open(filename, 'w', 'utf-8')
    out.write('\n'.join(lines))
    out.flush()
    out.close()
@hook.command
def weather(bot, input):
    ".weather <location> [dontsave] -- queries the google weather API for weather data"
    global stalk
    filename = os.path.join(bot.persist_dir, 'weather')
    # lazily load the per-nick saved locations on first use
    if not stalk:
        with lock:
            stalk = load_stalk(filename)
    nick = input.nick.lower()
    loc = input.inp.strip()
    # trailing " dontsave" suppresses remembering this location
    dontsave = loc.endswith(" dontsave")
    if dontsave:
        loc = loc[:-9].strip().lower()
    if not loc:  # blank line
        loc = stalk.get(nick, '')
        if not loc:
            return weather.__doc__
    data = urllib.urlencode({'weather': loc.encode('utf-8')})
    url = 'http://www.google.com/ig/api?' + data
    w = etree.parse(url).find('weather')
    if w.find('problem_cause') is not None:
        return "Couldn't fetch weather data for '%s', try using a zip or " \
            "postal code." % input.inp
    # flatten <current_conditions> attributes plus city and today's range
    info = dict((e.tag, e.get('data')) for e in w.find('current_conditions'))
    info['city'] = w.find('forecast_information/city').get('data')
    info['high'] = w.find('forecast_conditions/high').get('data')
    info['low'] = w.find('forecast_conditions/low').get('data')
    input.reply('%(city)s: %(condition)s, %(temp_f)sF/%(temp_c)sC (H:%(high)sF'\
        ', L:%(low)sF), %(humidity)s, %(wind_condition)s.' % info)
    # remember the location for next time unless told not to
    if not dontsave and loc != stalk.get(nick, ''):
        with lock:
            stalk[nick] = loc
            save_stalk(filename, stalk)

View File

@ -1 +0,0 @@
../plugins_available/youtube.py

41
plugins/youtube.py Normal file
View File

@ -0,0 +1,41 @@
import re
from lxml import etree
import locale
from util import hook
def ytdata(id):
    # Fetch the GData feed for a video id and flatten it into a dict:
    # rating attributes, title, locale-grouped view count, and a
    # human-readable "1h 2m 3s"-style length string.
    url = 'http://gdata.youtube.com/feeds/api/videos/' + id
    x = etree.parse(url)
    # I can't figure out how to deal with schemas/namespaces properly :(
    yt = '{http://gdata.youtube.com/schemas/2007}'
    media = '{http://search.yahoo.com/mrss/}'
    rating = x.find('{http://schemas.google.com/g/2005}rating')
    data = dict(rating.items())
    data['title'] = x.find('{http://www.w3.org/2005/Atom}title').text
    data['views'] = locale.format('%d', int(x.find(yt + 'statistics').get(
        'viewCount')), 1)
    length = int(x.find(media + 'group/' + yt + 'duration').get('seconds'))
    # build up the duration string; relies on Python 2 integer division
    data['length'] = ''
    if length / 3600:  # > 1 hour
        data['length'] += str(length / 3600) + 'h '
    if length / 60:  # > 1 minute
        data['length'] += str(length / 60 % 60) + 'm '
    data['length'] += "%ds" % (length % 60)
    return data
youtube_re = re.compile(r'.*youtube.*v=([-_a-z0-9]+)', flags=re.IGNORECASE)
#@hook.command(hook=r'(.*)', prefix=False)
def youtube(inp):
    # passive link-info responder (currently disabled via the
    # commented-out decorator above)
    match = youtube_re.match(inp)
    if not match:
        return
    info = ytdata(match.group(1))
    return '\x02%(title)s\x02 - rated \x02%(average)s/%(max)s\x02 ' \
        '(%(numRaters)s) - views \x02%(views)s\x02 - length \x02' \
        '%(length)s\x02' % info