infinite pain

Christine Dodrill 2015-11-02 17:18:44 -08:00
parent 551d854fbd
commit 65f36182bb
34 changed files with 196 additions and 1640 deletions


@ -1,119 +0,0 @@
'''brainfuck interpreter adapted from (public domain) code at
http://brainfuck.sourceforge.net/brain.py'''
import re
import random
import unittest
from util import hook
@hook.command
def bf(inp, max_steps=1000000, buffer_size=5000):
".bf <prog> -- executes brainfuck program <prog>"""
program = re.sub('[^][<>+-.,]', '', inp)
# create a dict of brackets pairs, for speed later on
brackets = {}
open_brackets = []
for pos in range(len(program)):
if program[pos] == '[':
open_brackets.append(pos)
elif program[pos] == ']':
if len(open_brackets) > 0:
brackets[pos] = open_brackets[-1]
brackets[open_brackets[-1]] = pos
open_brackets.pop()
else:
return 'unbalanced brackets'
if len(open_brackets) != 0:
return 'unbalanced brackets'
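# e.g. for the program '[->+<]' this produces brackets == {0: 5, 5: 0}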
# now we can start interpreting
ip = 0 # instruction pointer
mp = 0 # memory pointer
steps = 0
memory = [0] * buffer_size # initial memory area
rightmost = 0
output = "" # we'll save the output here
# the main program loop:
while ip < len(program):
c = program[ip]
if c == '+':
memory[mp] = (memory[mp] + 1) % 256
elif c == '-':
memory[mp] = (memory[mp] - 1) % 256
elif c == '>':
mp += 1
if mp > rightmost:
rightmost = mp
if mp >= len(memory):
# no restriction on memory growth!
memory.extend([0] * buffer_size)
elif c == '<':
mp = (mp - 1) % len(memory)
elif c == '.':
output += chr(memory[mp])
if len(output) > 500:
break
elif c == ',':
memory[mp] = random.randint(1, 255)
elif c == '[':
if memory[mp] == 0:
ip = brackets[ip]
elif c == ']':
if memory[mp] != 0:
ip = brackets[ip]
ip += 1
steps += 1
if steps > max_steps:
if output == '':
output = 'no output'
output += ' [exceeded %d iterations]' % max_steps
break
stripped_output = re.sub(r'[\x00-\x1F]', '', output)
if stripped_output == '':
if output != '':
return 'no printable output'
return 'no output'
return stripped_output[:430].decode('utf8', 'ignore')
class BFTest(unittest.TestCase):
def test_hello(self):
assert bf('--[>--->->->++>-<<<<<-------]>--.>---------.>--..+++.>---'
'-.>+++++++++.<<.+++.------.<-.>>+.') == 'Hello world!'
def test_unbalanced(self):
assert 'unbalanced' in bf('[[++]]]')
assert 'unbalanced' in bf('[[[++]]')
def test_comment(self):
assert bf('[this is a comment!]++++++[>+++++++<-]>.') == '*'
def test_unprintable(self):
assert bf('+.') == 'no printable output'
def test_empty(self):
assert bf('+++[-]') == 'no output'
def test_exceeded(self):
assert bf('+[>,[-]<]', 1000) == 'no output [exceeded 1000 iterations]'
def test_inf_mem(self):
assert 'no output [exceeded 1000 iterations]' == \
bf('+[>[.-]+]', 1000, buffer_size=10)
def test_left_wrap(self):
# eventually, wrap around and hit ourselves
assert 'aaaa' in bf('+[<[-' + '+' * ord('a') + '.[-]]+]',
2000, buffer_size=5)
def test_too_much_output(self):
assert 'a' * 430 == bf('+' * ord('a') + '[.]')


@ -1,9 +0,0 @@
from util import http, hook
@hook.command(autohelp=False)
def bitcoin(inp, say=None):
".bitcoin -- gets current exchange rate for bitcoins from BTC-e"
data = http.get_json("https://btc-e.com/api/2/btc_usd/ticker")
say("USD/BTC: \x0307{buy:.0f}\x0f - High: \x0307{high:.0f}\x0f"
" - Low: \x0307{low:.0f}\x0f - Volume: {vol_cur:.0f}".format(**data['ticker']))


@ -1,7 +0,0 @@
from util import hook, http
@hook.command
def cdecl(inp):
'''.cdecl <expr> -- translate between C declarations and English, using cdecl.org'''
return http.get("http://cdecl.org/query.php", q=inp)


@ -1,17 +0,0 @@
import re
import random
from util import hook
@hook.command
def choose(inp):
".choose <choice1>, <choice2>, ... <choicen> -- makes a decision"
c = re.findall(r'([^,]+)', inp)
if len(c) == 1:
c = re.findall(r'(\S+)', inp)
if len(c) == 1:
return 'the decision is up to you'
return random.choice(c).strip()


@ -1,33 +0,0 @@
# crowdcontrol.py by craisins in 2014
# Bot must have some sort of op or admin privileges to be useful
import re
import time
from util import hook
# Use "crowdcontrol" array in config
# syntax
# rule:
# re: RegEx. regular expression to match
# msg: String. message to display either with kick or as a warning
# kick: Integer. 1 to kick the user, 0 not to
# ban_length: Integer. (optional) Length of time (seconds) to ban the user (-1 to ban and never unban, 0 to not ban, > 0 for a timed ban)
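# example rule (hypothetical values) as it would appear in the "crowdcontrol" array:
# "crowdcontrol": [
#     {"re": "(?i)buy cheap gold", "msg": "no spam please", "kick": 1, "ban_length": 60}
# ]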
@hook.regex(r'.*')
def crowdcontrol(inp, kick=None, ban=None, unban=None, reply=None, bot=None):
inp = inp.group(0)
for rule in bot.config.get('crowdcontrol', []):
if re.search(rule['re'], inp) is not None:
should_kick = rule.get('kick', 0)
ban_length = rule.get('ban_length', 0)
reason = rule.get('msg')
if ban_length != 0:
ban()
if should_kick:
kick(reason=reason)
elif 'msg' in rule:
reply(reason)
if ban_length > 0:
time.sleep(ban_length)
unban()

11
plugins/derpiback.py Normal file

@ -0,0 +1,11 @@
from util import hook
import requests
@hook.command
def derpiback(pls):
r = requests.get("https://derpibooru.org")
if "J6-eVNTVvMk" in r.text:
return "nope derpibooru is still down for maintenance, at soonest it will be tomorrow"
return "yep"


@ -1,26 +0,0 @@
import urlparse
from util import hook, http
@hook.command
def down(inp):
'''.down <url> -- checks to see if the website is down'''
# urlparse follows the RFC closely, so we have to check for scheme existence and prepend an empty scheme if necessary
if not inp.startswith('//') and '://' not in inp:
inp = '//' + inp
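# e.g. urlparse.urlparse('example.com', 'http') puts 'example.com' in .path,
# while urlparse.urlparse('//example.com', 'http') puts it in .netloc as intended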
urlp = urlparse.urlparse(inp, 'http')
if urlp.scheme not in ('http', 'https'):
return inp + " is not a valid HTTP URL"
inp = "%s://%s" % (urlp.scheme, urlp.netloc)
# http://mail.python.org/pipermail/python-list/2006-December/589854.html
try:
http.get(inp, get_method='HEAD')
return inp + ' seems to be up'
except http.URLError as error:
return inp + ' seems to be down. Error: %s' % error.reason


@ -1,31 +0,0 @@
'''Searches Encyclopedia Dramatica and returns the first paragraph of the
article'''
from util import hook, http
api_url = "http://encyclopediadramatica.se/api.php?action=opensearch"
ed_url = "http://encyclopediadramatica.se/"
@hook.command('ed')
@hook.command
def drama(inp):
'''.drama <phrase> -- gets first paragraph of Encyclopedia Dramatica ''' \
'''article on <phrase>'''
j = http.get_json(api_url, search=inp)
if not j[1]:
return 'no results found'
article_name = j[1][0].replace(' ', '_').encode('utf8')
url = ed_url + http.quote(article_name, '')
page = http.get_html(url)
for p in page.xpath('//div[@id="bodyContent"]/p'):
if p.text_content():
summary = ' '.join(p.text_content().splitlines())
if len(summary) > 300:
summary = summary[:summary.rfind(' ', 0, 300)] + "..."
return '%s :: \x02%s\x02' % (summary, url)
return "error"


@ -1,17 +0,0 @@
from util import hook, http
@hook.command
def calc(inp):
'''.calc <term> -- returns Google Calculator result'''
h = http.get_html('http://www.google.com/search', q=inp)
m = h.xpath('//h2[@class="r"]/text()')
if not m:
return "could not calculate " + inp
res = ' '.join(m[0].split())
return res


@ -1,39 +0,0 @@
import random
from util import hook, http
def api_get(query, key, is_image=None, num=1):
url = ('https://www.googleapis.com/customsearch/v1?cx=007629729846476161907:ud5nlxktgcw'
'&fields=items(title,link,snippet)&safe=off' + ('&searchType=image' if is_image else ''))
return http.get_json(url, key=key, q=query, num=num)
@hook.api_key('google')
@hook.command
def gis(inp, api_key=None):
'''.gis <term> -- finds an image using google images (safesearch off)'''
parsed = api_get(inp, api_key, is_image=True, num=10)
if 'items' not in parsed:
return 'no images found'
return random.choice(parsed['items'])['link']
@hook.api_key('google')
@hook.command('g')
@hook.command
def google(inp, api_key=None):
'''.g/.google <query> -- returns first google search result'''
parsed = api_get(inp, api_key)
if 'items' not in parsed:
return 'no results found'
out = u'{link} -- \x02{title}\x02: "{snippet}"'.format(**parsed['items'][0])
out = ' '.join(out.split())
if len(out) > 300:
out = out[:out.rfind(' ')] + '..."'
return out


@ -1,20 +0,0 @@
import hashlib
from util import hook
@hook.command
def md5(inp):
return hashlib.md5(inp).hexdigest()
@hook.command
def sha1(inp):
return hashlib.sha1(inp).hexdigest()
@hook.command
def hash(inp):
".hash <text> -- returns hashes of <text>"
return ', '.join(x + ": " + getattr(hashlib, x)(inp).hexdigest()
for x in 'md5 sha1 sha256'.split())


@ -1,25 +0,0 @@
# IMDb lookup plugin by Ghetto Wizard (2011).
from util import hook, http
@hook.command
def imdb(inp):
'''.imdb <movie> -- gets information about <movie> from IMDb'''
content = http.get_json("http://www.omdbapi.com/", t=inp)
if content['Response'] == 'Movie Not Found':
return 'movie not found'
elif content['Response'] == 'True':
content['URL'] = 'http://www.imdb.com/title/%(imdbID)s' % content
out = '\x02%(Title)s\x02 (%(Year)s) (%(Genre)s): %(Plot)s'
if content['Runtime'] != 'N/A':
out += ' \x02%(Runtime)s\x02.'
if content['imdbRating'] != 'N/A' and content['imdbVotes'] != 'N/A':
out += ' \x02%(imdbRating)s/10\x02 with \x02%(imdbVotes)s\x02 votes.'
out += ' %(URL)s'
return out % content
else:
return 'unknown error'


@ -1,82 +0,0 @@
'''
The Last.fm API key is retrieved from the bot config file.
'''
from util import hook, http
api_url = "http://ws.audioscrobbler.com/2.0/?format=json"
@hook.api_key('lastfm')
@hook.command(autohelp=False)
def lastfm(inp, chan='', nick='', reply=None, api_key=None, db=None):
".lastfm <username> [dontsave] | @<nick> -- gets current or last played " \
"track from lastfm"
db.execute(
"create table if not exists "
"lastfm(chan, nick, user, primary key(chan, nick))"
)
if inp[0:1] == '@':
nick = inp[1:].strip()
user = None
dontsave = True
else:
user = inp
dontsave = user.endswith(" dontsave")
if dontsave:
user = user[:-9].strip().lower()
if not user:
user = db.execute(
"select user from lastfm where chan=? and nick=lower(?)",
(chan, nick)).fetchone()
if not user:
return lastfm.__doc__
user = user[0]
response = http.get_json(api_url, method="user.getrecenttracks",
api_key=api_key, user=user, limit=1)
if 'error' in response:
return "error: %s" % response["message"]
if not "track" in response["recenttracks"] or \
len(response["recenttracks"]["track"]) == 0:
return "no recent tracks for user \x02%s\x0F found" % user
tracks = response["recenttracks"]["track"]
if type(tracks) == list:
# if the user is listening to something, the tracks entry is a list
# the first item is the current track
track = tracks[0]
status = 'current track'
elif type(tracks) == dict:
# otherwise, they aren't listening to anything right now, and
# the tracks entry is a dict representing the most recent track
track = tracks
status = 'last track'
else:
return "error parsing track listing"
title = track["name"]
album = track["album"]["#text"]
artist = track["artist"]["#text"]
ret = "\x02%s\x0F's %s - \x02%s\x0f" % (user, status, title)
if artist:
ret += " by \x02%s\x0f" % artist
if album:
ret += " on \x02%s\x0f" % album
reply(ret)
if inp and not dontsave:
db.execute(
"insert or replace into lastfm(chan, nick, user) "
"values (?, ?, ?)", (chan, nick.lower(), inp))
db.commit()


@ -1,28 +0,0 @@
import os
import re
from util import hook
@hook.command(autohelp=False)
def mem(inp):
".mem -- returns bot's current memory usage -- linux/windows only"
if os.name == 'posix':
status_file = open("/proc/%d/status" % os.getpid()).read()
line_pairs = re.findall(r"^(\w+):\s*(.*)\s*$", status_file, re.M)
status = dict(line_pairs)
keys = 'VmSize VmLib VmData VmExe VmRSS VmStk'.split()
return ', '.join(key + ':' + status[key] for key in keys)
elif os.name == 'nt':
cmd = "tasklist /FI \"PID eq %s\" /FO CSV /NH" % os.getpid()
out = os.popen(cmd).read()
total = 0
for amount in re.findall(r'([,0-9]+) K', out):
total += int(amount.replace(',', ''))
return 'memory usage: %d kB' % total
return mem.__doc__


@ -1,135 +0,0 @@
# metacritic.com scraper
import re
from urllib2 import HTTPError
from util import hook, http
@hook.command('mc')
def metacritic(inp):
'.mc [all|movie|tv|album|x360|ps3|pc|gba|ds|3ds|wii|vita|wiiu|xone|ps4] <title> -- gets rating for'\
' <title> from metacritic on the specified medium'
# if the results suck, it's metacritic's fault
args = inp.strip()
game_platforms = ('x360', 'ps3', 'pc', 'gba', 'ds', '3ds', 'wii', 'vita', 'wiiu', 'xone', 'ps4')
all_platforms = game_platforms + ('all', 'movie', 'tv', 'album')
try:
plat, title = args.split(' ', 1)
if plat not in all_platforms:
# raise the ValueError so that the except block catches it
# in this case, or in the case of the .split above raising the
# ValueError, we want the same thing to happen
raise ValueError
except ValueError:
plat = 'all'
title = args
cat = 'game' if plat in game_platforms else plat
title_safe = http.quote_plus(title)
url = 'http://www.metacritic.com/search/%s/%s/results' % (cat, title_safe)
try:
doc = http.get_html(url)
except HTTPError:
return 'error fetching results'
''' result format:
-- game result, with score
-- subsequent results are the same structure, without first_result class
<li class="result first_result">
<div class="result_type">
<strong>Game</strong>
<span class="platform">WII</span>
</div>
<div class="result_wrap">
<div class="basic_stats has_score">
<div class="main_stats">
<h3 class="product_title basic_stat">...</h3>
<div class="std_score">
<div class="score_wrap">
<span class="label">Metascore: </span>
<span class="data metascore score_favorable">87</span>
</div>
</div>
</div>
<div class="more_stats extended_stats">...</div>
</div>
</div>
</li>
-- other platforms are the same basic layout
-- if it doesn't have a score, there is no div.basic_score
-- the <div class="result_type"> changes content for non-games:
<div class="result_type"><strong>Movie</strong></div>
'''
# get the proper result element we want to pull data from
result = None
if not doc.find_class('query_results'):
return 'no results found'
# if they specified an invalid search term, the input box will be empty
if doc.get_element_by_id('search_term').value == '':
return 'invalid search term'
if plat not in game_platforms:
# for [all] results, or non-game platforms, get the first result
result = doc.find_class('result first_result')[0]
# find the platform, if it exists
result_type = result.find_class('result_type')
if result_type:
# if the result_type div has a platform div, get that one
platform_div = result_type[0].find_class('platform')
if platform_div:
plat = platform_div[0].text_content().strip()
else:
# otherwise, use the result_type text_content
plat = result_type[0].text_content().strip()
else:
# for games, we want to pull the first result with the correct
# platform
results = doc.find_class('result')
for res in results:
result_plat = res.find_class('platform')[0].text_content().strip()
if result_plat == plat.upper():
result = res
break
if not result:
return 'no results found'
# get the name, release date, and score from the result
product_title = result.find_class('product_title')[0]
name = product_title.text_content()
link = 'http://metacritic.com' + product_title.find('a').attrib['href']
try:
release = result.find_class('release_date')[0].\
find_class('data')[0].text_content()
# strip extra spaces out of the release date
release = re.sub(r'\s{2,}', ' ', release)
except IndexError:
release = None
try:
score = result.find_class('metascore_w')[0].text_content()
except IndexError:
score = None
return '[%s] %s - %s, %s -- %s' % (plat.upper(), name,
score or 'no score',
'release: %s' % release if release else 'unreleased',
link)


@ -1,60 +0,0 @@
import time
from util import hook, http
commands_modtime = 0
commands = {}
def update_commands(force=False):
global commands_modtime, commands
if force or time.time() - commands_modtime > 60 * 60: # update hourly
h = http.get_html('http://wiki.github.com/nslater/oblique/')
lines = h.xpath('//li/text()')
commands = {}
for line in lines:
if not line.strip():
continue
if line.strip().find(" ") == -1:
continue
name, url = line.strip().split(None, 1)
commands[name] = url
commands_modtime = time.time()
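# e.g. a wiki line like "google http://example.com/?q=${args}" (hypothetical) would map the
# command name "google" to that URL; ${nick}, ${sender} and ${args} are filled in below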
@hook.command('o')
@hook.command
def oblique(inp, nick='', chan=''):
'.o/.oblique <command> <args> -- runs <command> using oblique web' \
' services. see http://wiki.github.com/nslater/oblique/'
update_commands()
if ' ' in inp:
command, args = inp.split(None, 1)
else:
command = inp
args = ''
command = command.lower()
if command == 'refresh':
update_commands(True)
return '%d commands loaded.' % len(commands)
if command in commands:
url = commands[command]
url = url.replace('${nick}', nick)
url = url.replace('${sender}', chan)
url = url.replace('${args}', http.quote(args.encode('utf8')))
try:
return http.get(url)
except http.HTTPError, e:
return "http error %d" % e.code
else:
return 'no such service'

41
plugins/pony.py Normal file

@ -0,0 +1,41 @@
from util import hook
import datetime
import ponyapi
@hook.command
def when(inp, say=None):
#"Shows the countdown to the new episode of My Little Pony: Friendship is Magic!"
now = datetime.datetime(2006, 1, 1)
now = now.now()
ep = ponyapi.newest()
then = now.fromtimestamp(int(ep[u"air_date"]))
td = then-now
seasonep = ""
if ep[u"is_movie"]:
seasonep = "(a movie)"
else:
seasonep = "(season %d episode %d)" % (ep[u"season"], ep[u"episode"])
reply = "%s %s will air on %s in %d days!" % (
ep[u"name"], seasonep, then.strftime("%a, %d %b %Y %H:%M:%S"),
td.days)
return reply
@hook.command
def randomep(inp):
#"Shows a random episode of My Little Pony: Friendship is Magic"
ep = ponyapi.random()
seasonep = ""
if ep[u"is_movie"]:
seasonep = "(a movie)"
else:
seasonep = "(season %d episode %d)" % (ep[u"season"], ep[u"episode"])
return "%s %s" % (ep[u"name"], seasonep)

84
plugins/ponyapi.py Normal file

@ -0,0 +1,84 @@
import requests
"""
# PonyAPI module for Python programs
This is written in a metaprogramming style.
Usage:
```python
import ponyapi
episodes = ponyapi.all_episodes()
for episode in episodes:
print episode
```
Available methods:
all_episodes() -> return all information on all episodes
newest() -> return information on the newest episode
random() -> return a random episode
get_season(snum) -> return all episodes in season snum
get_episode(snum, enum) -> return info on season snum episode enum
search(query) -> return all episodes that have query in the title
"""
API_ENDPOINT = "http://ponyapi.apps.xeserv.us"
# _base_get :: Text -> Maybe [Text] -> (Maybe [Text] -> IO (Either Episode [Episode]))
# _base_get takes a text, a splatted list of texts and returns a function such that
# the function takes a splatted list of texts and returns either an Episode or
# a list of Episode as an IO action.
def _base_get(endpoint, *fragments):
def doer(*args):
r = None
assert len(fragments) == len(args)
if len(fragments) == 0:
r = requests.get(API_ENDPOINT + endpoint)
else:
url = API_ENDPOINT + endpoint
for i in range(len(fragments)):
url = url + "/" + fragments[i] + "/" + str(args[i])
r = requests.get(url)
if r.status_code != 200:
raise Exception("Not found or server error")
try:
return r.json()["episodes"]
except KeyError:
return r.json()["episode"]
return doer
# all_episodes :: IO [Episode]
all_episodes = _base_get("/all")
# newest :: IO Episode
newest = _base_get("/newest")
# random :: IO Episode
random = _base_get("/random")
# get_season :: Int -> IO [Episode]
get_season = _base_get("", "season")
# get_episode :: Int -> Int -> IO Episode
get_episode = _base_get("", "season", "episode")
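# e.g. get_episode(5, 3) issues GET http://ponyapi.apps.xeserv.us/season/5/episode/3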
# search :: Text -> IO [Episode]
def search(query):
params = {"q": query}
r = requests.get(API_ENDPOINT + "/search", params=params)
if r.status_code != 200:
raise Exception("Not found or server error")
return r.json()["episodes"]

17
plugins/printerfact.py Normal file

@ -0,0 +1,17 @@
from util import hook
import requests, re, random
regex = re.compile(re.escape("cat"), re.IGNORECASE)
kittenrex = re.compile(re.escape("kitten"), re.IGNORECASE)
@hook.regex("PHP sadness$")
def php_fact(inp):
return "http://phpsadness.com/sad/" + str(random.randint(0,53))
@hook.regex("(.*) fact$")
def printerfact(inp, say=None):
r = requests.get('https://catfacts-api.appspot.com/api/facts?number=1')
fact = r.json()['facts'][0]
inp = inp.group(1)
return kittenrex.sub("baby "+ inp, regex.sub(inp, fact))


@ -1,21 +0,0 @@
import re
from util import hook, http
re_lineends = re.compile(r'[\r\n]*')
@hook.command
def python(inp):
".python <prog> -- executes python code <prog>"
res = http.get("http://eval.appspot.com/eval", statement=inp).splitlines()
if len(res) == 0:
return
res[0] = re_lineends.split(res[0])[0]
if not res[0] == 'Traceback (most recent call last):':
return res[0].decode('utf8', 'ignore')
else:
return res[-1].decode('utf8', 'ignore')


@ -1,38 +0,0 @@
from util import hook, http
@hook.command('god')
@hook.command
def bible(inp):
".bible <passage> -- gets <passage> from the Bible (ESV)"
base_url = ('http://www.esvapi.org/v2/rest/passageQuery?key=IP&'
'output-format=plain-text&include-heading-horizontal-lines&'
'include-headings=false&include-passage-horizontal-lines=false&'
'include-passage-references=false&include-short-copyright=false&'
'include-footnotes=false&line-length=0&'
'include-heading-horizontal-lines=false')
text = http.get(base_url, passage=inp)
text = ' '.join(text.split())
if len(text) > 400:
text = text[:text.rfind(' ', 0, 400)] + '...'
return text
@hook.command('allah')
@hook.command
def koran(inp): # Koran look-up plugin by Ghetto Wizard
".koran <chapter.verse> -- gets <chapter.verse> from the Koran"
url = 'http://quod.lib.umich.edu/cgi/k/koran/koran-idx?type=simple'
results = http.get_html(url, q1=inp).xpath('//li')
if not results:
return 'No results for ' + inp
return results[0].text_content()


@ -1,37 +0,0 @@
from util import http, hook
api_root = 'http://api.rottentomatoes.com/api/public/v1.0/'
movie_search_url = api_root + 'movies.json'
movie_reviews_url = api_root + 'movies/%s/reviews.json'
@hook.api_key('rottentomatoes')
@hook.command('rt')
@hook.command
def rottentomatoes(inp, api_key=None):
'.rt <title> -- gets ratings for <title> from Rotten Tomatoes'
results = http.get_json(movie_search_url, q=inp, apikey=api_key)
if results['total'] == 0:
return 'no results'
movie = results['movies'][0]
title = movie['title']
id = movie['id']
critics_score = movie['ratings']['critics_score']
audience_score = movie['ratings']['audience_score']
url = movie['links']['alternate']
if critics_score == -1:
return
reviews = http.get_json(movie_reviews_url %
id, apikey=api_key, review_type='all')
review_count = reviews['total']
fresh = critics_score * review_count / 100
rotten = review_count - fresh
return u"%s - critics: \x02%d%%\x02 (%d\u2191%d\u2193)" \
" audience: \x02%d%%\x02 - %s" % (title, critics_score,
fresh, rotten, audience_score, url)

15
plugins/shitposting.py Normal file

@ -0,0 +1,15 @@
from util import hook
@hook.regex("thanks mr skeltal")
def skeltal(_):
return "https://www.youtube.com/watch?v=10pqeNBg5d0"
@hook.regex(r"(.*)")
def h(inp, channel=None, conn=None):
inp = inp.group(1)
if inp == "h":
return "h"
@hook.regex("dQw4w9WgXcQ")
def rickrollProtector(inp):
return "linked a rick roll, watch out"


@ -1,61 +0,0 @@
from util import hook, http
thread_re = r"(?i)forums\.somethingawful\.com/\S+threadid=(\d+)"
showthread = "http://forums.somethingawful.com/showthread.php?noseen=1"
def login(user, password):
http.jar.clear_expired_cookies()
if any(cookie.domain == 'forums.somethingawful.com' and
cookie.name == 'bbuserid' for cookie in http.jar):
if any(cookie.domain == 'forums.somethingawful.com' and
cookie.name == 'bbpassword' for cookie in http.jar):
return
assert("malformed cookie jar")
user = http.quote(user)
password = http.quote(password)
http.get("http://forums.somethingawful.com/account.php", cookies=True,
post_data="action=login&username=%s&password=%s" % (user, password))
@hook.api_key('somethingawful')
@hook.regex(thread_re)
def forum_link(inp, api_key=None):
if api_key is None or 'user' not in api_key or 'password' not in api_key:
return
login(api_key['user'], api_key['password'])
thread = http.get_html(showthread, threadid=inp.group(1), perpage='1',
cookies=True)
breadcrumbs = thread.xpath('//div[@class="breadcrumbs"]//a/text()')
if not breadcrumbs:
return
thread_title = breadcrumbs[-1]
forum_title = forum_abbrevs.get(breadcrumbs[-2], breadcrumbs[-2])
poster = thread.xpath('//dt[contains(@class, "author")]//text()')[0]
# 1 post per page => n_pages = n_posts
num_posts = thread.xpath('//a[@title="Last page"]/@href')
if not num_posts:
num_posts = 1
else:
num_posts = int(num_posts[0].rsplit('=', 1)[1])
return '\x02%s\x02 > \x02%s\x02 by \x02%s\x02, %s post%s' % (
forum_title, thread_title, poster, num_posts,
's' if num_posts > 1 else '')
forum_abbrevs = {
'Serious Hardware / Software Crap': 'SHSC',
'The Cavern of COBOL': 'CoC',
'General Bullshit': 'GBS',
'Haus of Tech Support': 'HoTS'
}


@ -1,40 +0,0 @@
# tf.py: written by ipsum
#
# This skybot plugin retrieves the number of items
# a given user has waiting from idling in Team Fortress 2.
from util import hook, http
@hook.command('hats')
@hook.command
def tf(inp):
""".tf/.hats <SteamID> -- Shows items waiting to be received in TF2."""
if inp.isdigit():
link = 'profiles'
else:
link = 'id'
url = 'http://steamcommunity.com/%s/%s/tfitems?json=1' % \
(link, http.quote(inp.encode('utf8'), safe=''))
try:
inv = http.get_json(url)
except ValueError:
return '%s is not a valid profile' % inp
dropped, dhats, hats = 0, 0, 0
for item, data in inv.iteritems():
ind = int(data['defindex'])
if data['inventory'] == 0:
if 47 <= ind <= 55 or 94 <= ind <= 126 or 134 <= ind <= 152:
dhats += 1
else:
dropped += 1
else:
if 47 <= ind <= 55 or 94 <= ind <= 126 or 134 <= ind <= 152:
hats += 1
return '%s has had %s items and %s hats drop (%s total hats)' % \
(inp, dropped, dhats, dhats + hats)


@ -1,211 +0,0 @@
'''
A Google API key is required and retrieved from the bot config file.
Since December 1, 2011, the Google Translate API is a paid service only.
'''
import htmlentitydefs
import re
from util import hook, http
api_key = ""
########### from http://effbot.org/zone/re-sub.htm#unescape-html #############
def unescape(text):
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text)
##############################################################################
def goog_trans(text, slang, tlang):
url = 'https://www.googleapis.com/language/translate/v2'
parsed = http.get_json(
url, key=api_key, q=text, source=slang, target=tlang)
if not 200 <= parsed['responseStatus'] < 300:
raise IOError('error with the translation server: %d: %s' % (
parsed['responseStatus'], parsed['responseDetails']))
if not slang:
return unescape('(%(detectedSourceLanguage)s) %(translatedText)s' %
(parsed['responseData']['data']['translations'][0]))
return unescape('%(translatedText)s' % parsed['responseData']['data']['translations'][0])
def match_language(fragment):
fragment = fragment.lower()
for short, _ in lang_pairs:
if fragment in short.lower().split():
return short.split()[0]
for short, full in lang_pairs:
if fragment in full.lower():
return short.split()[0]
return None
@hook.command
def translate(inp, bot=None):
'.translate [source language [target language]] <sentence> -- translates' \
' <sentence> from source language (default autodetect) to target' \
' language (default English) using Google Translate'
if not hasapikey(bot):
return None
args = inp.split(' ', 2)
try:
if len(args) >= 2:
sl = match_language(args[0])
if not sl:
return goog_trans(inp, '', 'en')
if len(args) == 2:
return goog_trans(args[1], sl, 'en')
if len(args) >= 3:
tl = match_language(args[1])
if not tl:
if sl == 'en':
return 'unable to determine desired target language'
return goog_trans(args[1] + ' ' + args[2], sl, 'en')
return goog_trans(args[2], sl, tl)
return goog_trans(inp, '', 'en')
except IOError, e:
return e
languages = 'ja fr de ko ru zh'.split()
language_pairs = zip(languages[:-1], languages[1:])
def babel_gen(inp):
for language in languages:
inp = inp.encode('utf8')
trans = goog_trans(inp, 'en', language).encode('utf8')
inp = goog_trans(trans, language, 'en')
yield language, trans, inp
@hook.command
def babel(inp, bot=None):
".babel <sentence> -- translates <sentence> through multiple languages"
if not hasapikey(bot):
return None
try:
return list(babel_gen(inp))[-1][2]
except IOError, e:
return e
@hook.command
def babelext(inp, bot=None):
".babelext <sentence> -- like .babel, but with more detailed output"
if not hasapikey(bot):
return None
try:
babels = list(babel_gen(inp))
except IOError, e:
return e
out = u''
for lang, trans, text in babels:
out += '%s:"%s", ' % (lang, text.decode('utf8'))
out += 'en:"' + babels[-1][2].decode('utf8') + '"'
if len(out) > 300:
out = out[:150] + ' ... ' + out[-150:]
return out
def hasapikey(bot):
api_key = bot.config.get("api_keys", {}).get("googletranslate", None)
return api_key
lang_pairs = [
("no", "Norwegian"),
("it", "Italian"),
("ht", "Haitian Creole"),
("af", "Afrikaans"),
("sq", "Albanian"),
("ar", "Arabic"),
("hy", "Armenian"),
("az", "Azerbaijani"),
("eu", "Basque"),
("be", "Belarusian"),
("bg", "Bulgarian"),
("ca", "Catalan"),
("zh-CN zh", "Chinese"),
("hr", "Croatian"),
("cs", "Czech"),
("da", "Danish"),
("nl", "Dutch"),
("en", "English"),
("et", "Estonian"),
("tl", "Filipino"),
("fi", "Finnish"),
("fr", "French"),
("gl", "Galician"),
("ka", "Georgian"),
("de", "German"),
("el", "Greek"),
("ht", "Haitian Creole"),
("iw", "Hebrew"),
("hi", "Hindi"),
("hu", "Hungarian"),
("is", "Icelandic"),
("id", "Indonesian"),
("ga", "Irish"),
("it", "Italian"),
("ja jp jpn", "Japanese"),
("ko", "Korean"),
("lv", "Latvian"),
("lt", "Lithuanian"),
("mk", "Macedonian"),
("ms", "Malay"),
("mt", "Maltese"),
("no", "Norwegian"),
("fa", "Persian"),
("pl", "Polish"),
("pt", "Portuguese"),
("ro", "Romanian"),
("ru", "Russian"),
("sr", "Serbian"),
("sk", "Slovak"),
("sl", "Slovenian"),
("es", "Spanish"),
("sw", "Swahili"),
("sv", "Swedish"),
("th", "Thai"),
("tr", "Turkish"),
("uk", "Ukrainian"),
("ur", "Urdu"),
("vi", "Vietnamese"),
("cy", "Welsh"),
("yi", "Yiddish")
]


@ -1,153 +0,0 @@
"""
TV information, written by Lurchington 2010
modified by rmmh 2010, 2013
"""
import datetime
from util import hook, http, timesince
base_url = "http://thetvdb.com/api/"
api_key = "469B73127CA0C411"
def get_episodes_for_series(seriesname):
res = {"error": None, "ended": False, "episodes": None, "name": None}
# http://thetvdb.com/wiki/index.php/API:GetSeries
try:
query = http.get_xml(base_url + 'GetSeries.php', seriesname=seriesname)
except http.URLError:
res["error"] = "error contacting thetvdb.com"
return res
series_id = query.xpath('//seriesid/text()')
if not series_id:
res["error"] = "unknown tv series (using www.thetvdb.com)"
return res
series_id = series_id[0]
try:
series = http.get_xml(base_url + '%s/series/%s/all/en.xml' %
(api_key, series_id))
except http.URLError:
res["error"] = "error contacting thetvdb.com"
return res
series_name = series.xpath('//SeriesName/text()')[0]
if series.xpath('//Status/text()')[0] == 'Ended':
res["ended"] = True
res["episodes"] = series.xpath('//Episode')
res["name"] = series_name
return res
def get_episode_info(episode):
episode_air_date = episode.findtext("FirstAired")
try:
airdate = datetime.date(*map(int, episode_air_date.split('-')))
except (ValueError, TypeError):
return None
episode_num = "S%02dE%02d" % (int(episode.findtext("SeasonNumber")),
int(episode.findtext("EpisodeNumber")))
episode_name = episode.findtext("EpisodeName")
# in the event of an unannounced episode title, users either leave the
# field out (None) or fill it with TBA
if episode_name == "TBA":
episode_name = None
episode_desc = '%s' % episode_num
if episode_name:
episode_desc += ' - %s' % episode_name
return (episode_air_date, airdate, episode_desc)
@hook.command
@hook.command('tv')
def tv_next(inp):
".tv_next <series> -- get the next episode of <series>"
episodes = get_episodes_for_series(inp)
if episodes["error"]:
return episodes["error"]
series_name = episodes["name"]
ended = episodes["ended"]
episodes = episodes["episodes"]
if ended:
return "%s has ended." % series_name
next_eps = []
today = datetime.date.today()
for episode in reversed(episodes):
ep_info = get_episode_info(episode)
if ep_info is None:
continue
(episode_air_date, airdate, episode_desc) = ep_info
if airdate > today:
next_eps = ['%s (%s) (%s)' % (episode_air_date, timesince.timeuntil(
datetime.datetime.strptime(episode_air_date, "%Y-%m-%d")), episode_desc)]
elif airdate == today:
next_eps = ['Today (%s)' % episode_desc] + next_eps
else:
# we're iterating in reverse order with newest episodes last
# so, as soon as we're past today, break out of loop
break
if not next_eps:
return "there are no new episodes scheduled for %s" % series_name
if len(next_eps) == 1:
return "the next episode of %s airs %s" % (series_name, next_eps[0])
else:
next_eps = ', '.join(next_eps)
return "the next episodes of %s: %s" % (series_name, next_eps)
@hook.command
@hook.command('tv_prev')
def tv_last(inp):
".tv_last <series> -- gets the most recently aired episode of <series>"
episodes = get_episodes_for_series(inp)
if episodes["error"]:
return episodes["error"]
series_name = episodes["name"]
ended = episodes["ended"]
episodes = episodes["episodes"]
prev_ep = None
today = datetime.date.today()
for episode in reversed(episodes):
ep_info = get_episode_info(episode)
if ep_info is None:
continue
(episode_air_date, airdate, episode_desc) = ep_info
if airdate < today:
# iterating in reverse order, so the first episode encountered
# before today was the most recently aired
prev_ep = '%s (%s)' % (episode_air_date, episode_desc)
break
if not prev_ep:
return "there are no previously aired episodes for %s" % series_name
if ended:
return '%s ended. The last episode aired %s' % (series_name, prev_ep)
return "the last episode of %s aired %s" % (series_name, prev_ep)


@ -1,93 +0,0 @@
import random
import re
from time import strptime, strftime
from urllib import quote
from util import hook, http
@hook.api_key('twitter')
@hook.command
def twitter(inp, api_key=None):
".twitter <user>/<user> <n>/<id>/#<search>/#<search> <n> -- " \
"get <user>'s last/<n>th tweet/get tweet <id>/do <search>/get <n>th <search> result"
if not isinstance(api_key, dict) or any(key not in api_key for key in
('consumer', 'consumer_secret', 'access', 'access_secret')):
return "error: api keys not set"
getting_id = False
doing_search = False
index_specified = False
if re.match(r'^\d+$', inp):
getting_id = True
request_url = "https://api.twitter.com/1.1/statuses/show.json?id=%s" % inp
else:
try:
inp, index = re.split('\s+', inp, 1)
index = int(index)
index_specified = True
except ValueError:
index = 0
if index < 0:
index = 0
if index >= 20:
return 'error: only supports up to the 20th tweet'
if re.match(r'^#', inp):
doing_search = True
request_url = "https://api.twitter.com/1.1/search/tweets.json?q=%s" % quote(inp)
else:
request_url = "https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=%s" % inp
try:
tweet = http.get_json(request_url, oauth=True, oauth_keys=api_key)
except http.HTTPError, e:
errors = {400: 'bad request (ratelimited?)',
401: 'unauthorized',
403: 'forbidden',
404: 'invalid user/id',
500: 'twitter is broken',
502: 'twitter is down ("getting upgraded")',
503: 'twitter is overloaded (lol, RoR)',
410: 'twitter shut off api v1.'}
if e.code == 404:
return 'error: invalid ' + ['username', 'tweet id'][getting_id]
if e.code in errors:
return 'error: ' + errors[e.code]
return 'error: unknown %s' % e.code
if doing_search:
try:
tweet = tweet["statuses"]
if not index_specified:
index = random.randint(0, len(tweet) - 1)
except KeyError:
return 'error: no results'
if not getting_id:
try:
tweet = tweet[index]
except IndexError:
return 'error: not that many tweets found'
if 'retweeted_status' in tweet:
rt = tweet["retweeted_status"]
rt_text = http.unescape(rt["text"]).replace('\n', ' ')
text = "RT @%s %s" % (rt["user"]["screen_name"], rt_text)
else:
text = http.unescape(tweet["text"]).replace('\n', ' ')
screen_name = tweet["user"]["screen_name"]
time = tweet["created_at"]
time = strftime('%Y-%m-%d %H:%M:%S',
strptime(time, '%a %b %d %H:%M:%S +0000 %Y'))
return "%s \x02%s\x02: %s" % (time, screen_name, text)
@hook.api_key('twitter')
@hook.regex(r'https?://twitter.com/(#!/)?([_0-9a-zA-Z]+)/status/(\d+)')
def show_tweet(match, api_key=None):
return twitter(match.group(3), api_key)

28
plugins/urbit.py Normal file

@ -0,0 +1,28 @@
names = "dozmarbinwansamlitsighidfidlissogdirwacsabwissibrigsoldopmodfoglidhopdardorlorhodfolrintogsilmirholpaslacrovlivdalsatlibtabhanticpidtorbolfosdotlosdilforpilramtirwintadbicdifrocwidbisdasmidloprilnardapmolsanlocnovsitnidtipsicropwitnatpanminritpodmottamtolsavposnapnopsomfinfonbanporworsipronnorbotwicsocwatdolmagpicdavbidbaltimtasmalligsivtagpadsaldivdactansidfabtarmonranniswolmispallasdismaprabtobrollatlonnodnavfignomnibpagsopralbilhaddocridmocpacravripfaltodtiltinhapmicfanpattaclabmogsimsonpinlomrictapfirhasbosbatpochactidhavsaplindibhosdabbitbarracparloddosbortochilmactomdigfilfasmithobharmighinradmashalraglagfadtopmophabnilnosmilfopfamdatnoldinhatnacrisfotribhocnimlarfitwalrapsarnalmoslandondanladdovrivbacpollaptalpitnambonrostonfodponsovnocsorlavmatmipfap"
endings = "zodnecbudwessevpersutletfulpensytdurwepserwylsunrypsyxdyrnuphebpeglupdepdysputlughecryttyvsydnexlunmeplutseppesdelsulpedtemledtulmetwenbynhexfebpyldulhetmevruttylwydtepbesdexsefwycburderneppurrysrebdennutsubpetrulsynregtydsupsemwynrecmegnetsecmulnymtevwebsummutnyxrextebfushepbenmuswyxsymselrucdecwexsyrwetdylmynmesdetbetbeltuxtugmyrpelsyptermebsetdutdegtexsurfeltudnuxruxrenwytnubmedlytdusnebrumtynseglyxpunresredfunrevrefmectedrusbexlebduxrynnumpyxrygryxfeptyrtustyclegnemfermertenlusnussyltecmexpubrymtucfyllepdebbermughuttunbylsudpemdevlurdefbusbeprunmelpexdytbyttyplevmylwedducfurfexnulluclennerlexrupnedlecrydlydfenwelnydhusrelrudneshesfetdesretdunlernyrsebhulrylludremlysfynwerrycsugnysnyllyndyndemluxfedsedbecmunlyrtesmudnytbyrsenwegfyrmurtelreptegpecnelnevfes"
def split_len(seq, length):
return [seq[i:i+length] for i in range(0, len(seq), length)]
prefix = split_len(names, 3)
suffix = split_len(endings, 3)
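# each octet (0-255) indexes a three-letter syllable in each table,
# e.g. ipv4tourbit("0.0.0.0") == "~dozzod-dozzod"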
def ipv4tourbit(ip):
ip = map(lambda x: int(x), ip.split("."))
return "~%s%s-%s%s" % (prefix[ip[0]], suffix[ip[1]], prefix[ip[2]], suffix[ip[3]])
from util import hook
import random
@hook.command
def urbit(name):
random.seed(name if len(name) > 0 else None)
ip = "%d.%d.%d.%d" % (
random.randint(0,255),
random.randint(0,255),
random.randint(0,255),
random.randint(0,255),
)
return ipv4tourbit(ip)


@ -1,25 +0,0 @@
'''
Runs a given url through the w3c validator
by Vladi
'''
from util import hook, http
@hook.command
def validate(inp):
".validate <url> -- runs url through w3c markup validator"
if not inp.startswith('http://'):
inp = 'http://' + inp
url = 'http://validator.w3.org/check?uri=' + http.quote_plus(inp)
info = dict(http.open(url).info())
status = info['x-w3c-validator-status'].lower()
if status in ("valid", "invalid"):
errorcount = info['x-w3c-validator-errors']
warningcount = info['x-w3c-validator-warnings']
return "%s was found to be %s with %s errors and %s warnings." \
" see: %s" % (inp, status, errorcount, warningcount, url)


@ -1,122 +0,0 @@
"weather, thanks to wunderground"
from util import hook, http
@hook.api_key('wunderground')
@hook.command(autohelp=False)
def weather(inp, chan='', nick='', reply=None, db=None, api_key=None):
".weather <location> [dontsave] | @<nick> -- gets weather data from Wunderground "\
"http://wunderground.com/weather/api"
if not api_key:
return None
# this database is used by other plugins interested in user's locations,
# like .near in tag.py
db.execute(
"create table if not exists location(chan, nick, loc, lat, lon, primary key(chan, nick))")
if inp[0:1] == '@':
nick = inp[1:].strip()
loc = None
dontsave = True
else:
dontsave = inp.endswith(" dontsave")
# strip off the " dontsave" text if it exists and set it back to `inp` so we don't report it
# back to the user incorrectly
if dontsave:
inp = inp[:-9].strip().lower()
loc = inp
if not loc: # blank line
loc = db.execute(
"select loc from location where chan=? and nick=lower(?)",
(chan, nick)).fetchone()
if not loc:
try:
# grab from old-style weather database
loc = db.execute("select loc from weather where nick=lower(?)",
(nick,)).fetchone()
except db.OperationalError:
pass # no such table
if not loc:
return weather.__doc__
loc = loc[0]
loc, _, state = loc.partition(', ')
# Check to see if a lat, long pair is being passed. This could be done more
# completely with regex, and converting from DMS to decimal degrees. This
# is nice and simple, however.
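# e.g. "37.77, -122.42" splits into loc="37.77", state="-122.42", passes both float()
# checks, and is re-joined as "37.77,-122.42" for the query URL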
try:
float(loc)
float(state)
loc = loc + ',' + state
state = ''
except ValueError:
if state:
state = http.quote_plus(state)
state += '/'
loc = http.quote(loc)
url = 'http://api.wunderground.com/api/'
query = '{key}/geolookup/conditions/forecast/q/{state}{loc}.json' \
.format(key=api_key, state=state, loc=loc)
url += query
try:
parsed_json = http.get_json(url)
except IOError:
return 'Could not get data from Wunderground'
info = {}
if 'current_observation' not in parsed_json:
resp = 'Could not find weather for {inp}. '.format(inp=inp)
# In the case of no observation, but results, print some possible
# location matches
if 'results' in parsed_json['response']:
resp += 'Possible matches include: '
results = parsed_json['response']['results']
for place in results[:6]:
resp += '{city}, '.format(**place)
if place['state']:
resp += '{state}, '.format(**place)
if place['country_name']:
resp += '{country_name}; '.format(**place)
resp = resp[:-2]
reply(resp)
return
obs = parsed_json['current_observation']
sf = parsed_json['forecast']['simpleforecast']['forecastday'][0]
info['city'] = obs['display_location']['full']
info['t_f'] = obs['temp_f']
info['t_c'] = obs['temp_c']
info['weather'] = obs['weather']
info['h_f'] = sf['high']['fahrenheit']
info['h_c'] = sf['high']['celsius']
info['l_f'] = sf['low']['fahrenheit']
info['l_c'] = sf['low']['celsius']
info['humid'] = obs['relative_humidity']
info['wind'] = 'Wind: {mph}mph/{kph}kph' \
.format(mph=obs['wind_mph'], kph=obs['wind_kph'])
reply('{city}: {weather}, {t_f}F/{t_c}C'
'(H:{h_f}F/{h_c}C L:{l_f}F/{l_c}C)'
', Humidity: {humid}, {wind}'.format(**info))
lat = float(obs['display_location']['latitude'])
lon = float(obs['display_location']['longitude'])
if inp and not dontsave:
db.execute("insert or replace into location(chan, nick, loc, lat, lon) "
"values (?, ?, ?, ?,?)", (chan, nick.lower(), inp, lat, lon))
db.commit()


@ -1,55 +0,0 @@
'''Searches wikipedia and returns first sentence of article
Scaevolus 2009'''
import re
from util import hook, http
api_prefix = "http://en.wikipedia.org/w/api.php"
search_url = api_prefix + "?action=opensearch&format=xml"
random_url = api_prefix + "?action=query&format=xml&list=random&rnlimit=1&rnnamespace=0"
paren_re = re.compile('\s*\(.*\)$')
@hook.command('w')
@hook.command(autohelp=False)
def wiki(inp):
'''.w/.wiki <phrase> -- gets first sentence of wikipedia ''' \
'''article on <phrase>'''
if inp == "":
r = http.get_xml(random_url)
inp = r.find('.//page').get('title')
x = http.get_xml(search_url, search=inp)
ns = '{http://opensearch.org/searchsuggest2}'
items = x.findall(ns + 'Section/' + ns + 'Item')
if items == []:
if x.find('error') is not None:
return 'error: %(code)s: %(info)s' % x.find('error').attrib
else:
return 'no results found'
def extract(item):
return [item.find(ns + x).text for x in
('Text', 'Description', 'Url')]
title, desc, url = extract(items[0])
if 'may refer to' in desc:
title, desc, url = extract(items[1])
title = paren_re.sub('', title)
if title.lower() not in desc.lower():
desc = title + desc
desc = re.sub('\s+', ' ', desc).strip() # remove excess spaces
if len(desc) > 300:
desc = desc[:300] + '...'
return '%s -- %s' % (desc, http.quote(http.unquote(url), ':/'))


@ -1,50 +0,0 @@
import re
from util import hook, http
@hook.api_key('wolframalpha')
@hook.command('wa')
@hook.command
def wolframalpha(inp, api_key=None):
".wa/.wolframalpha <query> -- computes <query> using Wolfram Alpha"
url = 'http://api.wolframalpha.com/v2/query?format=plaintext'
result = http.get_xml(url, input=inp, appid=api_key)
pod_texts = []
for pod in result.xpath("//pod"):
title = pod.attrib['title']
if pod.attrib['id'] == 'Input':
continue
results = []
for subpod in pod.xpath('subpod/plaintext/text()'):
subpod = subpod.strip().replace('\\n', '; ')
subpod = re.sub(r'\s+', ' ', subpod)
if subpod:
results.append(subpod)
if results:
pod_texts.append(title + ': ' + '|'.join(results))
ret = '. '.join(pod_texts)
if not pod_texts:
return 'no results'
ret = re.sub(r'\\(.)', r'\1', ret)
def unicode_sub(match):
return unichr(int(match.group(1), 16))
ret = re.sub(r'\\:([0-9a-z]{4})', unicode_sub, ret)
if len(ret) > 430:
ret = ret[:ret.rfind(' ', 0, 430)]
ret = re.sub(r'\W+$', '', ret) + '...'
if not ret:
return 'no results'
return ret


@ -1,86 +0,0 @@
import re
import time
from util import hook, http
youtube_re = (r'(?:youtube.*?(?:v=|/v/)|youtu\.be/|yooouuutuuube.*?id=)'
'([-_a-z0-9]+)', re.I)
base_url = 'https://www.googleapis.com/youtube/v3/'
info_url = base_url + 'videos?part=snippet,contentDetails,statistics'
search_api_url = base_url + 'search'
video_url = 'http://youtube.com/watch?v=%s'
def get_video_description(vid_id, api_key):
j = http.get_json(info_url, id=vid_id, key=api_key)
if not j['pageInfo']['totalResults']:
return
j = j['items'][0]
duration = j['contentDetails']['duration'].replace('PT', '').lower()
published = time.strptime(j['snippet']['publishedAt'],
"%Y-%m-%dT%H:%M:%S.000Z")
published = time.strftime("%Y.%m.%d", published)
views = group_int_digits(j['statistics']['viewCount'], ',')
out = (u'\x02{snippet[title]}\x02 - length \x02{duration}\x02 - '
u'{statistics[likeCount]}\u2191{statistics[dislikeCount]}\u2193 - '
u'\x02{views}\x02 views - '
u'\x02{snippet[channelTitle]}\x02 on \x02{published}\x02'
).format(duration=duration, views=views, published=published, **j)
# TODO: figure out how to detect NSFW videos
return out
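# e.g. group_int_digits('1234567', ',') -> '1,234,567'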
def group_int_digits(number, delimiter=' ', grouping=3):
base = str(number).strip()
builder = []
while base:
builder.append(base[-grouping:])
base = base[:-grouping]
builder.reverse()
return delimiter.join(builder)
@hook.api_key('google')
@hook.regex(*youtube_re)
def youtube_url(match, api_key=None):
return get_video_description(match.group(1), api_key)
@hook.api_key('google')
@hook.command('yt')
@hook.command('y')
@hook.command
def youtube(inp, api_key=None):
'.youtube <query> -- returns the first YouTube search result for <query>'
params = {
'key': api_key,
'fields': 'items(id,snippet(channelId,title))',
'part': 'snippet',
'type': 'video',
'q': inp
}
j = http.get_json(search_api_url, **params)
if 'error' in j:
return 'error while performing the search'
results = j.get("items")
if not results:
return 'no results found'
vid_id = j['items'][0]['id']['videoId']
return get_video_description(vid_id, api_key) + " - " + video_url % vid_id