2010-01-17 04:24:36 +00:00
|
|
|
"""
|
|
|
|
twitter.py: written by Scaevolus 2009
|
|
|
|
retrieves most recent tweets
|
|
|
|
"""
|
|
|
|
|
|
|
|
import random
|
2010-04-23 03:47:41 +00:00
|
|
|
import re
|
2010-01-17 04:24:36 +00:00
|
|
|
from time import strptime, strftime
|
|
|
|
|
2010-04-23 03:47:41 +00:00
|
|
|
from util import hook, http
|
2010-01-17 04:24:36 +00:00
|
|
|
|
|
|
|
|
|
|
|
def unescape_xml(string):
    """Unescape the five standard XML character entities in *string*.

    Twitter's XML API returns tweet text with ``&gt;``, ``&lt;``,
    ``&#39;``, ``&quot;`` and ``&amp;`` escaped; this maps them back to
    their literal characters.
    """
    # &amp; must be replaced LAST: otherwise a literal "&amp;gt;" in the
    # input would first become "&gt;" and then be wrongly turned into ">".
    return string.replace('&gt;', '>').replace('&lt;', '<').replace('&#39;',
        "'").replace('&quot;', '"').replace('&amp;', '&')
|
|
|
|
|
|
|
|
# Cache of recently seen replies as (screen_name, tweet_id) pairs,
# newest first; consulted by the ".twitter @<user>" lookup path.
history = []

# Upper bound on the cache so it cannot grow without limit; the oldest
# entry is evicted once this size is reached.
history_max_size = 250
|
|
|
|
|
2010-03-01 02:32:41 +00:00
|
|
|
|
2010-01-17 04:24:36 +00:00
|
|
|
@hook.command
def twitter(inp):
    # NOTE: this string is the bot's runtime help text, not a comment —
    # it must stay exactly as written.
    ".twitter <user>/<user> <n>/<id>/#<hashtag>/@<user> -- gets last/<n>th "\
        "tweet from <user>/gets tweet <id>/gets random tweet with #<hashtag>/"\
        "gets replied tweet from @<user>"

    def add_reply(reply_name, reply_id):
        # Record a (name, id) pair in the module-level reply cache,
        # evicting the oldest entry once the cap is hit.
        if len(history) == history_max_size:
            history.pop()
        history.insert(0, (reply_name, reply_id))

    def find_reply(reply_name):
        # Most-recent-first scan of the reply cache; returns the cached
        # tweet id, or the name itself when the sentinel -1 was stored
        # (reply had a user but no status id). Falls through to None
        # when nothing matches.
        for name, id in history:
            if name == reply_name:
                return id if id != -1 else name

    # "@user" form: resolve through the reply cache, then continue as if
    # the resolved id/name had been requested directly.
    if inp[0] == '@':
        reply_inp = find_reply(inp[1:])
        if reply_inp == None:
            return 'error: no replies to %s found' % inp
        inp = reply_inp

    url = 'http://twitter.com'
    getting_nth = False
    getting_id = False
    searching_hashtag = False

    # Default XPath-ish element paths for the /users/show response,
    # where the latest tweet is nested under <status>.
    time = 'status/created_at'
    text = 'status/text'
    reply_name = 'status/in_reply_to_screen_name'
    reply_id = 'status/in_reply_to_status_id'
    reply_user = 'status/in_reply_to_user_id'

    # Dispatch on the input shape: numeric id, bare username,
    # "username n", or "#hashtag".
    if re.match(r'^\d+$', inp):
        # Tweet id: /statuses/show returns the tweet at the top level,
        # so all element paths lose their "status/" prefix.
        getting_id = True
        url += '/statuses/show/%s.xml' % inp
        screen_name = 'user/screen_name'
        time = 'created_at'
        text = 'text'
        reply_name = 'in_reply_to_screen_name'
        reply_id = 'in_reply_to_status_id'
        reply_user = 'in_reply_to_user_id'
    elif re.match(r'^\w{1,15}$', inp):
        # Bare username (Twitter caps names at 15 chars).
        url += '/users/show/%s.xml' % inp
        screen_name = 'screen_name'
    elif re.match(r'^\w{1,15}\s+\d+$', inp):
        # "<user> <n>": fetch the nth most recent tweet via paging
        # (count=1, page=n). The API only reaches back 3200 tweets.
        getting_nth = True
        name, num = inp.split()
        if int(num) > 3200:
            return 'error: only supports up to the 3200th tweet'
        url += '/statuses/user_timeline/%s.xml?count=1&page=%s' % (name, num)
        screen_name = 'status/user/screen_name'
    elif re.match(r'^#\w+$', inp):
        # Hashtag: use the search API (Atom feed) instead.
        url = 'http://search.twitter.com/search.atom?q=%23' + inp[1:]
        searching_hashtag = True
    else:
        return 'error: invalid request'

    try:
        tweet = http.get_xml(url)
    except http.HTTPError, e:
        # Map the common Twitter HTTP failures to friendly messages.
        errors = {400: 'bad request (ratelimited?)',
                  401: 'tweet is private',
                  404: 'invalid user/id',
                  500: 'twitter is broken',
                  502: 'twitter is down ("getting upgraded")',
                  503: 'twitter is overloaded (lol, RoR)'}
        if e.code == 404:
            # Tailor the 404 message to what was being looked up;
            # bool getting_id indexes the two-element list.
            return 'error: invalid ' + ['username', 'tweet id'][getting_id]
        if e.code in errors:
            return 'error: ' + errors[e.code]
        return 'error: unknown'
    except http.URLerror, e:
        return 'error: timeout'

    if searching_hashtag:
        # Atom search results: pick a random entry, extract the numeric
        # tweet id from the tail of its <id> URI, and recurse so the
        # id-handling branch above does the rendering.
        ns = '{http://www.w3.org/2005/Atom}'
        tweets = tweet.findall(ns + 'entry/' + ns + 'id')
        if not tweets:
            return 'error: hashtag not found'
        id = random.choice(tweets).text
        id = id[id.rfind(':') + 1:]
        return twitter(id)

    if getting_nth:
        if tweet.find('status') is None:
            return 'error: user does not have that many tweets'

    time = tweet.find(time)
    if time is None:
        return 'error: user has no tweets'

    # Cache reply info so a later ".twitter @<name>" can find this
    # tweet; -1 marks "replying to a user but no specific status".
    reply_name = tweet.find(reply_name).text
    reply_id = tweet.find(reply_id).text
    reply_user = tweet.find(reply_user).text
    if reply_name is not None and (reply_id is not None or
                                   reply_user is not None):
        add_reply(reply_name, reply_id if reply_id else -1)

    # Reformat Twitter's timestamp ("Sat Jan 16 21:12:42 +0000 2010")
    # into ISO-ish "YYYY-MM-DD HH:MM:SS".
    time = strftime('%Y-%m-%d %H:%M:%S',
                    strptime(time.text,
                             '%a %b %d %H:%M:%S +0000 %Y'))
    text = unescape_xml(tweet.find(text).text.replace('\n', ''))
    screen_name = tweet.find(screen_name).text

    return "%s %s: %s" % (time, screen_name, text)
|