'''Searches Wikipedia and returns the first sentence of an article.
Scaevolus 2009'''

import re

from util import hook, http


api_prefix = "http://en.wikipedia.org/w/api.php"
search_url = api_prefix + "?action=opensearch&format=xml"
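# For reference, a rough sketch of the reply the opensearch request above is
# expected to return (inferred from the searchsuggest2 namespace and the
# element names parsed in wiki() below; exact attributes and extra fields may
# vary by MediaWiki version):
#
#   <SearchSuggestion xmlns="http://opensearch.org/searchsuggest2">
#     <Query>...</Query>
#     <Section>
#       <Item>
#         <Text>article title</Text>
#         <Description>first sentence of the article</Description>
#         <Url>link to the article</Url>
#       </Item>
#       ...
#     </Section>
#   </SearchSuggestion>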
random_url = api_prefix + "?action=query&format=xml&list=random&rnlimit=1&rnnamespace=0"
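# The random query above is assumed to return XML along these lines (only the
# title attribute of the first <page> is used below):
#
#   <api><query><random><page id="..." ns="0" title="some article"/></random></query></api>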

# strips a trailing parenthetical, e.g. "Title (disambiguation)" -> "Title"
paren_re = re.compile(r'\s*\(.*\)$')


@hook.command('w')
@hook.command(autohelp=False)
def wiki(inp):
    '''.w/.wiki <phrase> -- gets first sentence of Wikipedia ''' \
        '''article on <phrase>'''

    # no search phrase given: fall back to a random article title
    if inp == "":
        r = http.get_xml(random_url)
        inp = r.find('.//page').get('title')

    x = http.get_xml(search_url, search=inp)

    # all elements in the reply live in the searchsuggest2 namespace
    ns = '{http://opensearch.org/searchsuggest2}'
    items = x.findall(ns + 'Section/' + ns + 'Item')

    if items == []:
        if x.find('error') is not None:
            return 'error: %(code)s: %(info)s' % x.find('error').attrib
        else:
            return 'no results found'

    # pull the title, description (first sentence), and URL out of an <Item>
    def extract(item):
        return [item.find(ns + tag).text for tag in
                ('Text', 'Description', 'Url')]

    title, desc, url = extract(items[0])

    # disambiguation pages say "may refer to"; skip to the next result
    if 'may refer to' in desc:
        title, desc, url = extract(items[1])

    title = paren_re.sub('', title)

    # prepend the title if the description doesn't already mention it
    if title.lower() not in desc.lower():
        desc = title + desc

    desc = re.sub(r'\s+', ' ', desc).strip()  # remove excess spaces

    # keep replies short enough for IRC
    if len(desc) > 300:
        desc = desc[:300] + '...'

    return '%s -- %s' % (desc, http.quote(http.unquote(url), ':/'))
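

# Usage sketch (assuming a skybot-style plugin loader wires hook.command up to
# IRC): ".w <phrase>" or ".wiki <phrase>" in a channel should get back the
# first sentence of the matching article followed by its URL, and ".wiki" with
# no argument falls back to a random article, as implemented above.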