2010-11-11 16:39:40 +00:00
|
|
|
import re
|
|
|
|
|
|
|
|
from util import hook, http
|
|
|
|
|
|
|
|
|
|
|
|
search_url = "http://search.atomz.com/search/?sp_a=00062d45-sp00000000"
|
|
|
|
|
|
|
|
|
|
|
|
@hook.command
|
|
|
|
def snopes(inp):
|
2010-11-12 05:18:32 +00:00
|
|
|
".snopes <topic> -- searches snopes for an urban legend about <topic>"
|
|
|
|
|
2010-11-11 16:39:40 +00:00
|
|
|
search_page = http.get_html(search_url, sp_q=inp, sp_c="1")
|
|
|
|
result_urls = search_page.xpath("//a[@target='_self']/@href")
|
|
|
|
|
|
|
|
if not result_urls:
|
|
|
|
return "no matching pages found"
|
|
|
|
|
|
|
|
snopes_page = http.get_html(result_urls[0])
|
|
|
|
snopes_text = snopes_page.text_content()
|
|
|
|
|
2010-12-02 21:32:23 +00:00
|
|
|
claim = re.search(r"Claim: .*", snopes_text).group(0).strip()
|
|
|
|
status = re.search(r"Status: .*", snopes_text)
|
|
|
|
|
|
|
|
if status is not None:
|
|
|
|
status = status.group(0).strip()
|
2011-05-11 20:40:04 +00:00
|
|
|
else: # new-style statuses
|
2010-12-02 21:32:23 +00:00
|
|
|
status = "Status: %s." % re.search(r"FALSE|TRUE|MIXTURE|UNDETERMINED",
|
2014-01-14 21:12:37 +00:00
|
|
|
snopes_text).group(0).title()
|
2010-11-11 16:39:40 +00:00
|
|
|
|
|
|
|
claim = re.sub(r"[\s\xa0]+", " ", claim) # compress whitespace
|
|
|
|
status = re.sub(r"[\s\xa0]+", " ", status)
|
|
|
|
|
2010-11-12 05:18:32 +00:00
|
|
|
return "%s %s %s" % (claim, status, result_urls[0])
|