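# .snopes plugin: searches snopes.com via its site search and replies with
# the first matching article's claim, status, and URL.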
import re

from util import hook, http
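# Atomz hosted site-search endpoint used to query snopes.com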
search_url = "http://search.atomz.com/search/?sp_a=00062d45-sp00000000"


@hook.command
def snopes(inp):
    ".snopes <topic> -- searches snopes for an urban legend about <topic>"
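    # run the query through the site search and collect the result links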
    search_page = http.get_html(search_url, sp_q=inp, sp_c="1")
    result_urls = search_page.xpath("//a[@target='_self']/@href")

    if not result_urls:
        return "no matching pages found"
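    # fetch the first hit and work with its plain text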
    snopes_page = http.get_html(result_urls[0])
    snopes_text = snopes_page.text_content()

    # pull out the one-line "Claim:" and "Status:" summaries; give up if the
    # page doesn't have them (e.g. a category index page)
    claim = re.search(r"Claim: .*", snopes_text)
    status = re.search(r"Status: .*", snopes_text)

    if claim is None or status is None:
        return "no claim found on " + result_urls[0]

    claim = re.sub(r"[\s\xa0]+", " ", claim.group(0))  # compress whitespace
    status = re.sub(r"[\s\xa0]+", " ", status.group(0))

    return "%s %s %s" % (claim, status, result_urls[0])