# Part of Acrobat.
-import string, cPickle, random, urllib, sys, time, re, os
+import string, cPickle, random, urllib, sys, time, re, os, twitter
from irclib import irc_lower, nm_to_n
# query karma
# help - provides the URL of the help file
def helpq(bot, cmd, nick, conn, public):
    """Point the asker at the bot's online help page."""
    helptext = "For help see http://www.chiark.greenend.org.uk/~matthewv/irc/servus.html"
    bot.automsg(public, nick, helptext)
# query bot status
conn.notice(nick, "syntax: units arg1 as arg2")
return
if args[1]=='?':
- sin,sout=os.popen2(["units","--verbose",args[0]],"r")
+ sin,sout=os.popen4(["units","--verbose","--",args[0]],"r")
else:
- sin,sout=os.popen2(["units","--verbose",args[0],args[1]],"r")
+ sin,sout=os.popen4(["units","--verbose","--",args[0],args[1]],"r")
sin.close()
res=sout.readlines()
#popen2 doesn't clean up the child properly. Do this by hand
bot.automsg(public,nick,
"Configuration can only be reloaded by my owner, by /msg.")
-# lose the game and/or install a new trigger word
-def gameq(bot, cmd, nick, conn, public, game):
- #only install a new trigger if it's not too short.
- if len(' '.join(cmd.split()[1:]))>2:
- game.trigger=' '.join(cmd.split()[1:])
- if (time.time()> game.grace):
- if not public:
- if irc_lower(nick) == irc_lower(bot.owner):
- conn.action(bot.channel,"loses the game!")
- else:
- conn.privmsg(bot.channel,nick+" just lost the game!")
- else:
- if not public:
- conn.notice(nick, "It's a grace period!")
- game.grace=time.time()+60*20 #20 minutes' grace
- game.losetime=time.time()+random.randrange(game.minlose,game.maxlose)
- conn.notice(bot.owner, str(game.losetime-time.time())+" "+game.trigger)
-
# quit irc
def quitq(bot, cmd, nick, conn, public):
if irc_lower(nick) == irc_lower(bot.owner):
# Look up the definition of something using google
def defineq(bot, cmd, nick, conn, public):
cmdrest = string.join(cmd.split()[1:])
- targ = ("http://www.google.com/search?q=define%%3A%s&ie=utf-8&oe=utf-8"
+ targ = ("http://www.google.co.uk/search?q=define%%3A%s&ie=utf-8&oe=utf-8"
% urllib.quote_plus(cmdrest))
try:
# Just slurp everything into a string
# This is of course going to be a bit fragile. We first look for
# 'Definitions of %s on the Web' -- if this isn't present we
# assume we have the 'no definitions found page'.
- # The first defn starts after the following <p> tag.
+ # The first defn starts after the following <p> tag, but as the
+ # first <li> in a <ul type="disc" class=std>
# Following that we assume that each definition is all the non-markup
# before a <br> tag. Currently we just dump out the first definition.
- match = re.search(r"Definitions of <b>.*?</b> on the Web.*?<p>\s*([^>]*)<br>",defnpage,re.MULTILINE)
+ match = re.search(r"Definitions of <b>.*?</b> on the Web.*?<li>\s*([^>]*)((<br>)|(<li>))",defnpage,re.MULTILINE)
if match == None:
bot.automsg(public,nick,"Some things defy definition.")
else:
# We assume google has truncated the definition for us so this
# won't flood the channel with text...
- defn = " ".join(match.group(1).split("\n"));
+ defn = " ".join(match.group(1).split("\n"))
bot.automsg(public,nick,defn)
except IOError: # if the connection times out. This blocks. :(
bot.automsg(public,nick,"The web's broken. Waah!")
b=a[13:]+a[:13]
trans=string.maketrans(a+a.upper(),b+b.upper())
conn.notice(nick, string.join(cmd.split()[1:]).translate(trans))
+
+### URL-tracking stuff
+
### return an easy-to-read approximation of a time period
def nicetime(tempus):
    """Render a duration *tempus* (in seconds) as a rough human-readable age.

    Returns "N seconds ago" below two minutes, "N minutes ago" below two
    hours, and "N hours ago" from two hours upward.
    """
    if tempus < 120:
        return "%d seconds ago" % int(tempus)
    if tempus < 7200:
        return "%d minutes ago" % int(tempus / 60)
    # The original tested `tempus > 7200` here, so exactly 7200 fell through
    # every branch and hit an UnboundLocalError on `tm`; an unconditional
    # fallback closes that gap.
    return "%d hours ago" % int(tempus / 3600)
+
### class to store URL data
class UrlLog:
    "contains meta-data about a URL seen on-channel"
    def __init__(self, url, nick):
        now = time.time()
        self.nick = nick        # who first mentioned it
        self.url = url          # canonical form of the URL
        self.first = now        # when it was first mentioned
        self.count = 1          # how many times it has been seen
        self.lastseen = now     # last on-channel sighting
        self.lastasked = now    # last explicit 'url' query about it
    def recenttime(self):
        # Most recent of the two activity timestamps; used for expiry.
        return self.lastasked if self.lastasked > self.lastseen else self.lastseen
    def firstmen(self):
        # Human-readable age of the first mention.
        return nicetime(time.time() - self.first)
    def urltype(self):
        # Pick an escalating complaint, capped at the last entry.
        idx = min(self.count, len(urlcomplaints)) - 1
        return urlcomplaints[idx]
+
# Regexes for spotting URLs.  (?:...) is a regexp group that doesn't capture.
# Accepted schemes are http(s) plus the bot's private nsfw(s) marker scheme.
urlre = re.compile(r"((?:(?:http)|(?:nsfw))s?://[^ ]+)( |$)")
# Splits an http(s) URL into the "http" prefix and the remainder, so a sub()
# callback can swap the scheme while keeping the rest of the URL intact.
hturlre= re.compile(r"(http)(s?://[^ ]+)( |$)")
# Matches \bre\:?\s+ before a URL; (?i) == case insensitive match.  A leading
# "re:" marks a deliberate re-post, which suppresses the repeat complaint.
shibboleth = re.compile(r"(?i)\bre\:?\s+((?:(?:http)|(?:nsfw))s?://[^ ]+)( |$)")
# Escalating descriptions indexed by how often the URL has been seen (capped).
urlcomplaints = ["a contemporary","an interesting","a fascinating","an overused","a vastly overused"]
+
### Deal with /msg bot url or ~url in channel
def urlq(bot, cmd, nick, conn, public, urldb):
    """Answer an explicit url query: complain about a previously seen URL,
    or record a new one and repost it (to the channel or #urls)."""
    found = urlre.search(cmd)
    if not found:
        bot.automsg(False,nick,"Please use 'url' only with http, https, nsfw, or nsfws URLs")
        return

    urlstring = found.group(1)
    url = canonical_url(urlstring)

    def narrate():
        # Line to repost: keep the asker's commentary when the URL was not
        # the first argument, otherwise just credit them.
        rest = " ".join(cmd.split()[1:])
        if urlstring != cmd.split()[1]:  # first argument to URL was not the url
            return "%s remarks: %s" % (nick, rest)
        return "(via %s) %s" % (nick, rest)

    if url in urldb:
        entry = urldb[url]
        complaint = "That's %s URL that was first mentioned %s by %s" % \
                    (entry.urltype(), entry.firstmen(), entry.nick)
        if public:
            complaint = complaint + ". Furthermore it defeats the point of this command to use it other than via /msg."
        entry.count += 1
        bot.automsg(False, nick, complaint)
        entry.lastasked = time.time()
        # URL suppressed, so mention in #urls
        conn.privmsg("#urls", narrate())
    else:
        if public:
            bot.automsg(False,nick,"That URL was unique. There is little point in using !url out loud; please use it via /msg")
        else:
            conn.privmsg(bot.channel, narrate())
            bot.automsg(False,nick,"That URL was unique; I have posted it into IRC")
        urldb[url] = UrlLog(url, nick)
+
### Deal with URLs spotted in channel
def dourl(bot, conn, nick, command, urldb):
    """Track a URL seen in ordinary channel traffic, teasing repeats
    unless the poster marked the line as a deliberate 're:' re-post."""
    seen = canonical_url(urlre.search(command).group(1))

    if seen not in urldb:
        # First sighting: just start tracking it.
        urldb[seen] = UrlLog(seen, nick)
        return

    entry = urldb[seen]
    if shibboleth.search(command) is None:
        conn.action(bot.channel, "observes %s URL, first mentioned %s by %s" %
                    (entry.urltype(), entry.firstmen(), entry.nick))
    entry.lastseen = time.time()
    entry.count += 1
+
### Expire old urls
def urlexpire(urldb, expire):
    """Delete entries whose last activity is more than *expire* seconds ago.

    Mutates *urldb* in place; each value must provide recenttime().
    """
    now = time.time()
    # Snapshot the keys before deleting: the original iterated urldb.keys()
    # directly, which only works because Python 2 returns a copy — under a
    # live key view, deleting during iteration raises RuntimeError.
    for u in list(urldb.keys()):
        if now - urldb[u].recenttime() > expire:
            del urldb[u]
+
# canonicalise BBC URLs (internal use only)
def canonical_url(urlstring):
    """Normalise a URL so equivalent spellings collide in the url database.

    nsfw:// and nsfws:// schemes are rewritten to http(s), and BBC News
    low-graphics/mobile paths are folded onto the canonical /hi/ form.
    Returns the (possibly rewritten) URL string.
    """
    if "nsfw://" in urlstring or "nsfws://" in urlstring:
        urlstring = urlstring.replace("nsfw", "http", 1)
    if urlstring.find("news.bbc.co.uk") != -1:
        for middle in ("/low/", "/mobile/"):
            if middle in urlstring:
                # str.replace returns a new string; the original discarded the
                # result, so /low/ and /mobile/ URLs were never rewritten.
                urlstring = urlstring.replace(middle, "/hi/")
    return urlstring
+
# automatically make nsfw urls for you and pass them on to url
def nsfwq(bot, cmd, nick, conn, public, urldb):
    """Rewrite any http(s) URL in cmd to the nsfw(s) scheme, then hand the
    rewritten command straight to the url machinery."""
    if hturlre.search(cmd) is None:
        bot.automsg(False,nick,"Please use 'nsfw' only with http or https URLs")
        return
    newcmd = hturlre.sub(nsfwify, cmd)
    urlq(bot, newcmd, nick, conn, public, urldb)
+
def nsfwify(match):
    """re.sub callback: swap a matched URL's "http" prefix for "nsfw",
    keeping the rest of the URL and its terminator unchanged."""
    _scheme, remainder, terminator = match.groups()
    return 'nsfw' + remainder + terminator
+
#get tweet text
def twitterq(bot, cmd, nick, conn, public, twitapi):
    """If cmd carries a twitter.com URL, fetch the tweet's text via
    *twitapi* and relay it back to the asker."""
    found = urlre.search(cmd)
    if found is None:
        bot.automsg(False,nick,"Please use 'twit' only with http URLs")
        return
    urlstring = found.group(1)
    if "twitter.com" in urlstring:
        bot.automsg(public, nick, getTweet(urlstring, twitapi))
+
def getTweet(urlstring, twitapi):
    """Look up the status whose numeric ID ends *urlstring* and format
    "tweet by <screen_name> (<name>): <text>".

    Twitter API failures are reported as a string rather than raised, so
    callers can relay them to IRC directly.
    """
    tweetID = urlstring.split('/')[-1]
    try:
        status = twitapi.GetStatus(tweetID)
        screen = status.user.screen_name.encode('UTF-8', 'replace')
        realname = status.user.name.encode('UTF-8', 'replace')
        text = status.text.encode('UTF-8', 'replace')
        stringout = "tweet by %s (%s): %s" % (screen, realname, text)
    except twitter.TwitterError:
        terror = sys.exc_info()
        stringout = "Twitter error: %s" % terror[1].__str__()
    return stringout