send tweets as UTF-8
[irc.git] / commands.py
index b2df479f8eef6f4ef0c0a2834f94721053942fd8..6049640b62fddce7749fa01f1d5b5b3c83b3099b 100644
@@ -1,5 +1,5 @@
 # Part of Acrobat.
-import string, cPickle, random, urllib, sys, time, re, os
+import string, cPickle, random, urllib, sys, time, re, os, twitter
 from irclib import irc_lower, nm_to_n
 
 # query karma
@@ -36,7 +36,7 @@ def karmadelq(bot, cmd, nick, conn, public, karma):
 # help - provides the URL of the help file
 def helpq(bot, cmd, nick, conn, public):
     bot.automsg(public,nick,
-                "For help see http://www.pick.ucam.org/~matthew/irc/servus.html")
+                "For help see http://www.chiark.greenend.org.uk/~matthewv/irc/servus.html")
 
 
 # query bot status
@@ -294,3 +294,125 @@ def rot13q(bot, cmd, nick, conn, public):
     b=a[13:]+a[:13]
     trans=string.maketrans(a+a.upper(),b+b.upper())
     conn.notice(nick, string.join(cmd.split()[1:]).translate(trans))
+
+### URL-tracking stuff
+
+### return an easy-to-read approximation of a time period
+def nicetime(tempus):
+  if (tempus<120):
+    tm="%d seconds ago"%int(tempus)
+  elif (tempus<7200):
+    tm="%d minutes ago"%int(tempus/60)
+  else:
+    tm="%d hours ago"%int(tempus/3600)
+  return tm
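+# e.g. nicetime(90) -> "90 seconds ago", nicetime(3600) -> "60 minutes ago",
+# nicetime(90000) -> "25 hours ago" (illustrative values)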
+
+### class to store URL data
+class UrlLog:
+    "contains meta-data about a URL seen on-channel"
+    def __init__(self,url,nick):
+        self.nick=nick
+        self.url=url
+        self.first=time.time()
+        self.count=1
+        self.lastseen=time.time()
+        self.lastasked=time.time()
+    def recenttime(self):
+        return max(self.lastseen,self.lastasked)
+    def firstmen(self):
+        return nicetime(time.time()-self.first)
+    def urltype(self):
+        z=min(len(urlcomplaints)-1, self.count-1)
+        return urlcomplaints[z]
+
+# (?:...) is a non-capturing group, so group(1) below is the whole URL
+urlre = re.compile("((?:(?:http)|(?:nsfw))s?://[^ ]+)( |$)")
+urlcomplaints = ["a contemporary","an interesting","a fascinating","an overused","a vastly overused"]
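+# urldb (maintained by the caller) maps canonical URL strings to UrlLog
+# instances; e.g. urlre.search("see http://example.com/x please").group(1)
+# gives "http://example.com/x", and once a URL's count reaches 5 it is
+# described as "a vastly overused" one (illustrative example)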
+
+### Deal with /msg bot url or ~url in channel
+def urlq(bot, cmd, nick, conn, public,urldb):
+  if (not urlre.search(cmd)):
+    bot.automsg(False,nick,"Please use 'url' only with http, https, nsfw, or nsfws URLs")
+    return
+
+  urlstring=urlre.search(cmd).group(1)
+  url=canonical_url(urlstring)
+  if (url in urldb):
+    T = urldb[url]
+    complaint="That's %s URL that was first mentioned %s by %s" % \
+               (T.urltype(),T.firstmen(),T.nick)
+    if (public):
+      complaint=complaint+". Furthermore it defeats the point of this command to use it other than via /msg."
+      T.count+=1
+    bot.automsg(False,nick,complaint)
+    T.lastasked=time.time()
+  else:
+    if (public):
+      bot.automsg(False,nick,"That URL was unique. There is little point in using !url out loud; please use it via /msg")
+    else:
+      if urlstring != cmd.split()[1]: #first argument to URL was not the url
+        conn.privmsg(bot.channel,"%s remarks: %s" % (nick," ".join(cmd.split()[1:])))
+      else:
+        conn.privmsg(bot.channel,"(via %s) %s"%(nick," ".join(cmd.split()[1:])))
+      bot.automsg(False,nick,"That URL was unique; I have posted it into IRC")
+    urldb[url]=UrlLog(url,nick)
+
+### Deal with URLs spotted in channel
+def dourl(bot,conn,nick,command,urldb):
+  urlstring=urlre.search(command).group(1)
+  urlstring=canonical_url(urlstring)
+
+  if urlstring in urldb:
+    T=urldb[urlstring]
+    message="observes %s URL, first mentioned %s by %s" % \
+             (T.urltype(),T.firstmen(),T.nick)
+    conn.action(bot.channel, message)
+    T.lastseen=time.time()
+    T.count+=1
+  else:
+    urldb[urlstring]=UrlLog(urlstring,nick)
+
+### Expire old urls
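+# (e.g. urlexpire(urldb, 3600) would forget any URL neither seen nor asked
+# about for an hour; the expiry period is chosen by the caller)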
+def urlexpire(urldb,expire):
+    urls=urldb.keys()
+    for u in urls:
+        if time.time() - urldb[u].recenttime() > expire:
+            del urldb[u]
+
+# canonicalise nsfw:// and BBC news URLs (internal use only)
+def canonical_url(urlstring):
+  if "nsfw://" in urlstring or "nsfws://" in urlstring:
+      urlstring=urlstring.replace("nsfw","http",1)
+  if (urlstring.find("news.bbc.co.uk") != -1):
+    for middle in ("/low/","/mobile/"):
+      x = urlstring.find(middle)
+      if (x != -1):
+        urlstring=urlstring.replace(middle,"/hi/")
+  return urlstring
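+# e.g. canonical_url("nsfw://example.com/x") -> "http://example.com/x";
+# canonical_url("http://news.bbc.co.uk/low/story") ->
+# "http://news.bbc.co.uk/hi/story" (illustrative URLs)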
+
+
+# get the text of a tweet, given its twitter.com status URL
+def twitterq(bot,cmd,nick,conn,public,twitapi):
+
+  if (not urlre.search(cmd)):
+    bot.automsg(False,nick,"Please use 'twit' only with http URLs")
+    return
+
+  urlstring = urlre.search(cmd).group(1)
+  if (urlstring.find("twitter.com") !=-1):
+    stringout = getTweet(urlstring,twitapi)
+    bot.automsg(public, nick, stringout)
+
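+# look up a status by the ID at the end of its URL and format it for IRC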
+def getTweet(urlstring,twitapi):
+  parts = string.split(urlstring,'/')
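+  # the tweet ID is assumed to be the final path component; a trailing slash
+  # or query string would defeat this simple split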
+  tweetID = parts[-1]
+  try:
+    status = twitapi.GetStatus(tweetID)
+    tweeter_screen = status.user.screen_name.encode('UTF-8', 'replace')
+    tweeter_name = status.user.name.encode('UTF-8', 'replace')
+    tweetText = status.text.encode('UTF-8', 'replace')
+    stringout = "tweet by %s (%s): %s" %(tweeter_screen,tweeter_name,tweetText)
+  except twitter.TwitterError:
+    terror = sys.exc_info()
+    stringout = "Twitter error: %s" % terror[1].__str__()
+  return stringout
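+# presumably invoked as "twit <url>" (dispatch is not shown in this hunk):
+# e.g. "twit http://twitter.com/someuser/status/123456789" looks up status
+# 123456789 and replies "tweet by someuser (Some User): ..." if the lookup
+# succeeds (hypothetical URL and names)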