new trout from rejs ( https://twitter.com/kingsbookstore/status/1620432807196778496 )
[irc.git] / commands.py
index 140d7c5faf27ee866728a123dfb3a34f5166f1c5..eec69e7f416dd0d085f60e8b08dda26e1ae3b57b 100755 (executable)
@@ -1,6 +1,13 @@
 # Part of Acrobat.
-import string, cPickle, random, urllib, sys, time, re, os, twitter, subprocess, datetime
+import string, cPickle, random, urllib, sys, time, re, os, twitter, subprocess, datetime, urlparse, hashlib
+from collections import defaultdict
 from irclib import irc_lower, nm_to_n
+import json
+
+try:
+    from blame_filter import bfd
+except ImportError:
+    bfd = None
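+# bfd is assumed to map md5 hexdigests of commit messages to replacement
+# text, e.g. a hypothetical entry:
+#   bfd = { "9e107d9d372bb6826bd81d3542a419d6": "replacement message" }
+# It is consulted in __getcommitinfo() below.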
 
 # query karma
 def karmaq(bot, cmd, nick, conn, public, karma):
@@ -47,6 +54,16 @@ def infoq(bot, cmd, nick, conn, public, karma):
        (bot.revision.split()[1], bot.channel, conn.get_nickname(),
         bot.owner, len(karma.keys())))
 
+class FishPond:
+    def __init__(fishpond):
+       fishpond.last=[]
+       fishpond.DoS=0
+       fishpond.quotatime=0
+
+    def note_last(fishpond, msg, cfg):
+       fishpond.last.insert(0,(msg,cfg))
+       fishpond.last = fishpond.last[0:10]
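+    # last[0] is the newest (message, cfg) pair and only ten are kept,
+    # so e.g. last[1] is the previous fish; blameq's "#last n" indexes this.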
+
 # Check on fish stocks
 def fish_quota(pond):
     if pond.DoS:
@@ -87,7 +104,7 @@ def troutq(bot, cmd, nick, conn, public, cfg):
        return
     me = bot.connection.get_nickname()
     trout_msg = random.choice(fishlist)
-    fishpond.last=trout_msg
+    fishpond.note_last(trout_msg,cfg)
     # The bot won't trout or flirt with itself; it picks on the requester instead
     if irc_lower(me) == irc_lower(target) or irc_lower(target) in synonyms:
         target = nick
@@ -128,7 +145,7 @@ def slashq(bot, cmd, nick, conn, public, cfg):
        return
     me = bot.connection.get_nickname()
     slash_msg = random.choice(fishlist)
-    fishpond.last=slash_msg
+    fishpond.note_last(slash_msg,cfg)
     # The bot won't slash people with themselves
     if irc_lower(who[0]) == irc_lower(who[1]):
        conn.notice(nick, "oooooh no missus!")
@@ -256,7 +273,7 @@ def currencyq(bot, cmd, nick, conn, public):
     targ = ("http://www.xe.com/ucc/convert.cgi?From=%s&To=%s" % (args[0], args[1]))
     try:
         currencypage = urllib.urlopen(targ).read()
-        match = re.search(r"(1 %s = [\d\.]+ %s)" % (args[0],args[1]),currencypage,re.MULTILINE)
+        match = re.search(r"(1 %s = [\d\.]+ %s)" % (args[0].upper(),args[1].upper()),currencypage,re.MULTILINE)
         if match == None:
             bot.automsg(public,nick,"Dear Chief Secretary, there is no money.")
         else:
@@ -278,8 +295,12 @@ def __getcommitinfo(commit):
        return(err)
 
     ts,mes=out.split('|')
+    mes=mes.strip()
+    md5mes=hashlib.md5(mes).hexdigest()
+    if bfd and md5mes in bfd:
+        mes=bfd[md5mes]
     when=datetime.date.fromtimestamp(float(ts))
-    return mes.strip(), when
+    return mes, when
 
 ###Return an array of commit messages and timestamps for lines in db that match what
 def __getcommits(db,keys,what):
@@ -302,14 +323,30 @@ def __getall(tdb,tdbk,fdb,fdbk,sdb,sdbk,what):
     sans=__getcommits(sdb,sdbk,what)
     return tans+fans+sans
 
-def blameq(bot,cmd,nick,conn,public,fish,tdb,tdbk,fdb,fdbk,sdb,sdbk):
+def blameq(bot,cmd,nick,conn,public,fishpond,cfgs):
+    tdb,tdbk,x = cfgs[0][7] # urgh, magic, to support magic knowledge below
+    fdb,fdbk,x = cfgs[1][7]
+    sdb,sdbk,x = cfgs[2][7]
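+    # each cfgs[i][7] is assumed to be a (db, dbkeys, kindsfile) triple;
+    # cf. the lcfg[7] unpacking in the #last branch below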
     clist=cmd.split()
     if len(clist) < 2:
        bot.automsg(public,nick,"Who or what do you want to blame?")
        return
     cwhat=' '.join(clist[2:])
+    kindsfile = "fish?"
     if clist[1]=="#last":
-       ans=__getall(tdb,tdbk,fdb,fdbk,sdb,sdbk,fish.last)
+        try:
+            n = abs(int(clist[2]))-1
+            if n < 0: raise ValueError
+        except IndexError: n = 0
+        except ValueError:
+            bot.automsg(public,nick,"Huh?")
+            return
+        try: lmsg, lcfg = fishpond.last[n]
+        except IndexError:
+            bot.automsg(public,nick,"Nothing")
+            return
+        xdb,xdbk,kindsfile = lcfg[7]
+        ans=__getcommits(xdb,xdbk,lmsg)
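+        # e.g. "blame #last 2" picks fishpond.last[1], the second-most-
+        # recent fish, and searches only the database that fish came from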
     elif clist[1]=="#trouts" or clist[1]=="#trout":
        ans=__getcommits(tdb,tdbk,cwhat)
     elif clist[1]=="#flirts" or clist[1]=="#flirt":
@@ -325,7 +362,7 @@ def blameq(bot,cmd,nick,conn,public,fish,tdb,tdbk,fdb,fdbk,sdb,sdbk):
        if len(ans[0])==1:
            bot.automsg(public,nick,ans[0])
        else:
-           bot.automsg(public,nick,"Modified %s: %s" % (ans[0][2].isoformat(),ans[0][1]))
+           bot.automsg(public,nick,"Modified %s %s: %s" % (kindsfile, ans[0][2].isoformat(),ans[0][1]))
     elif len(ans)>4:
        bot.automsg(public,nick,"I found %d matches, which is too many. Please be more specific!" % (len(ans)) )
     else:
@@ -333,7 +370,7 @@ def blameq(bot,cmd,nick,conn,public,fish,tdb,tdbk,fdb,fdbk,sdb,sdbk):
            if len(a)==1:
                bot.automsg(public,nick,a)
            else:
-               bot.automsg(public,nick,"'%s' modified on %s: %s" % (a[0],a[2].isoformat(),a[1]))
+               bot.automsg(public,nick,"%s '%s' modified on %s: %s" % (kindsfile, a[0],a[2].isoformat(),a[1]))
 
 ### say to msg/channel            
 def sayq(bot, cmd, nick, conn, public):
@@ -392,23 +429,33 @@ class UrlLog:
         self.nick=nick
         self.url=url
         self.first=time.time()
+        self.localfirst=time.localtime(self.first)
         self.count=1
         self.lastseen=time.time()
         self.lastasked=time.time()
     def recenttime(self):
         return max(self.lastseen,self.lastasked)
     def firstmen(self):
-        return nicetime(time.time()-self.first)
+        n=time.localtime(time.time())
+        s="%02d:%02d" % (self.localfirst.tm_hour,self.localfirst.tm_min)
+        if n.tm_yday != self.localfirst.tm_yday:
+            s+=time.strftime(" on %d %B", self.localfirst)
+        return s
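+    # illustrative: firstmen() gives "14:03" for a URL first seen today,
+    # or e.g. "14:03 on 12 January" for older ones (hypothetical values)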
     def urltype(self):
-        z=min(len(urlcomplaints)-1, self.count-1)
-        return urlcomplaints[z]
+        z=min(len(urlinfos)-1, self.count-1)
+        return urlinfos[z]
 
 #(?:) makes a regexp group that doesn't capture
 urlre = re.compile(r"((?:(?:http)|(?:nsfw))s?://[^ ]+)( |$)")
 hturlre= re.compile(r"(http)(s?://[^ ]+)( |$)")
 #matches \bre\:?\s+ before a URL; (?i)==case insensitive match
 shibboleth = re.compile(r"(?i)\bre\:?\s+((?:(?:http)|(?:nsfw))s?://[^ ]+)( |$)")
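 #e.g. matches "Re: https://example.com/x" (hypothetical URL); used in
 #dourl below so a deliberate re-mention draws no comment from the bot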
-urlcomplaints = ["a contemporary","an interesting","a fascinating","an overused","a vastly overused"]
+#How long (in s) to wait since the most recent mention before commenting
+url_repeat_time = 300
+urlinfos = ["a new",
+            "a fascinating",
+            "an interesting",
+            "a popular"]
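+# urltype() clamps at the end of this list: the first mention rates
+# "a new" URL, the fourth and every later mention "a popular" one.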
 
 ### Deal with /msg bot url or ~url in channel
 def urlq(bot, cmd, nick, conn, public,urldb):
@@ -420,12 +467,12 @@ def urlq(bot, cmd, nick, conn, public,urldb):
   url=canonical_url(urlstring)
   if (url in urldb):
     T = urldb[url]
-    complaint="That's %s URL that was first mentioned %s by %s" % \
-               (T.urltype(),T.firstmen(),T.nick)
+    comment="I saw that URL in scrool, first mentioned by %s at %s" % \
+               (T.nick,T.firstmen())
     if (public):
-      complaint=complaint+". Furthermore it defeats the point of this command to use it other than via /msg."
+      comment=comment+". Furthermore it defeats the point of this command to use it other than via /msg."
       T.count+=1
-    bot.automsg(False,nick,complaint)
+    bot.automsg(False,nick,comment)
     T.lastasked=time.time()
     #URL suppressed, so mention in #urls
     if urlstring != cmd.split()[1]: #first argument to URL was not the url
@@ -440,7 +487,6 @@ def urlq(bot, cmd, nick, conn, public,urldb):
         conn.privmsg(bot.channel,"%s remarks: %s" % (nick," ".join(cmd.split()[1:])))
       else:
         conn.privmsg(bot.channel,"(via %s) %s"%(nick," ".join(cmd.split()[1:])))
-      bot.automsg(False,nick,"That URL was unique; I have posted it into IRC")
     urldb[url]=UrlLog(url,nick)
 
 ### Deal with URLs spotted in channel
@@ -450,9 +496,10 @@ def dourl(bot,conn,nick,command,urldb):
 
   if urlstring in urldb:
     T=urldb[urlstring]
-    message="observes %s URL, first mentioned %s by %s" % \
-             (T.urltype(),T.firstmen(),T.nick)
-    if shibboleth.search(command)==None:
+    message="saw that URL in scrool, first mentioned by %s at %s" % \
+             (T.nick,T.firstmen())
+    if shibboleth.search(command)==None and \
+       time.time() - T.lastseen > url_repeat_time:
         conn.action(bot.channel, message)
     T.lastseen=time.time()
     T.count+=1
@@ -493,33 +540,141 @@ def nsfwify(match):
 def twitterq(bot,cmd,nick,conn,public,twitapi):
   
   if (not urlre.search(cmd)):
-    bot.automsg(False,nick,"Please use 'twit' only with http URLs")
+    bot.automsg(False,nick,"Please use 'twit' only with http or https URLs")
     return
 
   urlstring = urlre.search(cmd).group(1)
   if (urlstring.find("twitter.com") !=-1):
-    stringout = getTweet(urlstring,twitapi)
-    bot.automsg(public, nick, stringout)
+    stringsout = getTweet(urlstring,twitapi)
+    for stringout in stringsout:
+        bot.automsg(public, nick, stringout)
   
-def getTweet(urlstring,twitapi):
-  parts = string.split(urlstring,'/')
-  tweetID = parts[-1]
+def getTweet(urlstring,twitapi,inclusion=False,recurlvl=0):
+  unobfuscate_urls=True
+  expand_included_tweets=True
+  stringsout=[]
+
+  path = urlparse.urlparse(urlstring).path
+  tweetID = path.split('/')[-1]
   try:
     status = twitapi.GetStatus(tweetID)
-    print status, type(status), status=={}
     if status == {}:
-        return "twitapi.GetStatus returned nothing :-("
+        # callers iterate over the returned list, so wrap error strings too
+        return ["twitapi.GetStatus returned nothing :-("]
     if status.user == None and status.text == None:
-        return "Empty status object returned :("
+        return ["Empty status object returned :("]
     if status.user is not None:
-        tweeter_screen = status.user.screen_name.encode('UTF-8', 'replace')
-        tweeter_name = status.user.name.encode('UTF-8', 'replace')
+        tweeter_screen = status.user.screen_name #.encode('UTF-8', 'replace')
+        tweeter_name = status.user.name #.encode('UTF-8', 'replace')
     else:
         tweeter_screen = "[not returned]" ; tweeter_name = "[not returned]"
+    if status.retweeted_status and status.retweeted_status.text:
+        # for an RT, report the retweeted tweet, crediting retweeter and author
+        status = status.retweeted_status
+        tweeter_name = tweeter_name + " RTing " + status.user.name #.encode('UTF-8', 'replace')
-    tweetText = status.text.encode('UTF-8', 'replace')
-    tweetText = tweetText.replace('\n',' ')
+    tweetText = status.full_text
+    if status.media:
+        replacements = defaultdict(list)
+
+        for medium in status.media:
+            replacements[medium.url].append(medium.media_url_https)
+
+        # The twitter-api 'conveniently' parses this for you and
+        # throws away the actual video URLs, so we have to take the
+        # JSON and reparse it :sadpanda:
+        # This is particularly annoying because we don't know
+        # for sure that status.media and the JSON 'media' entry
+        # have the same elements in the same order.  Probably they
+        # do but maybe twitter-api randomly reorganised things or
+        # filtered the list or something.  So instead we go through
+        # the JSON and handle the media urls, discarding whatever
+        # unfortunate thing we have put in replacements already.
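+        # The relevant JSON is assumed to look roughly like this
+        # (abridged, hypothetical values):
+        #   {"media": [{"type": "video", "url": "https://t.co/abc",
+        #               "video_info": {"duration_millis": "30500",
+        #                 "variants": [{"content_type": "video/mp4",
+        #                               "bitrate": 832000,
+        #                               "url": "https://video.twimg.com/x.mp4?tag=1"}]}}]}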
+        parsed_tweet = json.loads(status.AsJsonString())
+        for medium in parsed_tweet.get('media', []):
+            if medium['type'] == 'video':
+                best = { 'bitrate': -1 }
+                for vt in medium['video_info']['variants']:
+                    if (vt.get('content_type') == 'video/mp4' and
+                        vt.get('bitrate', -1) > best['bitrate']):
+                        best = vt
+                if 'url' in best:
+                    video_url = best['url'].split('?',1)[0]
+                    duration = medium['video_info']['duration_millis']
+                    # ^ duration_millis is a string
+                    duration = "%.1f" % (float(duration)/1000.)
+                    video_desc = "%s (%ss)" % (video_url, duration)
+                    replacements[medium['url']] = [video_desc]
+
+        for k,v in replacements.items():
+            if len(v) > 1:
+                replacementstring = "[" +  " ; ".join(v) +"]"
+            else:
+                replacementstring = v[0]
+            tweetText = tweetText.replace(k, replacementstring)
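+        # e.g. two photos attached via one t.co link render as
+        # "[https://pbs.twimg.com/a.jpg ; https://pbs.twimg.com/b.jpg]"
+        # (hypothetical media URLs)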
+
+    for url in status.urls:
+        toReplace = url.expanded_url
+
+        if unobfuscate_urls:
+            rv = urlparse.urlparse(toReplace)
+            if rv.hostname in {
+                # sourced from http://bit.do/list-of-url-shorteners.php
+                "bit.do", "t.co", "lnkd.in", "db.tt", "qr.ae", "adf.ly",
+                "goo.gl", "bitly.com", "cur.lv", "tinyurl.com", "ow.ly",
+                "bit.ly", "adcrun.ch", "ity.im", "q.gs", "viralurl.com",
+                "is.gd", "po.st", "vur.me", "bc.vc", "twitthis.com", "u.to",
+                "j.mp", "buzurl.com", "cutt.us", "u.bb", "yourls.org",
+                "crisco.com", "x.co", "prettylinkpro.com", "viralurl.biz",
+                "adcraft.co", "virl.ws", "scrnch.me", "filoops.info", "vurl.bz",
+                "vzturl.com", "lemde.fr", "qr.net", "1url.com", "tweez.me",
+                "7vd.cn", "v.gd", "dft.ba", "aka.gr", "tr.im",
+                 # added by ASB:
+                 "trib.al", "dlvr.it"
+                               }:
+                #expand list as needed.
+                response = urllib.urlopen('http://urlex.org/txt/' + toReplace)
+                resptext = response.read()
+                if resptext.startswith('http'): # ie it looks urlish (http or https)
+                    if resptext != toReplace:
+                        toReplace = resptext
+                    # maybe make a note of the domain of the original URL to compile list of shortenable domains?
+
+        # remove tracking utm_ query parameters, for privacy and brevity
+        # code snippet from https://gist.github.com/lepture/5997883
+        rv = urlparse.urlparse(toReplace)
+        if rv.query:
+            query = re.sub(r'utm_\w+=[^&]+&?', '', rv.query)
+            if query:
+                toReplace = '%s://%s%s?%s' % (rv.scheme, rv.hostname, rv.path, query)
+            else:
+                toReplace = '%s://%s%s' % (rv.scheme, rv.hostname, rv.path) # leave off the final '?'
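+        # illustrative: "http://example.com/p?utm_source=x&id=3" becomes
+        # "http://example.com/p?id=3"; a query that was nothing but utm_
+        # parameters disappears along with its '?'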
+
+        if expand_included_tweets:
+            if rv.hostname == 'twitter.com' and re.search(r'status/\d+',rv.path):
+                if recurlvl > 2:
+                  stringsout = [ "{{ Recursion level too high }}" ] + stringsout
+                else:
+                  quotedtweet = getTweet(toReplace, twitapi, inclusion=True, recurlvl=recurlvl+1) # recurlvl limits recursion depth; inclusion defers UTF-8 encoding to the top-level call.
+                  if not quotedtweet:
+                      quotedtweet = [""]
+                  quotedtweet[0] = "Q{ " + quotedtweet[0]
+                  quotedtweet[-1] += " }"
+                  stringsout = quotedtweet + stringsout
+
+        tweetText = tweetText.replace(url.url, toReplace)
+
+    tweetText = tweetText.replace("&gt;",">")
+    tweetText = tweetText.replace("&lt;","<")
+    tweetText = tweetText.replace("&amp;","&")
+    tweetText = tweetText.replace("\n"," ")
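+    # "&amp;" must be done last: doing it first would turn "&amp;gt;"
+    # into "&gt;" and then, wrongly, into ">"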
     stringout = "tweet by %s (%s): %s" %(tweeter_screen,tweeter_name,tweetText)
   except twitter.TwitterError:
     terror = sys.exc_info()
     stringout = "Twitter error: %s" % terror[1].__str__()
-  return stringout
+  except Exception:
+    terror = sys.exc_info()
+    stringout = "Error: %s" % terror[1].__str__()
+  stringsout = [stringout] + stringsout
+  if inclusion:
+      return stringsout # don't want to double-encode it, so just pass it on for now and encode later
+
+  return map(lambda x: x.encode('UTF-8', 'replace'), stringsout)
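+  # callers receive a list of UTF-8 byte strings, main tweet first, e.g.
+  # (hypothetical): ["tweet by a (A): outer...", "Q{ tweet by b (B): inner }"]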