X-Git-Url: http://www.chiark.greenend.org.uk/ucgi/~matthewv/git?a=blobdiff_plain;f=commands.py;h=5f436c63fd2b41f95a62399a5893a294939517db;hb=89ed2ee50e6a50feab0a1b51c00d4f7525ec1671;hp=431a113cd1274fec3926c36a9ccaca276b0f206c;hpb=caa74702b15b8640370e1ff3c98d8e3bf18924e1;p=irc.git

diff --git a/commands.py b/commands.py
index 431a113..5f436c6 100755
--- a/commands.py
+++ b/commands.py
@@ -1,8 +1,13 @@
 # Part of Acrobat.
-import string, cPickle, random, urllib, sys, time, re, os, twitter, subprocess, datetime, urlparse
+import string, cPickle, random, urllib, sys, time, re, os, twitter, subprocess, datetime, urlparse, hashlib
 from collections import defaultdict
 from irclib import irc_lower, nm_to_n
 
+try:
+    from blame_filter import bfd
+except ImportError:
+    bfd = None
+
 # query karma
 def karmaq(bot, cmd, nick, conn, public, karma):
     try:
@@ -279,8 +284,12 @@ def __getcommitinfo(commit):
        return(err)
 
     ts,mes=out.split('|')
+    mes=mes.strip()
+    md5mes=hashlib.md5(mes).hexdigest()
+    if bfd and md5mes in bfd:
+        mes=bfd[md5mes]
     when=datetime.date.fromtimestamp(float(ts))
-    return mes.strip(), when
+    return mes, when
 
 ###Return an array of commit messages and timestamps for lines in db that match what
 def __getcommits(db,keys,what):
@@ -393,23 +402,33 @@ class UrlLog:
         self.nick=nick
         self.url=url
         self.first=time.time()
+        self.localfirst=time.localtime(self.first)
         self.count=1
         self.lastseen=time.time()
         self.lastasked=time.time()
     def recenttime(self):
         return max(self.lastseen,self.lastasked)
     def firstmen(self):
-        return nicetime(time.time()-self.first)
+        n=time.localtime(time.time())
+        s="%02d:%02d" % (self.localfirst.tm_hour,self.localfirst.tm_min)
+        if n.tm_yday != self.localfirst.tm_yday:
+            s+=time.strftime(" on %d %B", self.localfirst)
+        return s
     def urltype(self):
-        z=min(len(urlcomplaints)-1, self.count-1)
-        return urlcomplaints[z]
+        z=min(len(urlinfos)-1, self.count-1)
+        return urlinfos[z]
 
 #(?:) is a regexp that doesn't group
 urlre = re.compile(r"((?:(?:http)|(?:nsfw))s?://[^ ]+)( |$)")
 hturlre= re.compile(r"(http)(s?://[^ ]+)( |$)")
 #matches \bre\:?\s+ before a regexp; (?i)==case insensitive match
 shibboleth = re.compile(r"(?i)\bre\:?\s+((?:(?:http)|(?:nsfw))s?://[^ ]+)( |$)")
-urlcomplaints = ["a contemporary","an interesting","a fascinating","an overused","a vastly overused"]
+#How long (in s) to wait since the most recent mention before commenting
+url_repeat_time = 300
+urlinfos = ["a new",
+            "a fascinating",
+            "an interesting",
+            "a popular"]
 
 ### Deal with /msg bot url or ~url in channel
 def urlq(bot, cmd, nick, conn, public,urldb):
@@ -421,12 +440,12 @@ def urlq(bot, cmd, nick, conn, public,urldb):
     url=canonical_url(urlstring)
     if (url in urldb):
         T = urldb[url]
-        complaint="That's %s URL that was first mentioned %s by %s" % \
-                  (T.urltype(),T.firstmen(),T.nick)
+        comment="I saw that URL in scrool, first mentioned by %s at %s" % \
+                (T.nick,T.firstmen())
         if (public):
-            complaint=complaint+". Furthermore it defeats the point of this command to use it other than via /msg."
+            comment=comment+". Furthermore it defeats the point of this command to use it other than via /msg."
         T.count+=1
-        bot.automsg(False,nick,complaint)
+        bot.automsg(False,nick,comment)
         T.lastasked=time.time()
         #URL suppressed, so mention in #urls
         if urlstring != cmd.split()[1]: #first argument to URL was not the url
@@ -450,9 +469,10 @@ def dourl(bot,conn,nick,command,urldb):
 
     if urlstring in urldb:
         T=urldb[urlstring]
-        message="observes %s URL, first mentioned %s by %s" % \
-                (T.urltype(),T.firstmen(),T.nick)
-        if shibboleth.search(command)==None:
+        message="saw that URL in scrool, first mentioned by %s at %s" % \
+                (T.nick,T.firstmen())
+        if shibboleth.search(command)==None and \
+           time.time() - T.lastseen > url_repeat_time:
             conn.action(bot.channel, message)
         T.lastseen=time.time()
         T.count+=1
@@ -502,13 +522,13 @@ def twitterq(bot,cmd,nick,conn,public,twitapi):
     for stringout in stringsout:
         bot.automsg(public, nick, stringout)
 
-def getTweet(urlstring,twitapi,inclusion=False):
+def getTweet(urlstring,twitapi,inclusion=False,recurlvl=0):
     unobfuscate_urls=True
     expand_included_tweets=True
     stringsout=[]
-
-    parts = string.split(urlstring,'/')
-    tweetID = parts[-1]
+
+    path = urlparse.urlparse(urlstring).path
+    tweetID = path.split('/')[-1]
     try:
         status = twitapi.GetStatus(tweetID)
         if status == {}:
@@ -578,12 +598,15 @@
 
         if expand_included_tweets:
             if rv.hostname == 'twitter.com' and re.search(r'status/\d+',rv.path):
-                quotedtweet = getTweet(toReplace, twitapi, inclusion=True) # inclusion parameter limits recursion.
-                if not quotedtweet:
-                    quotedtweet = [""]
-                quotedtweet[0] = "Q{ " + quotedtweet[0]
-                quotedtweet[-1] += " }"
-                stringsout = quotedtweet + stringsout
+                if recurlvl > 2:
+                    stringsout = [ "{{ Recursion level too high }}" ] + stringsout
+                else:
+                    quotedtweet = getTweet(toReplace, twitapi, inclusion=True, recurlvl=recurlvl+1) # inclusion parameter limits recursion.
+                    if not quotedtweet:
+                        quotedtweet = [""]
+                    quotedtweet[0] = "Q{ " + quotedtweet[0]
+                    quotedtweet[-1] += " }"
+                    stringsout = quotedtweet + stringsout
 
         tweetText = tweetText.replace(url.url, toReplace)
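
A minimal sketch of the blame_filter hook that the diff adds to __getcommitinfo,
assuming bfd is a dict-like mapping from the md5 hexdigest of a stripped commit
message to replacement text (the module is optional, hence the ImportError
fallback above). The sample message and replacement are invented for
illustration; Python 2, to match the codebase:

    import hashlib

    # Hypothetical filter data: md5(stripped commit message) -> replacement text.
    bfd = {hashlib.md5("Fix the frobnicator").hexdigest(): "routine maintenance"}

    def filter_message(mes, bfd=bfd):
        # Same order as __getcommitinfo: strip, hash, then substitute if known.
        mes = mes.strip()
        md5mes = hashlib.md5(mes).hexdigest()
        if bfd and md5mes in bfd:
            mes = bfd[md5mes]
        return mes

    print filter_message("Fix the frobnicator\n")  # -> "routine maintenance"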