# Part of Acrobat.
-import string, cPickle, random, urllib, sys, time, re, os, twitter, subprocess, datetime
+import string, cPickle, random, urllib, sys, time, re, os, twitter, subprocess, datetime, urlparse, hashlib
+from collections import defaultdict
from irclib import irc_lower, nm_to_n
+try:
+ from blame_filter import bfd
+except ImportError:
+ bfd = None
+
# query karma
def karmaq(bot, cmd, nick, conn, public, karma):
try:
(bot.revision.split()[1], bot.channel, conn.get_nickname(),
bot.owner, len(karma.keys())))
+# Shared state for the fish commands: remembers the last few messages the
+# bot produced (so "blame #last" can look them up) plus rate-limit state.
+class FishPond:
+    def __init__(fishpond):
+        # Recent (message, cfg) pairs, newest first; maintained by note_last.
+        fishpond.last=[]
+        # Nonzero while fish output is suppressed (fish_quota returns early).
+        fishpond.DoS=0
+        # Quota bookkeeping timestamp (its use is not visible in this hunk).
+        fishpond.quotatime=0
+
+    # Push an outgoing message and the config it was generated from onto
+    # the history, keeping only the ten most recent entries.
+    def note_last(fishpond, msg, cfg):
+        fishpond.last.insert(0,(msg,cfg))
+        fishpond.last = fishpond.last[0:10]
+
# Check on fish stocks
def fish_quota(pond):
if pond.DoS:
return
me = bot.connection.get_nickname()
trout_msg = random.choice(fishlist)
- fishpond.last=trout_msg
+ fishpond.note_last(trout_msg,cfg)
# The bot won't trout or flirt with itself;
if irc_lower(me) == irc_lower(target) or irc_lower(target) in synonyms:
target = nick
return
me = bot.connection.get_nickname()
slash_msg = random.choice(fishlist)
- fishpond.last=slash_msg
+ fishpond.note_last(slash_msg,cfg)
# The bot won't slash people with themselves
if irc_lower(who[0]) == irc_lower(who[1]):
conn.notice(nick, "oooooh no missus!")
return(err)
ts,mes=out.split('|')
+ mes=mes.strip()
+ md5mes=hashlib.md5(mes).hexdigest()
+ if bfd and md5mes in bfd:
+ mes=bfd[md5mes]
when=datetime.date.fromtimestamp(float(ts))
- return mes.strip(), when
+ return mes, when
###Return an array of commit messages and timestamps for lines in db that match what
def __getcommits(db,keys,what):
sans=__getcommits(sdb,sdbk,what)
return tans+fans+sans
-def blameq(bot,cmd,nick,conn,public,fish,tdb,tdbk,fdb,fdbk,sdb,sdbk):
+def blameq(bot,cmd,nick,conn,public,fishpond,cfgs):
+ tdb,tdbk,x = cfgs[0][7] # urgh, magic, to support magic knowledge below
+ fdb,fdbk,x = cfgs[1][7]
+ sdb,sdbk,x = cfgs[2][7]
clist=cmd.split()
if len(clist) < 2:
bot.automsg(public,nick,"Who or what do you want to blame?")
return
cwhat=' '.join(clist[2:])
+ kindsfile = "fish?"
if clist[1]=="#last":
- ans=__getall(tdb,tdbk,fdb,fdbk,sdb,sdbk,fish.last)
+ try:
+ n = abs(int(clist[2]))-1
+ if n < 0: raise ValueError
+ except IndexError: n = 0
+ except ValueError:
+ bot.automsg(public,nick,"Huh?")
+ return
+ try: lmsg, lcfg = fishpond.last[n]
+ except IndexError:
+ bot.automsg(public,nick,"Nothing")
+ return
+ xdb,xdbk,kindsfile = lcfg[7]
+ ans=__getcommits(xdb,xdbk,lmsg)
elif clist[1]=="#trouts" or clist[1]=="#trout":
ans=__getcommits(tdb,tdbk,cwhat)
elif clist[1]=="#flirts" or clist[1]=="#flirt":
if len(ans[0])==1:
bot.automsg(public,nick,ans[0])
else:
- bot.automsg(public,nick,"Modified %s: %s" % (ans[0][2].isoformat(),ans[0][1]))
+ bot.automsg(public,nick,"Modified %s %s: %s" % (kindsfile, ans[0][2].isoformat(),ans[0][1]))
elif len(ans)>4:
bot.automsg(public,nick,"I found %d matches, which is too many. Please be more specific!" % (len(ans)) )
else:
if len(a)==1:
bot.automsg(public,nick,a)
else:
- bot.automsg(public,nick,"'%s' modified on %s: %s" % (a[0],a[2].isoformat(),a[1]))
+ bot.automsg(public,nick,"%s '%s' modified on %s: %s" % (kindsfile, a[0],a[2].isoformat(),a[1]))
### say to msg/channel
def sayq(bot, cmd, nick, conn, public):
self.nick=nick
self.url=url
self.first=time.time()
+ self.localfirst=time.localtime(self.first)
self.count=1
self.lastseen=time.time()
self.lastasked=time.time()
def recenttime(self):
return max(self.lastseen,self.lastasked)
+    # Describe when this URL was first mentioned: "HH:MM" (local time of
+    # the first sighting), plus " on <day> <month>" when that was not today.
     def firstmen(self):
-        return nicetime(time.time()-self.first)
+        n=time.localtime(time.time())
+        s="%02d:%02d" % (self.localfirst.tm_hour,self.localfirst.tm_min)
+        # NOTE(review): only tm_yday is compared (not the year), so a first
+        # sighting a whole number of years ago would omit the date -- confirm.
+        if n.tm_yday != self.localfirst.tm_yday:
+            s+=time.strftime(" on %d %B", self.localfirst)
+        return s
     def urltype(self):
-        z=min(len(urlcomplaints)-1, self.count-1)
-        return urlcomplaints[z]
+        # Pick an adjective from urlinfos by mention count (count-1 as the
+        # index), clamped to the final entry for oft-repeated URLs.
+        z=min(len(urlinfos)-1, self.count-1)
+        return urlinfos[z]
#(?:) is a regexp that doesn't group
urlre = re.compile(r"((?:(?:http)|(?:nsfw))s?://[^ ]+)( |$)")
hturlre= re.compile(r"(http)(s?://[^ ]+)( |$)")
#matches \bre\:?\s+ before a regexp; (?i)==case insensitive match
shibboleth = re.compile(r"(?i)\bre\:?\s+((?:(?:http)|(?:nsfw))s?://[^ ]+)( |$)")
-urlcomplaints = ["a contemporary","an interesting","a fascinating","an overused","a vastly overused"]
+#How long (in s) to wait since the most recent mention before commenting
+url_repeat_time = 300
+# Adjectives applied to a URL by how many times it has been mentioned;
+# urltype() indexes this with count-1, clamped to the last entry.
+urlinfos = ["a new",
+            "a fascinating",
+            "an interesting",
+            "a popular"]
### Deal with /msg bot url or ~url in channel
def urlq(bot, cmd, nick, conn, public,urldb):
url=canonical_url(urlstring)
if (url in urldb):
T = urldb[url]
- complaint="That's %s URL that was first mentioned %s by %s" % \
- (T.urltype(),T.firstmen(),T.nick)
+ comment="I saw that URL in scrool, first mentioned by %s at %s" % \
+ (T.nick,T.firstmen())
if (public):
- complaint=complaint+". Furthermore it defeats the point of this command to use it other than via /msg."
+ comment=comment+". Furthermore it defeats the point of this command to use it other than via /msg."
T.count+=1
- bot.automsg(False,nick,complaint)
+ bot.automsg(False,nick,comment)
T.lastasked=time.time()
#URL suppressed, so mention in #urls
if urlstring != cmd.split()[1]: #first argument to URL was not the url
if urlstring in urldb:
T=urldb[urlstring]
- message="observes %s URL, first mentioned %s by %s" % \
- (T.urltype(),T.firstmen(),T.nick)
- if shibboleth.search(command)==None:
+ message="saw that URL in scrool, first mentioned by %s at %s" % \
+ (T.nick,T.firstmen())
+ if shibboleth.search(command)==None and \
+ time.time() - T.lastseen > url_repeat_time:
conn.action(bot.channel, message)
T.lastseen=time.time()
T.count+=1
def twitterq(bot,cmd,nick,conn,public,twitapi):
if (not urlre.search(cmd)):
- bot.automsg(False,nick,"Please use 'twit' only with http URLs")
+ bot.automsg(False,nick,"Please use 'twit' only with http or https URLs")
return
urlstring = urlre.search(cmd).group(1)
if (urlstring.find("twitter.com") !=-1):
- stringout = getTweet(urlstring,twitapi)
- bot.automsg(public, nick, stringout)
+ stringsout = getTweet(urlstring,twitapi)
+ for stringout in stringsout:
+ bot.automsg(public, nick, stringout)
-def getTweet(urlstring,twitapi):
-    parts = string.split(urlstring,'/')
-    tweetID = parts[-1]
+def getTweet(urlstring,twitapi,inclusion=False,recurlvl=0):
+    """Fetch the tweet named by a twitter.com status URL and render it as a
+    list of strings (quoted tweets first, the requested tweet last).
+
+    inclusion=True marks a recursive call expanding a quoted tweet, in which
+    case the strings are returned unencoded (the outermost call encodes);
+    recurlvl bounds the quote-expansion depth."""
+    unobfuscate_urls=True
+    expand_included_tweets=True
+    stringsout=[]
+
+    # The status ID is the final path component of the URL.
+    path = urlparse.urlparse(urlstring).path
+    tweetID = path.split('/')[-1]
     try:
         status = twitapi.GetStatus(tweetID)
-        #print status, type(status), status=={}
         if status == {}:
             return "twitapi.GetStatus returned nothing :-("
         if status.user == None and status.text == None:
             return "Empty status object returned :("
+        # For retweets, report on the original tweet instead.
+        if status.retweeted_status and status.retweeted_status.text:
+            status = status.retweeted_status
         if status.user is not None:
-            tweeter_screen = status.user.screen_name.encode('UTF-8', 'replace')
-            tweeter_name = status.user.name.encode('UTF-8', 'replace')
+            tweeter_screen = status.user.screen_name #.encode('UTF-8', 'replace')
+            tweeter_name = status.user.name #.encode('UTF-8', 'replace')
         else:
             tweeter_screen = "[not returned]" ; tweeter_name = "[not returned]"
-        tweetText = status.text.encode('UTF-8', 'replace')
-        tweetText = tweetText.replace('\n',' ')
+        # NOTE(review): status may already have been swapped to the original
+        # retweeted tweet above, so this appears to append "RTing <same
+        # user>" even for plain tweets -- confirm placement and intent.
+        tweeter_name = tweeter_name + " RTing " + status.user.name #.encode('UTF-8', 'replace')
+        tweetText = status.full_text
+        # Replace t.co media links with the direct media URLs, grouping
+        # multiple attachments that share one t.co link.
+        if status.media:
+            replacements = defaultdict( list )
+            for medium in status.media:
+                replacements[medium.url].append(medium.media_url_https)
+
+            for k,v in replacements.items():
+
+                # Animated GIFs surface as a jpg thumbnail; point at the mp4.
+                v = [re.sub(r"/tweet_video_thumb/([\w\-]+).jpg", r"/tweet_video/\1.mp4", link) for link in v]
+                if len(v) > 1:
+                    replacementstring = "[" + " ; ".join(v) +"]"
+                else:
+                    replacementstring = v[0]
+                tweetText = tweetText.replace(k, replacementstring)
+
+        for url in status.urls:
+            toReplace = url.expanded_url
+
+            if unobfuscate_urls:
+                import urllib
+                rv = urlparse.urlparse(toReplace)
+                if rv.hostname in {
+                    # sourced from http://bit.do/list-of-url-shorteners.php
+                    "bit.do", "t.co", "lnkd.in", "db.tt", "qr.ae", "adf.ly",
+                    "goo.gl", "bitly.com", "cur.lv", "tinyurl.com", "ow.ly",
+                    "bit.ly", "adcrun.ch", "ity.im", "q.gs", "viralurl.com",
+                    "is.gd", "po.st", "vur.me", "bc.vc", "twitthis.com", "u.to",
+                    "j.mp", "buzurl.com", "cutt.us", "u.bb", "yourls.org",
+                    "crisco.com", "x.co", "prettylinkpro.com", "viralurl.biz",
+                    "adcraft.co", "virl.ws", "scrnch.me", "filoops.info", "vurl.bz",
+                    "vzturl.com", "lemde.fr", "qr.net", "1url.com", "tweez.me",
+                    "7vd.cn", "v.gd", "dft.ba", "aka.gr", "tr.im",
+                    # added by ASB:
+                    "trib.al", "dlvr.it"
+                    }:
+                    #expand list as needed.
+                    # Resolve the shortened URL via the urlex.org service.
+                    response = urllib.urlopen('http://urlex.org/txt/' + toReplace)
+                    resptext = response.read()
+                    if resptext.startswith('http'): # ie it looks urlish (http or https)
+                        if resptext != toReplace:
+                            toReplace = resptext
+                        # maybe make a note of the domain of the original URL to compile list of shortenable domains?
+
+            # remove tracking utm_ query parameters, for privacy and brevity
+            # code snippet from https://gist.github.com/lepture/5997883
+            rv = urlparse.urlparse(toReplace)
+            if rv.query:
+                query = re.sub(r'utm_\w+=[^&]+&?', '', rv.query)
+                if query:
+                    toReplace = '%s://%s%s?%s' % (rv.scheme, rv.hostname, rv.path, query)
+                else:
+                    toReplace = '%s://%s%s' % (rv.scheme, rv.hostname, rv.path) # leave off the final '?'
+
+            # Recursively expand links to other tweets (quoted tweets),
+            # bounded by recurlvl to avoid unbounded recursion.
+            if expand_included_tweets:
+                if rv.hostname == 'twitter.com' and re.search(r'status/\d+',rv.path):
+                    if recurlvl > 2:
+                        stringsout = [ "{{ Recursion level too high }}" ] + stringsout
+                    else:
+                        quotedtweet = getTweet(toReplace, twitapi, inclusion=True, recurlvl=recurlvl+1) # inclusion parameter limits recursion.
+                        if not quotedtweet:
+                            quotedtweet = [""]
+                        quotedtweet[0] = "Q{ " + quotedtweet[0]
+                        quotedtweet[-1] += " }"
+                        stringsout = quotedtweet + stringsout
+
+            tweetText = tweetText.replace(url.url, toReplace)
+
+        # NOTE(review): the next three replaces look like HTML-entity
+        # unescapes (&gt;/&lt;/&amp;) whose source text was decoded in
+        # transit -- as written here they are no-ops; confirm upstream.
+        tweetText = tweetText.replace(">",">")
+        tweetText = tweetText.replace("<","<")
+        tweetText = tweetText.replace("&","&")
+        tweetText = tweetText.replace("\n"," ")
         stringout = "tweet by %s (%s): %s" %(tweeter_screen,tweeter_name,tweetText)
     except twitter.TwitterError:
         terror = sys.exc_info()
         stringout = "Twitter error: %s" % terror[1].__str__()
-    return stringout
+    except Exception:
+        terror = sys.exc_info()
+        stringout = "Error: %s" % terror[1].__str__()
+    # The tweet (or error) goes first; quoted tweets were prepended earlier.
+    stringsout = [stringout] + stringsout
+    if inclusion:
+        return stringsout # don't want to double-encode it, so just pass it on for now and encode later
+
+    return map(lambda x: x.encode('UTF-8', 'replace'), stringsout)