From bf7885f2a107fcd4c58c144a1658a3acc8311f3a Mon Sep 17 00:00:00 2001
From: Ian Jackson
Date: Tue, 1 Jun 2010 16:21:03 +0100
Subject: [PATCH 1/1] Always call cancel_time before on_time to make sure we
 have only one queued up

---
 infile.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/infile.c b/infile.c
index 3c93d1f..32c4497 100644
--- a/infile.c
+++ b/infile.c
@@ -178,6 +178,8 @@ static void *feedfile_got_article(oop_source *lp, oop_read *rd,
 
 /*========== tailing input file ==========*/
 
+static void tailing_rable_on_time(InputFile *ipf);
+
 static void *tailing_rable_call_time(oop_source *lp, struct timeval tv,
                                      void *user) {
   /* lifetime of ipf here is OK because destruction will cause
@@ -190,13 +192,20 @@ static void *tailing_rable_call_time(oop_source *lp, struct timeval tv,
   /* we just keep calling readable until our caller (oop_rd)
    * has called try_read, and try_read has found EOF so given EAGAIN */
   dbg("**TRACT** ipf=%p reschedule",ipf);
-  loop->on_time(loop, OOP_TIME_NOW, tailing_rable_call_time, ipf);
+  tailing_rable_on_time(ipf);
 
-  assert(ipf->readable_callback):
+  assert(ipf->readable_callback);
   return ipf->readable_callback(loop, &ipf->readable,
                                 ipf->readable_callback_user);
 }
 
+static void tailing_rable_on_time(InputFile *ipf) {
+  loop->cancel_time(loop, OOP_TIME_NOW, tailing_rable_call_time, ipf);
+  loop->on_time(loop, OOP_TIME_NOW, tailing_rable_call_time, ipf);
+  /* on_time is not idempotent - it counts.  So we need this to make
+   * sure we only have one outstanding, as otherwise our cancel doesn't work */
+}
+
 static void tailing_on_cancel(struct oop_readable *rable) {
   InputFile *ipf= (void*)rable;
   dbg("**TOR** ipf=%p on_cancel",ipf);
@@ -213,7 +222,7 @@ void tailing_make_readable(InputFile *ipf) {
   if (!ipf || !ipf->readable_callback) /* so callers can be naive */
     return;
   ipf->fake_readable= 1;
-  loop->on_time(loop, OOP_TIME_NOW, tailing_rable_call_time, ipf);
+  tailing_rable_on_time(ipf);
 }
 
 static int tailing_on_readable(struct oop_readable *rable,
-- 
2.30.2
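
Reviewer note (not part of the patch): below is a minimal, self-contained C
sketch of why the cancel-then-arm wrapper matters. It does not use liboop's
real API; toy_on_time, toy_cancel_time and schedule_once are hypothetical
stand-ins that only model the counting behaviour the in-code comment
describes: every on_time call queues one more callback, cancel_time removes
at most one matching entry, so a wrapper that cancels before re-arming is
what keeps at most one callback outstanding.

/* Toy model (not liboop itself) of counting timer registration.  The
 * schedule_once() wrapper mirrors the tailing_rable_on_time() pattern
 * from the patch: cancel first, then arm. */
#include <stdio.h>
#include <string.h>

typedef void toy_call(void *user);

struct pending { toy_call *fn; void *user; };
static struct pending queue[16];
static int npending;

static void toy_on_time(toy_call *fn, void *user) {
  /* counting semantics: every call adds another queued callback */
  queue[npending].fn = fn;
  queue[npending].user = user;
  npending++;
}

static void toy_cancel_time(toy_call *fn, void *user) {
  /* removes at most one matching entry */
  for (int i = 0; i < npending; i++) {
    if (queue[i].fn == fn && queue[i].user == user) {
      memmove(&queue[i], &queue[i+1], (npending-1-i) * sizeof(queue[0]));
      npending--;
      return;
    }
  }
}

static void schedule_once(toy_call *fn, void *user) {
  /* the pattern the patch introduces: cancel before re-arming, so the
   * wrapper is effectively idempotent even though on_time is not */
  toy_cancel_time(fn, user);
  toy_on_time(fn, user);
}

static void dummy_callback(void *user) { (void)user; }

int main(void) {
  toy_on_time(dummy_callback, NULL);
  toy_on_time(dummy_callback, NULL);
  printf("raw on_time twice:   %d pending\n", npending);  /* 2 */
  toy_cancel_time(dummy_callback, NULL);
  printf("after one cancel:    %d pending\n", npending);  /* 1 left over */

  npending = 0;
  schedule_once(dummy_callback, NULL);
  schedule_once(dummy_callback, NULL);
  printf("schedule_once twice: %d pending\n", npending);  /* 1 */
  return 0;
}

Built with any C99 compiler, the sketch shows two raw on_time calls leaving
two callbacks queued (so a single cancel no longer clears the timer), while
two schedule_once calls leave exactly one, which is the invariant the new
tailing_rable_on_time() helper is meant to preserve.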