1 /* $Id: buffindexed.c 7602 2007-02-10 22:19:49Z eagle $
3 ** Overview buffer and index method.
8 #include "portable/mmap.h"
20 #include "inn/innconf.h"
24 #include "ovinterface.h"
27 #include "buffindexed.h"
29 #define OVBUFF_MAGIC "ovbuff"
33 #define OVBUFFNASIZ 16
34 #define OVBUFFLASIZ 16
35 #define OVBUFFPASIZ 64
37 #define OVMAXCYCBUFFNAME 8
39 #define OV_HDR_PAGESIZE 16384
40 #define OV_BLOCKSIZE 8192
41 #define OV_BEFOREBITF (1 * OV_BLOCKSIZE)
46 unsigned int blocknum;
52 char magic[OVBUFFMASIZ];
53 char path[OVBUFFPASIZ];
54 char indexa[OVBUFFLASIZ]; /* ASCII version of index */
55 char lena[OVBUFFLASIZ]; /* ASCII version of len */
56 char totala[OVBUFFLASIZ]; /* ASCII version of total */
57 char useda[OVBUFFLASIZ]; /* ASCII version of used */
58 char freea[OVBUFFLASIZ]; /* ASCII version of free */
59 char updateda[OVBUFFLASIZ]; /* ASCII version of updated */
63 typedef struct _OVBUFF {
64 unsigned int index; /* ovbuff index */
65 char path[OVBUFFPASIZ]; /* Path to file */
66 int magicver; /* Magic version number */
67 int fd; /* file descriptor for this
69 off_t len; /* Length of writable area, in
71 off_t base; /* Offset (relative to byte
72 0 of file) to base block */
73 unsigned int freeblk; /* next free block number no
74 freeblk left if equals
76 unsigned int totalblk; /* number of total blocks */
77 unsigned int usedblk; /* number of used blocks */
78 time_t updated; /* Time of last update to
80 void * bitfield; /* Bitfield for ovbuff block in
82 bool needflush; /* true if OVBUFFHEAD is needed
84 struct _OVBUFF *next; /* next ovbuff */
85 int nextchunk; /* next chunk */
87 struct ov_trace_array *trace;
91 typedef struct _OVINDEXHEAD {
92 OV next; /* next block */
93 ARTNUM low; /* lowest article number in the index */
94 ARTNUM high; /* highest article number in the index */
97 typedef struct _OVINDEX {
98 ARTNUM artnum; /* article number */
99 unsigned int blocknum; /* overview data block number */
100 short index; /* overview data block index */
101 TOKEN token; /* token for this article */
102 off_t offset; /* offset from the top in the block */
103 int len; /* length of the data */
104 time_t arrived; /* arrived time of article */
105 time_t expires; /* expire time of article */
108 #define OVINDEXMAX ((OV_BLOCKSIZE-sizeof(OVINDEXHEAD))/sizeof(OVINDEX))
110 typedef struct _OVBLOCK {
111 OVINDEXHEAD ovindexhead; /* overview index header */
112 OVINDEX ovindex[OVINDEXMAX]; /* overview index */
115 typedef struct _OVBLKS {
122 /* Data structure for specifying a location in the group index */
124 int recno; /* Record number in group index */
134 #define OV_TRACENUM 10
135 struct ov_trace_array {
138 struct ov_trace *ov_trace;
141 struct ov_name_table {
144 struct ov_name_table *next;
147 static struct ov_name_table *name_table = NULL;
148 #endif /* OV_DEBUG */
150 #define GROUPHEADERHASHSIZE (16 * 1024)
151 #define GROUPHEADERMAGIC (~(0xf1f0f33d))
155 GROUPLOC hash[GROUPHEADERHASHSIZE];
159 /* The group is matched based on the MD5 of the group name. This may prove to
160 be inadequate in the future; if so, the right thing to do is probably
161 just to add a SHA1 hash in here also. We get a really nice
162 benefit from this being fixed length, so we should try to keep it that way.
165 HASH hash; /* MD5 hash of the group name */
166 HASH alias; /* If not empty then this is the hash of the
167 group that this group is an alias for */
168 ARTNUM high; /* High water mark in group */
169 ARTNUM low; /* Low water mark in group */
170 int count; /* Number of articles in group */
171 int flag; /* Posting/Moderation Status */
172 time_t expired; /* When last expiry */
173 time_t deleted; /* When this was deleted, 0 otherwise */
174 GROUPLOC next; /* Next block in this chain */
175 OV baseindex; /* base index buff */
176 OV curindex; /* current index buff */
177 int curindexoffset; /* current index offset for this ovbuff */
178 ARTNUM curhigh; /* High water mark in group */
179 ARTNUM curlow; /* Low water mark in group */
180 OV curdata; /* current offset for this ovbuff */
181 off_t curoffset; /* current offset for this ovbuff */
184 typedef struct _GIBLIST {
186 struct _GIBLIST *next;
189 typedef struct _GDB {
206 GROUPDATABLOCK gdb; /* used for caching current block */
209 #define GROUPDATAHASHSIZE 25
211 static GROUPDATABLOCK *groupdatablock[GROUPDATAHASHSIZE];
213 typedef enum {PREPEND_BLK, APPEND_BLK} ADDINDEX;
214 typedef enum {SRCH_FRWD, SRCH_BKWD} SRCH;
216 #define _PATH_OVBUFFCONFIG "buffindexed.conf"
218 static char LocalLogName[] = "buffindexed";
219 static long pagesize = 0;
220 static OVBUFF *ovbufftab;
222 static GROUPHEADER *GROUPheader = NULL;
223 static GROUPENTRY *GROUPentries = NULL;
224 static int GROUPcount = 0;
225 static GROUPLOC GROUPemptyloc = { -1 };
226 #define NULLINDEX (-1)
227 static OV ovnull = { 0, NULLINDEX };
228 typedef unsigned long ULONG;
229 static ULONG onarray[64], offarray[64];
230 static int longsize = sizeof(long);
232 static bool Needunlink;
233 static bool Cutofflow;
235 static OVSEARCH *Cachesearch;
237 static int ovbuffmode;
239 static GROUPLOC GROUPnewnode(void);
240 static bool GROUPremapifneeded(GROUPLOC loc);
241 static void GROUPLOCclear(GROUPLOC *loc);
242 static bool GROUPLOCempty(GROUPLOC loc);
243 static bool GROUPlockhash(enum inn_locktype type);
244 static bool GROUPlock(GROUPLOC gloc, enum inn_locktype type);
245 static off_t GROUPfilesize(int count);
246 static bool GROUPexpand(int mode);
247 static void *ovopensearch(char *group, int low, int high, bool needov);
248 static void ovclosesearch(void *handle, bool freeblock);
250 static GIBLIST *Giblist;
253 #ifdef MMAP_MISSES_WRITES
254 /* With HP/UX, you definitely do not want to mix mmap-accesses of
255 a file with read()s and write()s of the same file */
/*
** mmapwrite -- pwrite()-style positioned write implemented via mmap():
** maps the page-aligned region covering [offset, offset+nbyte) and
** memcpy()s the caller's buffer into the mapping.
** NOTE(review): elided excerpt -- local declarations, the munmap() and
** the return path are not visible in this listing.
*/
256 static off_t mmapwrite(int fd, void *buf, off_t nbyte, off_t offset) {
261 pagefudge = offset % pagesize;
262 mmapoffset = offset - pagefudge;
263 len = pagefudge + nbyte;
265 if ((addr = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, mmapoffset)) == MAP_FAILED) {
268 memcpy(addr+pagefudge, buf, nbyte);
272 #endif /* MMAP_MISSES_WRITES */
/*
** ovparse_part_line -- parse one "index:path:sizeKB" line from
** buffindexed.conf into a freshly allocated OVBUFF and append it to the
** ovbufftab list.  Rejects malformed fields, duplicate indexes and
** non-existent files; computes the base offset of the data area
** (bitfield + OVBUFFHEAD, rounded up to the next OV_HDR_PAGESIZE).
** NOTE(review): elided excerpt -- several error-return and
** field-advancing lines are not visible here.
*/
274 static bool ovparse_part_line(char *l) {
279 OVBUFF *ovbuff, *tmp = ovbufftab;
281 /* ovbuff partition name */
282 if ((p = strchr(l, ':')) == NULL || p - l <= 0 || p - l > OVMAXCYCBUFFNAME - 1) {
283 syslog(L_ERROR, "%s: bad index in line '%s'", LocalLogName, l);
287 ovbuff = xmalloc(sizeof(OVBUFF));
288 ovbuff->index = strtoul(l, NULL, 10);
289 for (; tmp != (OVBUFF *)NULL; tmp = tmp->next) {
290 if (tmp->index == ovbuff->index) {
/* NOTE(review): "dupulicate" is a typo in this runtime log string;
   left as-is here since a doc-only change must not alter behavior. */
291 syslog(L_ERROR, "%s: dupulicate index in line '%s'", LocalLogName, l);
298 /* Path to ovbuff partition */
299 if ((p = strchr(l, ':')) == NULL || p - l <= 0 || p - l > OVBUFFPASIZ - 1) {
300 syslog(L_ERROR, "%s: bad pathname in line '%s'", LocalLogName, l);
305 memset(ovbuff->path, '\0', OVBUFFPASIZ);
306 strlcpy(ovbuff->path, l, OVBUFFPASIZ);
307 if (stat(ovbuff->path, &sb) < 0) {
308 syslog(L_ERROR, "%s: file '%s' does not exist, ignoring '%d'",
309 LocalLogName, ovbuff->path, ovbuff->index);
315 /* Length/size of symbolic partition in KB */
316 len = strtoul(l, NULL, 10) * (off_t) 1024;
318 ** The minimum article offset will be the size of the bitfield itself,
319 ** len / (blocksize * 8), plus however many additional blocks the OVBUFFHEAD
320 ** external header occupies ... then round up to the next block.
322 base = len / (OV_BLOCKSIZE * 8) + OV_BEFOREBITF;
323 tonextblock = OV_HDR_PAGESIZE - (base & (OV_HDR_PAGESIZE - 1));
324 ovbuff->base = base + tonextblock;
/* For regular files, the configured length must match the file size and
   leave room for the header+bitfield area. */
325 if (S_ISREG(sb.st_mode) && (len != sb.st_size || ovbuff->base > sb.st_size)) {
326 if (len != sb.st_size)
327 syslog(L_NOTICE, "%s: length mismatch '%lu' for index '%d' (%lu bytes)",
328 LocalLogName, (unsigned long) len, ovbuff->index,
329 (unsigned long) sb.st_size);
330 if (ovbuff->base > sb.st_size)
331 syslog(L_NOTICE, "%s: length must be at least '%lu' for index '%d' (%lu bytes)",
332 LocalLogName, (unsigned long) ovbuff->base, ovbuff->index,
333 (unsigned long) sb.st_size);
/* Initialize the in-core state and append to the end of ovbufftab. */
339 ovbuff->next = (OVBUFF *)NULL;
340 ovbuff->needflush = false;
341 ovbuff->bitfield = NULL;
342 ovbuff->nextchunk = 1;
344 if (ovbufftab == (OVBUFF *)NULL)
347 for (tmp = ovbufftab; tmp->next != (OVBUFF *)NULL; tmp = tmp->next);
354 ** ovbuffread_config() -- Read the overview partition/file configuration file.
/*
** Reads pathetc/buffindexed.conf, strips comment ('#') and blank lines,
** honors backslash-newline continuation, collects each remaining entry
** into ctab, then hands every entry to ovparse_part_line().  Fails when
** the file cannot be read or no buffer ends up defined.
*/
357 static bool ovbuffread_config(void) {
358 char *path, *config, *from, *to, **ctab = (char **)NULL;
359 int ctab_free = 0; /* Index to next free slot in ctab */
362 path = concatpath(innconf->pathetc, _PATH_OVBUFFCONFIG);
363 config = ReadInFile(path, NULL);
364 if (config == NULL) {
365 syslog(L_ERROR, "%s: cannot read %s", LocalLogName, path);
/* Tokenize the file in place: 'from' scans, 'to' compacts entries. */
371 for (from = to = config; *from; ) {
372 if (*from == '#') { /* Comment line? */
373 while (*from && *from != '\n')
374 from++; /* Skip past it */
376 continue; /* Back to top of loop */
378 if (*from == '\n') { /* End or just a blank line? */
380 continue; /* Back to top of loop */
383 ctab = xmalloc(sizeof(char *));
385 ctab = xrealloc(ctab, (ctab_free + 1) * sizeof(char *));
386 /* If we're here, we've got the beginning of a real entry */
387 ctab[ctab_free++] = to = from;
389 if (*from && *from == '\\' && *(from + 1) == '\n') {
390 from += 2; /* Skip past backslash+newline */
391 while (*from && isspace((int)*from))
395 if (*from && *from != '\n')
/* Parse every collected entry; one bad line aborts the whole config. */
406 for (ctab_i = 0; ctab_i < ctab_free; ctab_i++) {
407 if (!ovparse_part_line(ctab[ctab_i])) {
415 if (ovbufftab == (OVBUFF *)NULL) {
416 syslog(L_ERROR, "%s: no buffindexed defined", LocalLogName);
422 static char hextbl[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
423 'a', 'b', 'c', 'd', 'e', 'f'};
/*
** offt2hex -- render an off_t as an ASCII hex string; with leadingzeros
** the result is zero-padded to OVBUFFLASIZ digits.  Uses snprintf() when
** off_t fits in a long, otherwise builds the digits nibble by nibble
** from hextbl and (without leadingzeros) skips the leading zero run.
** Returns a pointer into a static buffer -- not reentrant.
*/
425 static char *offt2hex(off_t offset, bool leadingzeros) {
429 if (sizeof(off_t) <= 4) {
430 snprintf(buf, sizeof(buf), (leadingzeros) ? "%016lx" : "%lx", offset);
434 for (i = 0; i < OVBUFFLASIZ; i++)
435 buf[i] = '0'; /* Pad with zeros to start */
436 for (i = OVBUFFLASIZ - 1; i >= 0; i--) {
437 buf[i] = hextbl[offset & 0xf];
442 for (p = buf; *p == '0'; p++)
447 return p - 1; /* We converted a "0" and then bypassed all the zeros */
/*
** hex2offt -- inverse of offt2hex().  Parsing stops silently at the
** first non-hex character (e.g. a ':' separator) -- that is deliberate,
** see the original comment below.
*/
452 static off_t hex2offt(char *hex) {
453 if (sizeof(off_t) <= 4) {
454 unsigned long rpofft;
456 sscanf(hex, "%lx", &rpofft);
462 for (; *hex != '\0'; hex++) {
463 if (*hex >= '0' && *hex <= '9')
465 else if (*hex >= 'a' && *hex <= 'f')
467 else if (*hex >= 'A' && *hex <= 'F')
471 ** We used to have a syslog() message here, but the case
472 ** where we land here because of a ":" happens, er, often.
477 if (isalnum((int)*(hex + 1)))
/*
** ovreadhead -- refresh the in-core usedblk/freeblk counters from the
** on-disk OVBUFFHEAD at the start of the mapped bitfield region.  The
** header fields are fixed-width hex strings without NUL terminators, so
** each is copied into a local buffer and terminated before conversion.
*/
484 static void ovreadhead(OVBUFF *ovbuff) {
486 char buff[OVBUFFLASIZ+1];
488 memcpy(&rpx, ovbuff->bitfield, sizeof(OVBUFFHEAD));
489 strncpy(buff, rpx.useda, OVBUFFLASIZ);
490 buff[OVBUFFLASIZ] = '\0';
491 ovbuff->usedblk = (unsigned int)hex2offt((char *)buff);
492 strncpy(buff, rpx.freea, OVBUFFLASIZ);
493 buff[OVBUFFLASIZ] = '\0';
494 ovbuff->freeblk = (unsigned int)hex2offt((char *)buff);
/*
** ovflushhead -- rebuild the ASCII OVBUFFHEAD from the in-core counters
** and write it back through the mapping, then flush; no-op unless
** needflush is set.  strncpy() is intentional here: the on-disk fields
** are fixed-width and deliberately not NUL-terminated (see comment
** below about sprintf).
*/
498 static void ovflushhead(OVBUFF *ovbuff) {
501 if (!ovbuff->needflush)
503 memset(&rpx, 0, sizeof(OVBUFFHEAD));
504 ovbuff->updated = time(NULL);
505 strncpy(rpx.magic, OVBUFF_MAGIC, strlen(OVBUFF_MAGIC));
506 strncpy(rpx.path, ovbuff->path, OVBUFFPASIZ);
507 /* Don't use sprintf() directly ... the terminating '\0' causes grief */
508 strncpy(rpx.indexa, offt2hex(ovbuff->index, true), OVBUFFLASIZ);
509 strncpy(rpx.lena, offt2hex(ovbuff->len, true), OVBUFFLASIZ);
510 strncpy(rpx.totala, offt2hex(ovbuff->totalblk, true), OVBUFFLASIZ);
511 strncpy(rpx.useda, offt2hex(ovbuff->usedblk, true), OVBUFFLASIZ);
512 strncpy(rpx.freea, offt2hex(ovbuff->freeblk, true), OVBUFFLASIZ);
513 strncpy(rpx.updateda, offt2hex(ovbuff->updated, true), OVBUFFLASIZ);
514 memcpy(ovbuff->bitfield, &rpx, sizeof(OVBUFFHEAD));
515 mmap_flush(ovbuff->bitfield, ovbuff->base);
516 ovbuff->needflush = false;
/* ovlock -- advisory lock over the OVBUFFHEAD region of one buffer file. */
520 static bool ovlock(OVBUFF *ovbuff, enum inn_locktype type) {
521 return inn_lock_range(ovbuff->fd, type, true, 0, sizeof(OVBUFFHEAD));
/*
** ovbuffinit_disks -- open and mmap the header+bitfield region (base
** bytes) of every configured ovbuff, then either validate an existing
** header (magic, path, index, length) and load its counters, or
** initialize a brand-new buffer.  Returns false if any buffer is
** unusable.  NOTE(review): elided excerpt -- declarations and several
** error-return lines are not visible here.
*/
524 static bool ovbuffinit_disks(void) {
525 OVBUFF *ovbuff = ovbufftab;
532 ** Discover the state of our ovbuffs. If any of them are in icky shape,
533 ** duck shamelessly & return false.
535 for (; ovbuff != (OVBUFF *)NULL; ovbuff = ovbuff->next) {
536 if (ovbuff->fd < 0) {
537 if ((fd = open(ovbuff->path, ovbuffmode & OV_WRITE ? O_RDWR : O_RDONLY)) < 0) {
538 syslog(L_ERROR, "%s: ERROR opening '%s' : %m", LocalLogName, ovbuff->path);
541 close_on_exec(fd, true);
/* Map only the header+bitfield prefix; data blocks are accessed via
   pread/pwrite (or mmapwrite) elsewhere. */
545 if ((ovbuff->bitfield =
546 mmap(NULL, ovbuff->base, ovbuffmode & OV_WRITE ? (PROT_READ | PROT_WRITE) : PROT_READ,
547 MAP_SHARED, ovbuff->fd, (off_t) 0)) == MAP_FAILED) {
549 "%s: ovinitdisks: mmap for %s offset %d len %lu failed: %m",
550 LocalLogName, ovbuff->path, 0, (unsigned long) ovbuff->base);
553 rpx = (OVBUFFHEAD *)ovbuff->bitfield;
554 ovlock(ovbuff, INN_LOCK_WRITE);
/* Existing buffer: verify the stored header against the config. */
555 if (strncmp(rpx->magic, OVBUFF_MAGIC, strlen(OVBUFF_MAGIC)) == 0) {
556 ovbuff->magicver = 1;
557 if (strncmp(rpx->path, ovbuff->path, OVBUFFPASIZ) != 0) {
558 syslog(L_ERROR, "%s: Path mismatch: read %s for buffindexed %s",
559 LocalLogName, rpx->path, ovbuff->path);
560 ovbuff->needflush = true;
562 strncpy(buf, rpx->indexa, OVBUFFLASIZ);
563 buf[OVBUFFLASIZ] = '\0';
565 if (i != ovbuff->index) {
566 syslog(L_ERROR, "%s: Mismatch: index '%d' for buffindexed %s",
567 LocalLogName, i, ovbuff->path);
568 ovlock(ovbuff, INN_LOCK_UNLOCK);
571 strncpy(buf, rpx->lena, OVBUFFLASIZ);
572 buf[OVBUFFLASIZ] = '\0';
573 tmpo = hex2offt(buf);
574 if (tmpo != ovbuff->len) {
575 syslog(L_ERROR, "%s: Mismatch: read 0x%s length for buffindexed %s",
576 LocalLogName, offt2hex(tmpo, false), ovbuff->path);
577 ovlock(ovbuff, INN_LOCK_UNLOCK);
/* Header checks out: load the block accounting counters. */
580 strncpy(buf, rpx->totala, OVBUFFLASIZ);
581 buf[OVBUFFLASIZ] = '\0';
582 ovbuff->totalblk = hex2offt(buf);
583 strncpy(buf, rpx->useda, OVBUFFLASIZ);
584 buf[OVBUFFLASIZ] = '\0';
585 ovbuff->usedblk = hex2offt(buf);
586 strncpy(buf, rpx->freea, OVBUFFLASIZ);
587 buf[OVBUFFLASIZ] = '\0';
588 ovbuff->freeblk = hex2offt(buf);
/* No magic: brand-new buffer; derive totalblk from the usable length. */
592 ovbuff->totalblk = (ovbuff->len - ovbuff->base)/OV_BLOCKSIZE;
593 if (ovbuff->totalblk < 1) {
594 syslog(L_ERROR, "%s: too small length '%lu' for buffindexed %s",
595 LocalLogName, (unsigned long) ovbuff->len, ovbuff->path);
596 ovlock(ovbuff, INN_LOCK_UNLOCK);
599 ovbuff->magicver = 1;
603 ovbuff->needflush = true;
605 "%s: No magic cookie found for buffindexed %d, initializing",
606 LocalLogName, ovbuff->index);
610 ovbuff->trace = xcalloc(ovbuff->totalblk, sizeof(ov_trace_array));
611 #endif /* OV_DEBUG */
612 ovlock(ovbuff, INN_LOCK_UNLOCK);
/*
** ovusedblock -- query or update one block's bit in the allocation
** bitfield that starts OV_BEFOREBITF bytes into the mapped region.
** set_operation selects write (using onarray/offarray masks per
** setbitvalue) vs. read.  Set operations return 2; reads return 1 when
** the block is in use, 0 when free.
*/
617 static int ovusedblock(OVBUFF *ovbuff, int blocknum, bool set_operation, bool setbitvalue) {
619 int bitoffset; /* From the 'left' side of the long */
622 longoffset = blocknum / (sizeof(long) * 8);
623 bitoffset = blocknum % (sizeof(long) * 8);
624 bitlong = *((ULONG *) ovbuff->bitfield + (OV_BEFOREBITF / sizeof(long))
628 mask = onarray[bitoffset];
631 mask = offarray[bitoffset];
634 *((ULONG *) ovbuff->bitfield + (OV_BEFOREBITF / sizeof(long))
635 + longoffset) = bitlong;
636 return 2; /* XXX Clean up return semantics */
638 /* It's a read operation */
639 mask = onarray[bitoffset];
640 /* return bitlong & mask; doesn't work if sizeof(ulong) > sizeof(int) */
641 if ( bitlong & mask ) return 1; else return 0;
/*
** ovnextblock -- advance ovbuff->freeblk to the next free block: scan the
** bitfield one long-sized chunk at a time starting at the nextchunk
** cursor, wrapping once to the front; when every chunk is fully set,
** freeblk is set to totalblk (meaning "buffer full").  On success the
** cursor advances so later searches resume where this one left off.
** NOTE(review): elided excerpt -- the mask-building lines for the
** partial last chunk are not visible here.
*/
644 static void ovnextblock(OVBUFF *ovbuff) {
645 int i, j, last, lastbit, left;
646 ULONG mask = 0x80000000;
649 last = ovbuff->totalblk/(sizeof(long) * 8);
650 if ((left = ovbuff->totalblk % (sizeof(long) * 8)) != 0) {
653 table = ((ULONG *) ovbuff->bitfield + (OV_BEFOREBITF / sizeof(long)));
/* Forward scan from the cursor toward the end of the bitfield. */
654 for (i = ovbuff->nextchunk ; i < last ; i++) {
655 if (i == last - 1 && left != 0) {
656 for (j = 1 ; j < left ; j++) {
659 if ((table[i] & mask) != mask)
662 if ((table[i] ^ ~0) != 0)
/* Wrap around: re-scan the chunks before the cursor. */
667 for (i = 0 ; i < ovbuff->nextchunk ; i++) {
668 if ((table[i] ^ ~0) != 0)
671 if (i == ovbuff->nextchunk) {
672 ovbuff->freeblk = ovbuff->totalblk;
/* Found a chunk with a clear bit: locate the exact bit within it. */
676 if ((i - 1) >= 0 && (last - 1 == i) && left != 0) {
679 lastbit = sizeof(long) * 8;
681 for (j = 0 ; j < lastbit ; j++) {
682 if ((table[i] & onarray[j]) == 0)
686 ovbuff->freeblk = ovbuff->totalblk;
689 ovbuff->freeblk = i * sizeof(long) * 8 + j;
690 ovbuff->nextchunk = i + 1;
692 ovbuff->nextchunk = 0;
/* getovbuff -- linear scan of ovbufftab for the OVBUFF whose index
   matches ov.index. */
696 static OVBUFF *getovbuff(OV ov) {
697 OVBUFF *ovbuff = ovbufftab;
698 for (; ovbuff != (OVBUFF *)NULL; ovbuff = ovbuff->next) {
699 if (ovbuff->index == ov.index)
/*
** ovblocknew -- allocate the next free overview block.  Tries each
** ovbuff in round-robin order starting at the static ovbuffnext cursor
** (wrapping to the head of the table), under a per-buffer write lock.
** On success marks the block used in the bitfield, flags the header for
** flushing, and advances the cursor.  Under OV_DEBUG the allocation is
** recorded in the per-block trace array, keyed by the group's record
** number derived from the ge pointer.
*/
706 static OV ovblocknew(GROUPENTRY *ge) {
708 static OV ovblocknew(void) {
709 #endif /* OV_DEBUG */
710 static OVBUFF *ovbuffnext = NULL;
715 struct ov_trace_array *trace;
716 #endif /* OV_DEBUG */
718 if (ovbuffnext == NULL)
719 ovbuffnext = ovbufftab;
720 for (ovbuff = ovbuffnext ; ovbuff != (OVBUFF *)NULL ; ovbuff = ovbuff->next) {
721 ovlock(ovbuff, INN_LOCK_WRITE);
/* Free space exists but the freeblk hint is stale: rescan the bitmap. */
723 if (ovbuff->totalblk != ovbuff->usedblk && ovbuff->freeblk == ovbuff->totalblk) {
726 if (ovbuff->totalblk == ovbuff->usedblk || ovbuff->freeblk == ovbuff->totalblk) {
727 /* no space left for this ovbuff */
728 ovlock(ovbuff, INN_LOCK_UNLOCK);
/* First pass exhausted: retry the buffers before the cursor. */
733 if (ovbuff == NULL) {
734 for (ovbuff = ovbufftab ; ovbuff != ovbuffnext ; ovbuff = ovbuff->next) {
735 ovlock(ovbuff, INN_LOCK_WRITE);
737 if (ovbuff->totalblk == ovbuff->usedblk || ovbuff->freeblk == ovbuff->totalblk) {
738 /* no space left for this ovbuff */
739 ovlock(ovbuff, INN_LOCK_UNLOCK);
744 if (ovbuff == ovbuffnext) {
750 recno = ((char *)ge - (char *)&GROUPentries[0])/sizeof(GROUPENTRY);
751 if (ovusedblock(ovbuff, ovbuff->freeblk, false, true)) {
752 syslog(L_FATAL, "%s: 0x%08x trying to occupy new block(%d, %d), but already occupied", LocalLogName, recno, ovbuff->index, ovbuff->freeblk);
756 trace = &ovbuff->trace[ovbuff->freeblk];
757 if (trace->ov_trace == NULL) {
758 trace->ov_trace = xcalloc(OV_TRACENUM, sizeof(struct ov_trace));
759 trace->max = OV_TRACENUM;
760 } else if (trace->cur + 1 == trace->max) {
761 trace->max += OV_TRACENUM;
762 trace->ov_trace = xrealloc(trace->ov_trace, trace->max * sizeof(struct ov_trace));
763 memset(&trace->ov_trace[trace->cur], '\0', sizeof(struct ov_trace) * (trace->max - trace->cur));
765 if (trace->ov_trace[trace->cur].occupied != 0) {
768 trace->ov_trace[trace->cur].gloc.recno = recno;
769 trace->ov_trace[trace->cur].occupied = time(NULL);
770 #endif /* OV_DEBUG */
/* Claim the block and advance the round-robin cursor. */
771 ov.index = ovbuff->index;
772 ov.blocknum = ovbuff->freeblk;
773 ovusedblock(ovbuff, ov.blocknum, true, true);
776 ovbuff->needflush = true;
778 ovlock(ovbuff, INN_LOCK_UNLOCK);
779 ovbuffnext = ovbuff->next;
780 if (ovbuffnext == NULL)
781 ovbuffnext = ovbufftab;
/*
** ovblockfree -- return a block to the free pool: clear its bit in the
** bitfield, make it the freeblk hint when the buffer was marked full,
** and flag the header for flushing.  Under OV_DEBUG, records a free
** trace entry (detecting double frees) keyed by the group's record
** number.  Silently ignores a null OV or an unknown buffer index.
*/
786 static void ovblockfree(OV ov, GROUPENTRY *ge) {
788 static void ovblockfree(OV ov) {
789 #endif /* OV_DEBUG */
793 struct ov_trace_array *trace;
794 #endif /* OV_DEBUG */
796 if (ov.index == NULLINDEX)
798 if ((ovbuff = getovbuff(ov)) == NULL)
800 ovlock(ovbuff, INN_LOCK_WRITE);
802 recno = ((char *)ge - (char *)&GROUPentries[0])/sizeof(GROUPENTRY);
803 if (!ovusedblock(ovbuff, ov.blocknum, false, false)) {
804 syslog(L_FATAL, "%s: 0x%08x trying to free block(%d, %d), but already freed", LocalLogName, recno, ov.index, ov.blocknum);
808 trace = &ovbuff->trace[ov.blocknum];
809 if (trace->ov_trace == NULL) {
810 trace->ov_trace = xcalloc(OV_TRACENUM, sizeof(struct ov_trace));
811 trace->max = OV_TRACENUM;
812 } else if (trace->cur + 1 == trace->max) {
813 trace->max += OV_TRACENUM;
814 trace->ov_trace = xrealloc(trace->ov_trace, trace->max * sizeof(struct ov_trace));
815 memset(&trace->ov_trace[trace->cur], '\0', sizeof(struct ov_trace) * (trace->max - trace->cur));
817 if (trace->ov_trace[trace->cur].freed != 0) {
820 trace->ov_trace[trace->cur].freed = time(NULL);
821 trace->ov_trace[trace->cur].gloc.recno = recno;
823 #endif /* OV_DEBUG */
824 ovusedblock(ovbuff, ov.blocknum, true, false);
/* If the buffer was "full", this block becomes the new free hint. */
826 if (ovbuff->freeblk == ovbuff->totalblk)
827 ovbuff->freeblk = ov.blocknum;
829 ovbuff->needflush = true;
831 ovlock(ovbuff, INN_LOCK_UNLOCK);
/*
** buffindexed_open -- bring the overview method up: build the on/off bit
** mask arrays, sanity-check OV_HDR_PAGESIZE against the system page
** size, read buffindexed.conf, open/mmap every buffer, then open
** (creating when writable) and mmap the group.index file, expanding it
** via GROUPexpand() when it is still empty.  NOTE(review): elided
** excerpt -- declarations and several error/cleanup paths are not
** visible here.
*/
835 bool buffindexed_open(int mode) {
839 static int uninitialized = 1;
/* One-time setup of the onarray/offarray bit masks. */
846 for (i = (longsize * 8) - 1; i >= 0; i--) {
857 pagesize = getpagesize();
858 if (pagesize == -1) {
859 syslog(L_ERROR, "%s: getpagesize failed: %m", LocalLogName);
863 if ((pagesize > OV_HDR_PAGESIZE) || (OV_HDR_PAGESIZE % pagesize)) {
864 syslog(L_ERROR, "%s: OV_HDR_PAGESIZE (%d) is not a multiple of pagesize (%ld)", LocalLogName, OV_HDR_PAGESIZE, pagesize);
868 memset(&groupdatablock, '\0', sizeof(groupdatablock));
869 if (!ovbuffread_config()) {
873 if (!ovbuffinit_disks()) {
877 groupfn = concatpath(innconf->pathdb, "group.index");
/* All buffers are freshly initialized: any old group.index is stale. */
878 if (Needunlink && unlink(groupfn) == 0) {
879 syslog(L_NOTICE, "%s: all buffers are brandnew, unlink '%s'", LocalLogName, groupfn);
881 GROUPfd = open(groupfn, ovbuffmode & OV_WRITE ? O_RDWR | O_CREAT : O_RDONLY, 0660);
883 syslog(L_FATAL, "%s: Could not create %s: %m", LocalLogName, groupfn);
888 if (fstat(GROUPfd, &sb) < 0) {
889 syslog(L_FATAL, "%s: Could not fstat %s: %m", LocalLogName, groupfn);
894 if (sb.st_size > sizeof(GROUPHEADER)) {
897 if (mode & OV_WRITE) {
899 * Note: below mapping of groupheader won't work unless we have
900 * both PROT_READ and PROT_WRITE perms.
902 flag |= PROT_WRITE|PROT_READ;
904 GROUPcount = (sb.st_size - sizeof(GROUPHEADER)) / sizeof(GROUPENTRY);
905 if ((GROUPheader = (GROUPHEADER *)mmap(0, GROUPfilesize(GROUPcount), flag,
906 MAP_SHARED, GROUPfd, 0)) == (GROUPHEADER *) -1) {
907 syslog(L_FATAL, "%s: Could not mmap %s in buffindexed_open: %m", LocalLogName, groupfn);
912 GROUPentries = (GROUPENTRY *)((char *)GROUPheader + sizeof(GROUPHEADER));
915 if (!GROUPexpand(mode)) {
921 close_on_exec(GROUPfd, true);
/*
** GROUPfind -- locate a group by the MD5 hash of its name: pick the
** header hash chain from the leading bytes of the hash, remap if another
** writer grew the file, then walk the chain comparing full hashes.
** Deleted entries are skipped unless Ignoredeleted.  Returns
** GROUPemptyloc when not found.
*/
929 static GROUPLOC GROUPfind(char *group, bool Ignoredeleted) {
934 grouphash = Hash(group, strlen(group));
935 memcpy(&i, &grouphash, sizeof(i));
937 loc = GROUPheader->hash[i % GROUPHEADERHASHSIZE];
938 GROUPremapifneeded(loc);
940 while (!GROUPLOCempty(loc)) {
941 if (GROUPentries[loc.recno].deleted == 0 || Ignoredeleted) {
942 if (memcmp(&grouphash, &GROUPentries[loc.recno].hash, sizeof(HASH)) == 0) {
946 loc = GROUPentries[loc.recno].next;
948 return GROUPemptyloc;
/*
** buffindexed_groupstats -- report a group's low/high/count/flag under a
** per-entry read lock.  NOTE(review): each out-pointer is presumably
** optional (the interleaved lines elided from this excerpt look like
** NULL checks) -- confirm against the full source.
*/
951 bool buffindexed_groupstats(char *group, int *lo, int *hi, int *count, int *flag) {
954 gloc = GROUPfind(group, false);
955 if (GROUPLOCempty(gloc)) {
958 GROUPlock(gloc, INN_LOCK_READ);
960 *lo = GROUPentries[gloc.recno].low;
962 *hi = GROUPentries[gloc.recno].high;
964 *count = GROUPentries[gloc.recno].count;
966 *flag = GROUPentries[gloc.recno].flag;
967 GROUPlock(gloc, INN_LOCK_UNLOCK);
/*
** setinitialge -- reset a GROUPENTRY to its pristine state for (re)use:
** store the hash, clear counters and timestamps, and null out all of
** the current index/data block bookkeeping.
*/
971 static void setinitialge(GROUPENTRY *ge, HASH grouphash, char *flag, GROUPLOC next, ARTNUM lo, ARTNUM hi) {
972 ge->hash = grouphash;
976 ge->expired = ge->deleted = ge->count = 0;
978 ge->baseindex = ge->curindex = ge->curdata = ovnull;
979 ge->curindexoffset = ge->curoffset = 0;
/*
** buffindexed_groupadd -- create a group entry: if the group already
** exists (even soft-deleted) its entry is re-initialized in place;
** otherwise a new node is popped from the freelist and linked at the
** head of its hash chain, all under the global hash lock.  Under
** OV_DEBUG the group name is also recorded in name_table for trace
** reporting.
*/
983 bool buffindexed_groupadd(char *group, ARTNUM lo, ARTNUM hi, char *flag) {
989 struct ov_name_table *ntp;
990 #endif /* OV_DEBUG */
992 gloc = GROUPfind(group, true);
993 if (!GROUPLOCempty(gloc)) {
994 ge = &GROUPentries[gloc.recno];
/* Resurrect a soft-deleted entry in place, keeping its chain link. */
995 if (GROUPentries[gloc.recno].deleted != 0) {
996 grouphash = Hash(group, strlen(group));
997 setinitialge(ge, grouphash, flag, ge->next, lo, hi);
1003 grouphash = Hash(group, strlen(group));
1004 memcpy(&i, &grouphash, sizeof(i));
1005 i = i % GROUPHEADERHASHSIZE;
1006 GROUPlockhash(INN_LOCK_WRITE);
1007 gloc = GROUPnewnode();
1008 ge = &GROUPentries[gloc.recno];
1009 setinitialge(ge, grouphash, flag, GROUPheader->hash[i], lo, hi);
1010 GROUPheader->hash[i] = gloc;
1012 ntp = xmalloc(sizeof(struct ov_name_table));
1013 memset(ntp, '\0', sizeof(struct ov_name_table));
1014 ntp->name = xstrdup(group);
1015 ntp->recno = gloc.recno;
1016 if (name_table == NULL)
1019 ntp->next = name_table;
1022 #endif /* OV_DEBUG */
1023 GROUPlockhash(INN_LOCK_UNLOCK);
/* GROUPfilesize -- on-disk size of group.index holding `count` entries. */
1027 static off_t GROUPfilesize(int count) {
1028 return ((off_t) count * sizeof(GROUPENTRY)) + sizeof(GROUPHEADER);
1031 /* Check if the given GROUPLOC refers to GROUPENTRY that we don't have mmap'ed,
1032 ** if so then see if the file has been grown by another writer and remmap
/* If the file really did grow, unmap the old view and map the larger
   one, refreshing GROUPcount/GROUPheader/GROUPentries. */
1034 static bool GROUPremapifneeded(GROUPLOC loc) {
1037 if (loc.recno < GROUPcount)
1040 if (fstat(GROUPfd, &sb) < 0)
1043 if (GROUPfilesize(GROUPcount) >= sb.st_size)
1047 if (munmap((void *)GROUPheader, GROUPfilesize(GROUPcount)) < 0) {
1048 syslog(L_FATAL, "%s: Could not munmap group.index in GROUPremapifneeded: %m", LocalLogName);
1053 GROUPcount = (sb.st_size - sizeof(GROUPHEADER)) / sizeof(GROUPENTRY);
1054 GROUPheader = (GROUPHEADER *)mmap(0, GROUPfilesize(GROUPcount),
1055 PROT_READ | PROT_WRITE, MAP_SHARED, GROUPfd, 0);
1056 if (GROUPheader == (GROUPHEADER *) -1) {
1057 syslog(L_FATAL, "%s: Could not mmap group.index in GROUPremapifneeded: %m", LocalLogName);
1060 GROUPentries = (GROUPENTRY *)((char *)GROUPheader + sizeof(GROUPHEADER));
1064 /* This function does not need to lock because its callers are expected to do so */
/*
** GROUPexpand -- grow group.index: unmap the current view, ftruncate the
** file to hold more entries, remap it, initialize the header (magic,
** freelist, hash chains) the first time through, and push the newly
** created entries onto the freelist back-to-front.  NOTE(review): the
** elided lines appear to grow GROUPcount by 1024 (see the loop bound);
** confirm against the full source.
*/
1065 static bool GROUPexpand(int mode) {
1070 if (munmap((void *)GROUPheader, GROUPfilesize(GROUPcount)) < 0) {
1071 syslog(L_FATAL, "%s: Could not munmap group.index in GROUPexpand: %m", LocalLogName);
1076 if (ftruncate(GROUPfd, GROUPfilesize(GROUPcount)) < 0) {
1077 syslog(L_FATAL, "%s: Could not extend group.index: %m", LocalLogName);
1082 if (mode & OV_WRITE) {
1084 * Note: below check of magic won't work unless we have both PROT_READ
1085 * and PROT_WRITE perms.
1087 flag |= PROT_WRITE|PROT_READ;
1089 GROUPheader = (GROUPHEADER *)mmap(0, GROUPfilesize(GROUPcount),
1090 flag, MAP_SHARED, GROUPfd, 0);
1091 if (GROUPheader == (GROUPHEADER *) -1) {
1092 syslog(L_FATAL, "%s: Could not mmap group.index in GROUPexpand: %m", LocalLogName);
1095 GROUPentries = (GROUPENTRY *)((char *)GROUPheader + sizeof(GROUPHEADER));
/* Fresh file: stamp the magic and clear the freelist and hash chains. */
1096 if (GROUPheader->magic != GROUPHEADERMAGIC) {
1097 GROUPheader->magic = GROUPHEADERMAGIC;
1098 GROUPLOCclear(&GROUPheader->freelist);
1099 for (i = 0; i < GROUPHEADERHASHSIZE; i++)
1100 GROUPLOCclear(&GROUPheader->hash[i]);
1102 /* Walk the new entries from the back to the front, adding them to the freelist */
1103 for (i = GROUPcount - 1; (GROUPcount - 1024) <= i; i--) {
1104 GROUPentries[i].next = GROUPheader->freelist;
1105 GROUPheader->freelist.recno = i;
/*
** GROUPnewnode -- pop a GROUPLOC off the header freelist, expanding the
** group.index file first when the freelist is empty.  Returns
** GROUPemptyloc if expansion fails.
*/
1110 static GROUPLOC GROUPnewnode(void) {
1113 /* If we didn't find any free space, then make some */
1114 if (GROUPLOCempty(GROUPheader->freelist)) {
1115 if (!GROUPexpand(ovbuffmode)) {
1116 return GROUPemptyloc;
1119 assert(!GROUPLOCempty(GROUPheader->freelist));
1120 loc = GROUPheader->freelist;
1121 GROUPheader->freelist = GROUPentries[GROUPheader->freelist.recno].next;
/*
** buffindexed_groupdel -- soft-delete a group: stamp the deletion time
** and clear the stored hash so GROUPfind() no longer matches it.  The
** entry itself stays on its chain until expiry reclaims it.
*/
1125 bool buffindexed_groupdel(char *group) {
1129 gloc = GROUPfind(group, false);
1130 if (GROUPLOCempty(gloc)) {
1133 GROUPlock(gloc, INN_LOCK_WRITE);
1134 ge = &GROUPentries[gloc.recno];
1135 ge->deleted = time(NULL);
1136 HashClear(&ge->hash);
1137 GROUPlock(gloc, INN_LOCK_UNLOCK);
/* Small GROUPLOC and locking helpers.  An "empty" location is any
   negative record number (see NULLINDEX / GROUPemptyloc). */
1141 static void GROUPLOCclear(GROUPLOC *loc) {
1145 static bool GROUPLOCempty(GROUPLOC loc) {
1146 return (loc.recno < 0);
/* GROUPlockhash -- advisory lock over the group.index header (hash
   chains and freelist). */
1149 static bool GROUPlockhash(enum inn_locktype type) {
1150 return inn_lock_range(GROUPfd, type, true, 0, sizeof(GROUPHEADER));
/* GROUPlock -- advisory lock over a single GROUPENTRY's byte range. */
1153 static bool GROUPlock(GROUPLOC gloc, enum inn_locktype type) {
1154 return inn_lock_range(GROUPfd,
1157 sizeof(GROUPHEADER) + (sizeof(GROUPENTRY) * gloc.recno),
1158 sizeof(GROUPENTRY));
/*
** ovsetcurindexblock -- make a freshly allocated block the group's
** current index block: write an empty OVINDEXHEAD into it, then (when a
** previous index block exists) chain it from that block by rewriting the
** old block's header with next/low/high, and finally reset
** curindexoffset for the new block.  georig (OV_DEBUG only) attributes
** the allocation to the original group during expiry.
*/
1162 static bool ovsetcurindexblock(GROUPENTRY *ge, GROUPENTRY *georig) {
1164 static bool ovsetcurindexblock(GROUPENTRY *ge) {
1165 #endif /* OV_DEBUG */
1168 OVINDEXHEAD ovindexhead;
1170 /* there is no index */
1172 ov = ovblocknew(georig ? georig : ge);
1175 #endif /* OV_DEBUG */
1176 if (ov.index == NULLINDEX) {
1177 syslog(L_ERROR, "%s: ovsetcurindexblock could not get new block", LocalLogName);
1180 if ((ovbuff = getovbuff(ov)) == NULL) {
1181 syslog(L_ERROR, "%s: ovsetcurindexblock could not get ovbuff block for new, %d, %d", LocalLogName, ov.index, ov.blocknum);
/* Initialize the new block's header to "empty, no successor". */
1184 ovindexhead.next = ovnull;
1185 ovindexhead.low = 0;
1186 ovindexhead.high = 0;
1187 #ifdef MMAP_MISSES_WRITES
1188 if (mmapwrite(ovbuff->fd, &ovindexhead, sizeof(OVINDEXHEAD), ovbuff->base + ov.blocknum * OV_BLOCKSIZE) != sizeof(OVINDEXHEAD)) {
1190 if (pwrite(ovbuff->fd, &ovindexhead, sizeof(OVINDEXHEAD), ovbuff->base + ov.blocknum * OV_BLOCKSIZE) != sizeof(OVINDEXHEAD)) {
1191 #endif /* MMAP_MISSES_WRITES */
1192 syslog(L_ERROR, "%s: could not write index record index '%d', blocknum '%d': %m", LocalLogName, ge->curindex.index, ge->curindex.blocknum);
1195 if (ge->baseindex.index == NULLINDEX) {
/* Link the new block from the previous current index block. */
1198 if ((ovbuff = getovbuff(ge->curindex)) == NULL)
1201 if (!ovusedblock(ovbuff, ge->curindex.blocknum, false, false)) {
1202 syslog(L_FATAL, "%s: block(%d, %d) not occupied (index)", LocalLogName, ovbuff->index, ge->curindex.blocknum);
1205 #endif /* OV_DEBUG */
1206 ovindexhead.next = ov;
1207 ovindexhead.low = ge->curlow;
1208 ovindexhead.high = ge->curhigh;
1209 #ifdef MMAP_MISSES_WRITES
1210 if (mmapwrite(ovbuff->fd, &ovindexhead, sizeof(OVINDEXHEAD), ovbuff->base + ge->curindex.blocknum * OV_BLOCKSIZE) != sizeof(OVINDEXHEAD)) {
1212 if (pwrite(ovbuff->fd, &ovindexhead, sizeof(OVINDEXHEAD), ovbuff->base + ge->curindex.blocknum * OV_BLOCKSIZE) != sizeof(OVINDEXHEAD)) {
1213 #endif /* MMAP_MISSES_WRITES */
1214 syslog(L_ERROR, "%s: could not write index record index '%d', blocknum '%d': %m", LocalLogName, ge->curindex.index, ge->curindex.blocknum);
1219 ge->curindexoffset = 0;
/*
** ovaddrec -- append one overview record for artnum: write the overview
** data into the group's current data block (allocating a fresh block
** when none exists or the remaining space is too small), build an
** OVINDEX entry pointing at it, append that entry to the current index
** block (allocating one via ovsetcurindexblock() when needed or full),
** rewrite the index block header when curlow/curhigh moved, and finally
** update the group's low/high and the curindexoffset/curoffset cursors.
** georig (OV_DEBUG only) attributes allocations during expiry.
*/
1226 static bool ovaddrec(GROUPENTRY *ge, ARTNUM artnum, TOKEN token, char *data, int len, time_t arrived, time_t expires, GROUPENTRY *georig) {
1228 static bool ovaddrec(GROUPENTRY *ge, ARTNUM artnum, TOKEN token, char *data, int len, time_t arrived, time_t expires) {
1229 #endif /* OV_DEBUG */
1233 OVINDEXHEAD ovindexhead;
1234 bool needupdate = false;
1237 #endif /* OV_DEBUG */
1240 if (OV_BLOCKSIZE < len) {
1241 syslog(L_ERROR, "%s: overview data must be under %d (%d)", LocalLogName, OV_BLOCKSIZE, len);
1244 if (ge->curdata.index == NULLINDEX) {
1245 /* no data block allocated */
1247 ov = ovblocknew(georig ? georig : ge);
1250 #endif /* OV_DEBUG */
1251 if (ov.index == NULLINDEX) {
1252 syslog(L_ERROR, "%s: ovaddrec could not get new block", LocalLogName);
1255 if ((ovbuff = getovbuff(ov)) == NULL) {
1256 syslog(L_ERROR, "%s: ovaddrec could not get ovbuff block for new, %d, %d, %ld", LocalLogName, ov.index, ov.blocknum, artnum);
1261 } else if ((ovbuff = getovbuff(ge->curdata)) == NULL)
1263 else if (OV_BLOCKSIZE - ge->curoffset < len) {
1264 /* too short to store data, allocate new block */
1266 ov = ovblocknew(georig ? georig : ge);
1269 #endif /* OV_DEBUG */
1270 if (ov.index == NULLINDEX) {
1271 syslog(L_ERROR, "%s: ovaddrec could not get new block", LocalLogName);
1274 if ((ovbuff = getovbuff(ov)) == NULL) {
1275 syslog(L_ERROR, "%s: ovaddrec could not get ovbuff block for new, %d, %d, %ld", LocalLogName, ov.index, ov.blocknum, artnum);
1282 if (!ovusedblock(ovbuff, ge->curdata.blocknum, false, false)) {
1283 syslog(L_FATAL, "%s: block(%d, %d) not occupied", LocalLogName, ovbuff->index, ge->curdata.blocknum);
1284 buffindexed_close();
1287 #endif /* OV_DEBUG */
/* Append the overview data at the current offset in the data block. */
1288 #ifdef MMAP_MISSES_WRITES
1289 if (mmapwrite(ovbuff->fd, data, len, ovbuff->base + ge->curdata.blocknum * OV_BLOCKSIZE + ge->curoffset) != len) {
1291 if (pwrite(ovbuff->fd, data, len, ovbuff->base + ge->curdata.blocknum * OV_BLOCKSIZE + ge->curoffset) != len) {
1292 #endif /* MMAP_MISSES_WRITES */
1293 syslog(L_ERROR, "%s: could not append overview record index '%d', blocknum '%d': %m", LocalLogName, ge->curdata.index, ge->curdata.blocknum);
/* Build the index entry that locates the data just written. */
1296 memset(&ie, '\0', sizeof(ie));
1299 ie.index = ge->curdata.index;
1300 ie.blocknum = ge->curdata.blocknum;
1301 ie.offset = ge->curoffset;
1303 ie.arrived = arrived;
1304 ie.expires = expires;
/* No index block yet, or the current one is full: chain a new one. */
1306 if (ge->baseindex.index == NULLINDEX || ge->curindexoffset == OVINDEXMAX) {
1308 if (!ovsetcurindexblock(ge, georig)) {
1310 if (!ovsetcurindexblock(ge)) {
1311 #endif /* OV_DEBUG */
1312 syslog(L_ERROR, "%s: could not set current index", LocalLogName);
1316 if ((ovbuff = getovbuff(ge->curindex)) == NULL)
1319 if (!ovusedblock(ovbuff, ge->curindex.blocknum, false, false)) {
1320 syslog(L_FATAL, "%s: block(%d, %d) not occupied (index)", LocalLogName, ovbuff->index, ge->curindex.blocknum);
1321 buffindexed_close();
1324 #endif /* OV_DEBUG */
1326 #ifdef MMAP_MISSES_WRITES
1326 if (mmapwrite(ovbuff->fd, &ie, sizeof(ie), ovbuff->base + ge->curindex.blocknum * OV_BLOCKSIZE + sizeof(OVINDEXHEAD) + sizeof(ie) * ge->curindexoffset) != sizeof(ie)) {
1328 if (pwrite(ovbuff->fd, &ie, sizeof(ie), ovbuff->base + ge->curindex.blocknum * OV_BLOCKSIZE + sizeof(OVINDEXHEAD) + sizeof(ie) * ge->curindexoffset) != sizeof(ie)) {
1329 #endif /* MMAP_MISSES_WRITES */
1330 syslog(L_ERROR, "%s: could not write index record index '%d', blocknum '%d': %m", LocalLogName, ge->curindex.index, ge->curindex.blocknum);
/* Track whether this article widens the current block's art range. */
1333 if ((ge->curlow <= 0) || (ge->curlow > artnum)) {
1334 ge->curlow = artnum;
1337 if ((ge->curhigh <= 0) || (ge->curhigh < artnum)) {
1338 ge->curhigh = artnum;
/* Range moved: rewrite the index block header with the new low/high. */
1342 ovindexhead.next = ovnull;
1343 ovindexhead.low = ge->curlow;
1344 ovindexhead.high = ge->curhigh;
1345 #ifdef MMAP_MISSES_WRITES
1346 if (mmapwrite(ovbuff->fd, &ovindexhead, sizeof(OVINDEXHEAD), ovbuff->base + ge->curindex.blocknum * OV_BLOCKSIZE) != sizeof(OVINDEXHEAD)) {
1348 if (pwrite(ovbuff->fd, &ovindexhead, sizeof(OVINDEXHEAD), ovbuff->base + ge->curindex.blocknum * OV_BLOCKSIZE) != sizeof(OVINDEXHEAD)) {
1349 #endif /* MMAP_MISSES_WRITES */
1350 syslog(L_ERROR, "%s: could not write index record index '%d', blocknum '%d': %m", LocalLogName, ge->curindex.index, ge->curindex.blocknum);
/* Finally, advance the group-wide water marks and the write cursors. */
1354 if ((ge->low <= 0) || (ge->low > artnum))
1356 if ((ge->high <= 0) || (ge->high < artnum))
1358 ge->curindexoffset++;
1359 ge->curoffset += len;
/*
** Store one overview record for GROUP/ARTNUM. Rejects records larger than
** one overview block, locks the group entry, and delegates the actual
** append to ovaddrec(); returns false on lookup or storage failure.
** NOTE(review): sampled listing — intermediate lines are elided here.
*/
1364 bool buffindexed_add(char *group, ARTNUM artnum, TOKEN token, char *data, int len, time_t arrived, time_t expires) {
/* A record can never span blocks, so anything over OV_BLOCKSIZE is refused. */
1368 if (len > OV_BLOCKSIZE) {
1369 syslog(L_ERROR, "%s: overview data is too large %d", LocalLogName, len);
1373 gloc = GROUPfind(group, false);
1374 if (GROUPLOCempty(gloc)) {
/* Hold the write lock on the group entry for the whole append. */
1377 GROUPlock(gloc, INN_LOCK_WRITE);
1378 /* prepend block(s) if needed. */
1379 ge = &GROUPentries[gloc.recno];
/* With Cutofflow set, silently drop articles below the group's low mark. */
1380 if (Cutofflow && ge->low > artnum) {
1381 GROUPlock(gloc, INN_LOCK_UNLOCK);
/* OV_DEBUG builds pass an extra trace argument to ovaddrec(). */
1385 if (!ovaddrec(ge, artnum, token, data, len, arrived, expires, NULL)) {
1387 if (!ovaddrec(ge, artnum, token, data, len, arrived, expires)) {
1388 #endif /* OV_DEBUG */
1390 GROUPlock(gloc, INN_LOCK_UNLOCK);
1391 syslog(L_ERROR, "%s: no space left for buffer, adding '%s'", LocalLogName, group);
1394 syslog(L_ERROR, "%s: could not add overview for '%s'", LocalLogName, group);
1396 GROUPlock(gloc, INN_LOCK_UNLOCK);
/* Cancel hook of the overview API; the token parameter is unused here. */
1401 bool buffindexed_cancel(TOKEN token UNUSED) {
/*
** Release every overview block referenced by the current group mapping:
** the index blocks on Giblist and the data blocks in groupdatablock[].
** OV_DEBUG builds take the owning GROUPENTRY for block-usage tracing.
*/
1406 static void freegroupblock(GROUPENTRY *ge) {
1408 static void freegroupblock(void) {
1409 #endif /* OV_DEBUG */
1410 GROUPDATABLOCK *gdb;
/* Free all index blocks recorded while the group was mmapped. */
1414 for (giblist = Giblist ; giblist != NULL ; giblist = giblist->next) {
1416 ovblockfree(giblist->ov, ge);
1418 ovblockfree(giblist->ov);
1419 #endif /* OV_DEBUG */
/* Then free every data block hashed into groupdatablock[]. */
1421 for (i = 0 ; i < GROUPDATAHASHSIZE ; i++) {
1422 for (gdb = groupdatablock[i] ; gdb != NULL ; gdb = gdb->next) {
1424 ovblockfree(gdb->datablk, ge);
1426 ovblockfree(gdb->datablk);
1427 #endif /* OV_DEBUG */
/*
** Tear down the per-group in-memory state built by ovgroupmmap():
** free the data-block hash chains, the index-block list, and (unless
** caching is active) the Gib index array and any cached search.
*/
1432 static void ovgroupunmap(void) {
1433 GROUPDATABLOCK *gdb, *gdbnext;
1435 GIBLIST *giblist, *giblistnext;
/* Walk each hash chain, saving next before the node is released. */
1437 for (i = 0 ; i < GROUPDATAHASHSIZE ; i++) {
1438 for (gdb = groupdatablock[i] ; gdb != NULL ; gdb = gdbnext) {
1439 gdbnext = gdb->next;
1442 groupdatablock[i] = NULL;
1444 for (giblist = Giblist ; giblist != NULL ; giblist = giblistnext) {
1445 giblistnext = giblist->next;
/* Keep Gib alive when Cache is set so gettoken() can reuse it. */
1449 if (!Cache && (Gib != NULL)) {
1452 if (Cachesearch != NULL) {
1453 free(Cachesearch->group);
/*
** Push GDB onto the head of its hash chain; the bucket is derived from
** the block's ovbuff index plus block number.
*/
1460 static void insertgdb(OV *ov, GROUPDATABLOCK *gdb) {
1461 gdb->next = groupdatablock[(ov->index + ov->blocknum) % GROUPDATAHASHSIZE];
1462 groupdatablock[(ov->index + ov->blocknum) % GROUPDATAHASHSIZE] = gdb;
/*
** Look up the GROUPDATABLOCK for OV in the hash table; both the ovbuff
** index and the block number must match. Returns NULL when absent
** (fall-through past the loop, elided in this listing).
*/
1466 static GROUPDATABLOCK *searchgdb(OV *ov) {
1467 GROUPDATABLOCK *gdb;
1469 gdb = groupdatablock[(ov->index + ov->blocknum) % GROUPDATAHASHSIZE];
1470 for (; gdb != NULL ; gdb = gdb->next) {
1471 if (ov->index == gdb->datablk.index && ov->blocknum == gdb->datablk.blocknum)
/*
** qsort() comparator ordering OVINDEX entries by ascending article number.
*/
1477 static int INDEXcompare(const void *p1, const void *p2) {
1481 oi1 = (OVINDEX *)p1;
1482 oi2 = (OVINDEX *)p2;
/* Compare instead of subtracting: ARTNUM is wider than int, so the
   difference could overflow or be truncated in the int return value
   and misorder the sort. The sign-of-comparison form is always safe. */
1483 return (oi1->artnum > oi2->artnum) - (oi1->artnum < oi2->artnum);
/*
** Map a group's overview into memory for [low, high]: walk the chain of
** index blocks from ge->baseindex, copy all index entries into the global
** Gib array, sort and de-duplicate them, register each referenced data
** block in the groupdatablock[] hash, and (for small retrievals) mmap the
** data blocks up front. Returns false on any lookup/mmap failure.
** NOTE(review): sampled listing — error-path and cleanup lines are elided.
*/
1486 static bool ovgroupmmap(GROUPENTRY *ge, int low, int high, bool needov) {
1487 OV ov = ge->baseindex;
1489 GROUPDATABLOCK *gdb;
1490 int pagefudge, limit, i, count, len;
1491 off_t offset, mmapoffset;
/* Nothing to do for an empty/inverted range. */
1496 if (high - low < 0) {
1500 Gibcount = ge->count;
1503 Gib = xmalloc(Gibcount * sizeof(OVINDEX));
/* Follow the singly linked chain of index blocks until NULLINDEX. */
1505 while (ov.index != NULLINDEX) {
1506 ovbuff = getovbuff(ov);
1507 if (ovbuff == NULL) {
1508 syslog(L_ERROR, "%s: ovgroupmmap ovbuff is null(ovindex is %d, ovblock is %d", LocalLogName, ov.index, ov.blocknum);
/* mmap offsets must be page aligned; pagefudge is the slack inside the page. */
1512 offset = ovbuff->base + (ov.blocknum * OV_BLOCKSIZE);
1513 pagefudge = offset % pagesize;
1514 mmapoffset = offset - pagefudge;
1515 len = pagefudge + OV_BLOCKSIZE;
1516 if ((addr = mmap(NULL, len, PROT_READ, MAP_SHARED, ovbuff->fd, mmapoffset)) == MAP_FAILED) {
1517 syslog(L_ERROR, "%s: ovgroupmmap could not mmap index block: %m", LocalLogName);
1521 ovblock = (OVBLOCK *)((char *)addr + pagefudge);
/* The current (partially filled) index block only holds curindexoffset entries. */
1522 if (ov.index == ge->curindex.index && ov.blocknum == ge->curindex.blocknum) {
1523 limit = ge->curindexoffset;
1527 for (i = 0 ; i < limit ; i++) {
/* Grow Gib when the stored count underestimates the real entry count. */
1528 if (Gibcount == count) {
1529 Gibcount += OV_FUDGE;
1530 Gib = xrealloc(Gib, Gibcount * sizeof(OVINDEX));
1532 Gib[count++] = ovblock->ovindex[i];
/* Remember this index block on Giblist for later unmap/free. */
1534 giblist = xmalloc(sizeof(GIBLIST));
1536 giblist->next = Giblist;
1538 ov = ovblock->ovindexhead.next;
1542 qsort(Gib, Gibcount, sizeof(OVINDEX), INDEXcompare);
1543 /* Remove duplicates. */
1544 for (i = 0; i < Gibcount - 1; i++) {
1545 if (Gib[i].artnum == Gib[i+1].artnum) {
1546 /* lower position is removed */
/* Register the data block of every in-range entry in the hash table. */
1553 for (i = 0 ; i < Gibcount ; i++) {
1554 if (Gib[i].artnum == 0 || Gib[i].artnum < low || Gib[i].artnum > high)
1556 ov.index = Gib[i].index;
1557 ov.blocknum = Gib[i].blocknum;
1558 gdb = searchgdb(&ov);
1561 ovbuff = getovbuff(ov);
1564 gdb = xmalloc(sizeof(GROUPDATABLOCK));
1567 gdb->mmapped = false;
1568 insertgdb(&ov, gdb);
/* Large retrievals defer data-block mmaps to ovsearch(), one at a time. */
1573 if (count * OV_BLOCKSIZE > innconf->keepmmappedthreshold * 1024)
1574 /* large retrieval, mmap is done in ovsearch() */
1576 for (i = 0 ; i < GROUPDATAHASHSIZE ; i++) {
1577 for (gdb = groupdatablock[i] ; gdb != NULL ; gdb = gdb->next) {
1579 ovbuff = getovbuff(ov);
1580 offset = ovbuff->base + (ov.blocknum * OV_BLOCKSIZE);
1581 pagefudge = offset % pagesize;
1582 mmapoffset = offset - pagefudge;
1583 gdb->len = pagefudge + OV_BLOCKSIZE;
1584 if ((gdb->addr = mmap(NULL, gdb->len, PROT_READ, MAP_SHARED, ovbuff->fd, mmapoffset)) == MAP_FAILED) {
1585 syslog(L_ERROR, "%s: ovgroupmmap could not mmap data block: %m", LocalLogName);
1590 gdb->data = (char *)gdb->addr + pagefudge;
1591 gdb->mmapped = true;
/*
** Open a search over GROUP for [low, high]: map the group's overview via
** ovgroupmmap() and allocate an OVSEARCH cursor recording the group name
** (heap copy — released by ovclosesearch), range, and entry count.
** Returns NULL if the group is unknown or the mapping fails.
*/
1597 static void *ovopensearch(char *group, int low, int high, bool needov) {
1602 gloc = GROUPfind(group, false);
1603 if (GROUPLOCempty(gloc))
1606 ge = &GROUPentries[gloc.recno];
/* Clamp the requested high mark to what the group actually holds. */
1609 if (high > ge->high)
1612 if (!ovgroupmmap(ge, low, high, needov)) {
1616 search = xmalloc(sizeof(OVSEARCH));
1620 search->group = xstrdup(group);
1621 search->needov = needov;
1622 search->gloc = gloc;
/* Snapshot the count so getartinfo can detect later additions. */
1623 search->count = ge->count;
1624 search->gdb.mmapped = false;
1625 return (void *)search;
/*
** Public opensearch entry point: drop any cached search for another group,
** take the group write lock, and delegate to ovopensearch() (needov=true).
** The lock is released on failure; on success it is held until closesearch.
*/
1628 void *buffindexed_opensearch(char *group, int low, int high) {
1635 if (Cachesearch != NULL) {
1636 free(Cachesearch->group);
1641 gloc = GROUPfind(group, false);
1642 if (GROUPLOCempty(gloc)) {
1645 GROUPlock(gloc, INN_LOCK_WRITE);
1646 if ((handle = ovopensearch(group, low, high, true)) == NULL)
1647 GROUPlock(gloc, INN_LOCK_UNLOCK);
/*
** Advance the search cursor to the next in-range Gib entry and return its
** overview data/metadata through the out parameters (each may be NULL).
** For large retrievals the data block is mmapped lazily here, keeping at
** most one ad-hoc mapping alive in search->gdb at a time.
** NOTE(review): sampled listing — some branch bodies are elided.
*/
1651 static bool ovsearch(void *handle, ARTNUM *artnum, char **data, int *len, TOKEN *token, time_t *arrived, time_t *expires) {
1652 OVSEARCH *search = (OVSEARCH *)handle;
1654 GROUPDATABLOCK *gdb;
1655 off_t offset, mmapoffset;
/* End of the index array: nothing more to return. */
1660 if (search->cur == Gibcount) {
/* Skip zeroed (deduplicated) entries and those below the low bound. */
1663 while (Gib[search->cur].artnum == 0 || Gib[search->cur].artnum < search->lo) {
1665 if (search->cur == Gibcount)
1668 if (Gib[search->cur].artnum > search->hi)
1671 if (search->needov) {
/* Entry with no overview data: report the article number only. */
1672 if (Gib[search->cur].index == NULLINDEX) {
1676 *artnum = Gib[search->cur].artnum;
1679 *artnum = Gib[search->cur].artnum;
1681 *len = Gib[search->cur].len;
1683 *arrived = Gib[search->cur].arrived;
1685 *expires = Gib[search->cur].expires;
1687 srchov.index = Gib[search->cur].index;
1688 srchov.blocknum = Gib[search->cur].blocknum;
1689 gdb = searchgdb(&srchov);
1696 if (!gdb->mmapped) {
1697 /* block needs to be mmapped */
1698 if (search->gdb.mmapped) {
1699 /* check previous mmapped area */
1700 if (search->gdb.datablk.blocknum != srchov.blocknum || search->gdb.datablk.index != srchov.index) {
1701 /* different one, release previous one */
1702 munmap(search->gdb.addr, search->gdb.len);
1709 search->gdb.datablk.blocknum = srchov.blocknum;
1710 search->gdb.datablk.index = srchov.index;
1711 ovbuff = getovbuff(srchov);
/* Page-align the mmap offset; pagefudge is the in-page slack. */
1712 offset = ovbuff->base + (srchov.blocknum * OV_BLOCKSIZE);
1713 pagefudge = offset % pagesize;
1714 mmapoffset = offset - pagefudge;
1715 search->gdb.len = pagefudge + OV_BLOCKSIZE;
1716 if ((search->gdb.addr = mmap(NULL, search->gdb.len, PROT_READ, MAP_SHARED, ovbuff->fd, mmapoffset)) == MAP_FAILED) {
1717 syslog(L_ERROR, "%s: ovsearch could not mmap data block: %m", LocalLogName);
1720 gdb->data = search->gdb.data = (char *)search->gdb.addr + pagefudge;
1721 search->gdb.mmapped = true;
/* The record lives at its stored byte offset inside the data block. */
1724 *data = (char *)gdb->data + Gib[search->cur].offset;
1729 if (Gib[search->cur].index == NULLINDEX && !search->needov) {
1733 *token = Gib[search->cur].token;
/* Public search wrapper: delegates to ovsearch() without requesting the
   expiry time (expires argument passed as NULL). */
1739 bool buffindexed_search(void *handle, ARTNUM *artnum, char **data, int *len, TOKEN *token, time_t *arrived) {
1740 return(ovsearch(handle, artnum, data, len, token, arrived, NULL));
/*
** Close a search: unmap every mmapped data block (including the lazily
** mapped one kept in search->gdb), optionally free the group's overview
** blocks (freeblock path, used by expire), then either stash the search
** in Cachesearch for reuse or release it.
*/
1743 static void ovclosesearch(void *handle, bool freeblock) {
1744 OVSEARCH *search = (OVSEARCH *)handle;
1745 GROUPDATABLOCK *gdb;
1750 #endif /* OV_DEBUG */
/* Unmap the data blocks mapped eagerly by ovgroupmmap(). */
1752 for (i = 0 ; i < GROUPDATAHASHSIZE ; i++) {
1753 for (gdb = groupdatablock[i] ; gdb != NULL ; gdb = gdb->next) {
1755 munmap(gdb->addr, gdb->len);
/* And the single ad-hoc mapping ovsearch() may have left behind. */
1758 if (search->gdb.mmapped)
1759 munmap(search->gdb.addr, search->gdb.len);
1762 gloc = GROUPfind(search->group, false);
1763 if (!GROUPLOCempty(gloc)) {
1764 ge = &GROUPentries[gloc.recno];
1769 #endif /* OV_DEBUG */
/* Keep the search around for gettoken()-style reuse when caching. */
1773 Cachesearch = search;
1775 free(search->group);
/*
** Public closesearch entry point: close the handle without freeing blocks
** and release the group lock taken by buffindexed_opensearch(). gloc is
** saved first because ovclosesearch() may free the search structure.
*/
1781 void buffindexed_closesearch(void *handle) {
1782 OVSEARCH *search = (OVSEARCH *)handle;
1785 gloc = search->gloc;
1786 ovclosesearch(handle, false);
1787 GROUPlock(gloc, INN_LOCK_UNLOCK);
1790 /* get token from sorted index */
/*
** Binary-search the sorted Gib array for ARTNUM and store its storage
** token in *token. Zeroed entries mark removed duplicates; when the probe
** lands on one, scan left to the nearest live entry before deciding which
** half to keep. Returns false when the article is not present.
*/
1791 static bool gettoken(ARTNUM artnum, TOKEN *token) {
1792 int i, j, offset, limit;
/* Classic halving loop over [offset, limit). */
1795 for (i = (limit - offset) / 2 ; i > 0 ; i = (limit - offset) / 2) {
1796 if (Gib[offset + i].artnum == artnum) {
1797 *token = Gib[offset + i].token;
1799 } else if (Gib[offset + i].artnum == 0) {
1800 /* case for duplicated index */
/* Probe hit a hole: walk left to the nearest non-zero entry. */
1801 for (j = offset + i - 1; j >= offset ; j --) {
1802 if (Gib[j].artnum != 0)
1806 /* article not found */
1809 if (Gib[j].artnum == artnum) {
1810 *token = Gib[j].token;
1812 } else if (Gib[j].artnum < artnum) {
1813 /* limit is not changed */
1816 /* offset is not changed */
1819 } else if (Gib[offset + i].artnum < artnum) {
1820 /* limit is unchanged */
1823 /* offset is unchanged */
/* Down to one candidate: verify it before returning its token. */
1828 if (Gib[offset].artnum != artnum) {
1829 /* article not found */
1832 *token = Gib[offset].token;
/*
** Fetch the storage token for GROUP/ARTNUM. Fast path: if a cached search
** for this group exists, answer from the in-memory Gib index via
** gettoken(); the cache is only trusted when the group's article count is
** unchanged. Slow path: open a one-article search and pull the token.
** NOTE(review): sampled listing — some branch bodies are elided.
*/
1836 bool buffindexed_getartinfo(char *group, ARTNUM artnum, TOKEN *token) {
1839 bool retval, grouplocked = false;
/* Cached search is per-group; discard it if it belongs to another group. */
1842 if (Cachesearch != NULL && strcmp(Cachesearch->group, group) != 0) {
1845 free(Cachesearch->group);
1849 if (gettoken(artnum, token))
1852 /* examine to see if overview index are increased */
1853 gloc = GROUPfind(group, false);
1854 if (GROUPLOCempty(gloc)) {
1857 GROUPlock(gloc, INN_LOCK_WRITE);
1858 if ((Cachesearch != NULL) && (GROUPentries[gloc.recno].count == Cachesearch->count)) {
1859 /* no new overview data is stored */
1860 GROUPlock(gloc, INN_LOCK_UNLOCK);
1866 if (Cachesearch != NULL) {
1867 free(Cachesearch->group);
1876 gloc = GROUPfind(group, false);
1877 if (GROUPLOCempty(gloc)) {
1880 GROUPlock(gloc, INN_LOCK_WRITE);
/* Single-article search: low == high == artnum, overview data not needed. */
1882 if (!(handle = ovopensearch(group, artnum, artnum, false))) {
1883 GROUPlock(gloc, INN_LOCK_UNLOCK);
1886 retval = buffindexed_search(handle, NULL, NULL, NULL, token, NULL);
1887 ovclosesearch(handle, false);
1888 GROUPlock(gloc, INN_LOCK_UNLOCK);
/*
** Expire overview data. With GROUP == NULL, sweep every group entry and
** purge overview for groups no longer reachable ("hidden" entries). For a
** named group, rebuild its overview: walk all records via a search, drop
** expired/cancelled articles, and re-add survivors into a fresh GROUPENTRY
** whose low/high marks replace the old ones. *lo receives the new low mark.
** NOTE(review): sampled listing — many lines, including the copy of newge
** back into *ge, are elided.
*/
1892 bool buffindexed_expiregroup(char *group, int *lo, struct history *h) {
1894 GROUPENTRY newge, *ge;
1895 GROUPLOC gloc, next;
1899 ARTNUM artnum, low, high;
1903 time_t arrived, expires;
/* NULL group: scan the whole entry table for hidden groups. */
1905 if (group == NULL) {
1906 for (i = 0 ; i < GROUPheader->freelist.recno ; i++) {
1908 GROUPlock(gloc, INN_LOCK_WRITE);
1909 ge = &GROUPentries[gloc.recno];
/* Skip entries expired during this run or with nothing stored. */
1910 if (ge->expired >= OVrealnow || ge->count == 0) {
1911 GROUPlock(gloc, INN_LOCK_UNLOCK);
1914 if (!ovgroupmmap(ge, ge->low, ge->high, true)) {
1915 GROUPlock(gloc, INN_LOCK_UNLOCK);
1916 syslog(L_ERROR, "%s: could not mmap overview for hidden groups(%d)", LocalLogName, i);
1919 for (j = 0 ; j < Gibcount ; j++) {
1920 if (Gib[j].artnum == 0)
1922 /* this may be duplicated, but ignore it in this case */
1923 OVEXPremove(Gib[j].token, true, NULL, 0);
1931 ge->expired = time(NULL);
1933 GROUPlock(gloc, INN_LOCK_UNLOCK);
/* Named group: rebuild its overview in place. */
1937 gloc = GROUPfind(group, false);
1938 if (GROUPLOCempty(gloc)) {
1941 GROUPlock(gloc, INN_LOCK_WRITE);
1942 ge = &GROUPentries[gloc.recno];
1943 if (ge->count == 0) {
1946 ge->expired = time(NULL);
1947 GROUPlock(gloc, INN_LOCK_UNLOCK);
/* Start a fresh entry that surviving records will be re-added into. */
1957 setinitialge(&newge, hash, &flag, next, 0, high);
1958 if ((handle = ovopensearch(group, low, high, true)) == NULL) {
1959 ge->expired = time(NULL);
1960 GROUPlock(gloc, INN_LOCK_UNLOCK);
1961 syslog(L_ERROR, "%s: could not open overview for '%s'", LocalLogName, group);
1964 while (ovsearch(handle, &artnum, &data, &len, &token, &arrived, &expires)) {
/* Optionally stat the article in the spool to confirm it still exists. */
1968 if (!SMprobe(EXPENSIVESTAT, &token, NULL) || OVstatall) {
1969 if ((ah = SMretrieve(token, RETR_STAT)) == NULL)
1973 if (!OVhisthasmsgid(h, data))
1976 if (innconf->groupbaseexpiry && OVgroupbasedexpire(token, group, data, len, arrived, expires))
/* Survivor: copy its record into the rebuilt group entry. */
1979 if (!ovaddrec(&newge, artnum, token, data, len, arrived, expires, ge)) {
1981 if (!ovaddrec(&newge, artnum, token, data, len, arrived, expires)) {
1982 #endif /* OV_DEBUG */
1983 ovclosesearch(handle, true);
1984 ge->expired = time(NULL);
1985 GROUPlock(gloc, INN_LOCK_UNLOCK);
1986 syslog(L_ERROR, "%s: could not add new overview for '%s'", LocalLogName, group);
1991 /* no article for the group */
1992 newge.low = newge.high;
1996 /* lomark should be himark + 1, if no article for the group */
2001 ovclosesearch(handle, true);
2002 ge->expired = time(NULL);
2003 GROUPlock(gloc, INN_LOCK_UNLOCK);
/*
** Control/ioctl-style entry point of the overview API. Visible cases:
** space usage (percent used across all ovbuffs), sort type (none),
** cutoff-low toggle, static-search capability, cache toggle.
** NOTE(review): sampled listing — the switch scaffolding is elided.
*/
2007 bool buffindexed_ctl(OVCTLTYPE type, void *val) {
2008 int total, used, *i, j;
2009 OVBUFF *ovbuff = ovbufftab;
2010 OVSORTTYPE *sorttype;
2012 GROUPDATABLOCK *gdb;
/* Sum capacity and usage across every buffer under a read lock. */
2016 for (total = used = 0 ; ovbuff != (OVBUFF *)NULL ; ovbuff = ovbuff->next) {
2017 ovlock(ovbuff, INN_LOCK_READ);
2019 total += ovbuff->totalblk;
2020 used += ovbuff->usedblk;
2021 ovlock(ovbuff, INN_LOCK_UNLOCK);
2024 *i = (used * 100) / total;
2027 sorttype = (OVSORTTYPE *)val;
2028 *sorttype = OVNOSORT;
2031 Cutofflow = *(bool *)val;
2033 case OVSTATICSEARCH:
2036 for (j = 0 ; j < GROUPDATAHASHSIZE ; j++) {
2037 for (gdb = groupdatablock[j] ; gdb != NULL ; gdb = gdb->next) {
2046 Cache = *(bool *)val;
2049 boolval = (bool *)val;
/* Turning the cache off discards any cached search state. */
2054 if (Cachesearch != NULL) {
2055 free(Cachesearch->group);
/*
** Shut the overview method down: in OV_DEBUG builds, dump the block
** occupy/free trace and the name table to a per-pid file under pathtmp;
** then release cached search state, unmap the group index, and walk the
** ovbuff list freeing each buffer.
** NOTE(review): sampled listing — intermediate lines are elided.
*/
2066 void buffindexed_close(void) {
2068 OVBUFF *ovbuffnext, *ovbuff = ovbufftab;
2074 struct ov_trace_array *trace;
2075 struct ov_name_table *ntp;
2077 #endif /* OV_DEBUG */
2080 for (; ovbuff != (OVBUFF *)NULL; ovbuff = ovbuff->next) {
2081 for (i = 0 ; i < ovbuff->totalblk ; i++) {
2082 trace = &ovbuff->trace[i];
2083 if (trace->ov_trace == NULL)
2085 for (j = 0 ; j <= trace->cur && j < trace->max ; j++) {
2086 if (trace->ov_trace[j].occupied != 0 ||
2087 trace->ov_trace[j].freed != 0) {
/* "%s/%d" plus pid fits within strlen(pathtmp)+11; snprintf bounds it anyway. */
2089 length = strlen(innconf->pathtmp) + 11;
2090 path = xmalloc(length);
2092 snprintf(path, length, "%s/%d", innconf->pathtmp, pid);
2093 if ((F = fopen(path, "w")) == NULL) {
2094 syslog(L_ERROR, "%s: could not open %s: %m", LocalLogName, path);
2098 fprintf(F, "%d: % 6d, % 2d: 0x%08x, % 10d, % 10d\n", ovbuff->index, i, j,
2099 trace->ov_trace[j].gloc.recno,
2100 trace->ov_trace[j].occupied,
2101 trace->ov_trace[j].freed);
2106 if ((ntp = name_table) != NULL) {
2108 length = strlen(innconf->pathtmp) + 11;
2109 path = xmalloc(length);
/* BUG FIX: this used sprintf(path, length, ...), which passes the int
   `length` as the format string — undefined behavior/crash. Use the
   bounded snprintf form, matching the trace-dump branch above. */
2111 snprintf(path, length, "%s/%d", innconf->pathtmp, pid);
2112 if ((F = fopen(path, "w")) == NULL) {
2113 syslog(L_ERROR, "%s: could not open %s: %m", LocalLogName, path);
2118 fprintf(F, "0x%08x: %s\n", ntp->recno, ntp->name);
2127 #endif /* OV_DEBUG */
2131 if (Cachesearch != NULL) {
2132 free(Cachesearch->group);
2137 if (fstat(GROUPfd, &sb) < 0)
2142 if (munmap((void *)GROUPheader, GROUPfilesize(GROUPcount)) < 0) {
2143 syslog(L_FATAL, "%s: could not munmap group.index in buffindexed_close: %m", LocalLogName);
/* Save next before each node is released. */
2147 for (; ovbuff != (OVBUFF *)NULL; ovbuff = ovbuffnext) {
2148 ovbuffnext = ovbuff->next;
/*
** Count the GROUPDATABLOCK entries currently chained in the
** groupdatablock[] hash table (counter/return lines elided in this listing).
*/
2155 static int countgdb(void) {
2157 GROUPDATABLOCK *gdb;
2159 for (i = 0 ; i < GROUPDATAHASHSIZE ; i++) {
2160 for (gdb = groupdatablock[i] ; gdb != NULL ; gdb = gdb->next)
/*
** BUFF_DEBUG standalone driver: open the overview read-only and dump
** diagnostics for one group, given either a numeric group-entry recno or
** a group name on the command line.
** NOTE(review): sampled listing — intermediate lines are elided.
*/
2166 main(int argc, char **argv) {
2167 char *group, flag[2], buff[OV_BLOCKSIZE];
2168 int lo, hi, count, flags, i;
2178 fprintf(stderr, "only one argument can be specified\n");
2181 /* if innconf isn't already read in, do so. */
2182 if (innconf == NULL) {
2183 if (!innconf_read(NULL)) {
2184 fprintf(stderr, "reading inn.conf failed\n");
2188 if (!buffindexed_open(OV_READ)) {
2189 fprintf(stderr, "buffindexed_open failed\n");
2192 fprintf(stdout, "GROUPheader->freelist.recno is %d(0x%08x)\n", GROUPheader->freelist.recno, GROUPheader->freelist.recno);
/* Numeric argument: treat it as a raw group-entry record number. */
2194 if (isdigit(*group)) {
2195 gloc.recno = atoi(group);
2196 ge = &GROUPentries[gloc.recno];
2197 fprintf(stdout, "left articles are %d for %d, last expiry is %d\n", ge->count, gloc.recno, ge->expired);
2198 if (ge->count == 0) {
2199 GROUPlock(gloc, INN_LOCK_UNLOCK);
2202 if (!ovgroupmmap(ge, ge->low, ge->high, true)) {
2203 fprintf(stderr, "ovgroupmmap failed\n");
2204 GROUPlock(gloc, INN_LOCK_UNLOCK);
2206 for (giblist = Giblist, i = 0 ; giblist != NULL ; giblist = giblist->next, i++);
2207 fprintf(stdout, "%d index block(s)\n", i);
2208 fprintf(stdout, "%d data block(s)\n", countgdb());
2209 for (giblist = Giblist ; giblist != NULL ; giblist = giblist->next) {
2210 fprintf(stdout, " % 8d(% 5d)\n", giblist->ov.blocknum, giblist->ov.index);
2212 for (i = 0 ; i < Gibcount ; i++) {
2213 if (Gib[i].artnum == 0)
2215 if (Gib[i].index == NULLINDEX)
/* BUG FIX: "%d" had no matching argument (undefined behavior);
   print the article number of the entry being reported. */
2216 fprintf(stdout, " %d empty\n", Gib[i].artnum);
2218 fprintf(stdout, " %d %d\n", Gib[i].offset, Gib[i].len);
2222 GROUPlock(gloc, INN_LOCK_UNLOCK);
/* Otherwise the argument is a newsgroup name. */
2225 gloc = GROUPfind(group, false);
2226 if (GROUPLOCempty(gloc)) {
2227 fprintf(stderr, "gloc is null\n");
2229 GROUPlock(gloc, INN_LOCK_READ);
2230 ge = &GROUPentries[gloc.recno];
2231 fprintf(stdout, "base %d(%d), cur %d(%d), expired at %s\n", ge->baseindex.blocknum, ge->baseindex.index, ge->curindex.blocknum, ge->curindex.index, ge->expired == 0 ? "none\n" : ctime(&ge->expired));
2232 if (!buffindexed_groupstats(group, &lo, &hi, &count, &flags)) {
2233 fprintf(stderr, "buffindexed_groupstats failed for group %s\n", group);
2236 flag[0] = (char)flags;
2238 fprintf(stdout, "%s: low is %d, high is %d, count is %d, flag is '%s'\n", group, lo, hi, count, flag);
2239 if ((search = (OVSEARCH *)ovopensearch(group, lo, hi, true)) == NULL) {
2240 fprintf(stderr, "ovopensearch failed for group %s\n", group);
2243 fprintf(stdout, " gloc is %d(0x%08x)\n", search->gloc.recno, search->gloc.recno);
2244 for (giblist = Giblist, i = 0 ; giblist != NULL ; giblist = giblist->next, i++);
2245 fprintf(stdout, "%d index block(s)\n", i);
2246 fprintf(stdout, "%d data block(s)\n", countgdb());
2247 for (giblist = Giblist ; giblist != NULL ; giblist = giblist->next) {
2248 fprintf(stdout, " % 8d(% 5d)\n", giblist->ov.blocknum, giblist->ov.index);
2250 for (i = 0 ; i < Gibcount ; i++) {
2251 if (Gib[i].artnum == 0)
2253 if (Gib[i].index == NULLINDEX)
/* BUG FIX: same missing-argument "%d" as above — supply the article number. */
2254 fprintf(stdout, " %d empty\n", Gib[i].artnum);
2256 fprintf(stdout, " %d %d\n", Gib[i].offset, Gib[i].len);
2264 while (buffindexed_search((void *)search, &artnum, &data, &len, &token, NULL)) {
2266 fprintf(stdout, "%d: len is 0\n", artnum);
2268 memcpy(buff, data, len);
2270 fprintf(stdout, "%d: %s\n", artnum, buff);
2275 #endif /* BUFF_DEBUG */