+
+
+/***** TRANSPORT PEERS definitions *****/
+
+static void transport_peers_debug(struct site *st, transport_peers *dst,
+ const char *didwhat,
+ int nargs, const struct comm_addr *args,
+ size_t stride) {
+ int i;
+ char *argp;
+
+ if (!(st->log_events & LOG_PEER_ADDRS))
+ return; /* an optimisation */
+
+ slog(st, LOG_PEER_ADDRS, "peers (%s) %s nargs=%d => npeers=%d",
+ (dst==&st->peers ? "data" :
+ dst==&st->setup_peers ? "setup" : "UNKNOWN"),
+ didwhat, nargs, dst->npeers);
+
+ for (i=0, argp=(void*)args;
+ i<nargs;
+ i++, (argp+=stride?stride:sizeof(*args))) {
+ const struct comm_addr *ca=(void*)argp;
+ slog(st, LOG_PEER_ADDRS, " args: addrs[%d]=%s",
+ i, comm_addr_to_string(ca));
+ }
+ for (i=0; i<dst->npeers; i++) {
+ struct timeval diff;
+ timersub(tv_now,&dst->peers[i].last,&diff);
+ const struct comm_addr *ca=&dst->peers[i].addr;
+ slog(st, LOG_PEER_ADDRS, " peers: addrs[%d]=%s T-%ld.%06ld",
+ i, comm_addr_to_string(ca),
+ (unsigned long)diff.tv_sec, (unsigned long)diff.tv_usec);
+ }
+}
+
+static void transport_peers_expire(struct site *st, transport_peers *peers) {
+ /* peers must be sorted first */
+ int previous_peers=peers->npeers;
+ struct timeval oldest;
+ oldest.tv_sec = tv_now->tv_sec - st->mobile_peer_expiry;
+ oldest.tv_usec = tv_now->tv_usec;
+ while (peers->npeers>1 &&
+ timercmp(&peers->peers[peers->npeers-1].last, &oldest, <))
+ peers->npeers--;
+ if (peers->npeers != previous_peers)
+ transport_peers_debug(st,peers,"expire", 0,0,0);
+}
+
+static bool_t transport_peer_record_one(struct site *st, transport_peers *peers,
+ const struct comm_addr *ca,
+ const struct timeval *tv) {
+ /* returns false if output is full */
+ int search;
+
+ if (peers->npeers >= st->transport_peers_max)
+ return 0;
+
+ for (search=0; search<peers->npeers; search++)
+ if (comm_addr_equal(&peers->peers[search].addr, ca))
+ return 1;
+
+ peers->peers[peers->npeers].addr = *ca;
+ peers->peers[peers->npeers].last = *tv;
+ peers->npeers++;
+ return 1;
+}
+
static void transport_record_peers(struct site *st, transport_peers *peers,
                                   const struct comm_addr *addrs, int naddrs,
                                   const char *m) {
    /* We add addrs into peers.  The new entries end up at the front
     * and displace entries towards the end (perhaps even off the
     * end).  Any existing matching entries are moved up to the front.
     *
     * m is a short label used only for debug logging.
     *
     * Caller must first call transport_peers_expire. */

    if (naddrs==1 && peers->npeers>=1 &&
        comm_addr_equal(&addrs[0], &peers->peers[0].addr)) {
        /* optimisation, also avoids debug for trivial updates */
        peers->peers[0].last = *tv_now;
        return;
    }

    /* Snapshot the current list (a VLA; its length is bounded by
     * st->transport_peers_max, enforced in transport_peer_record_one)
     * so we can rebuild the list from scratch below. */
    int old_npeers=peers->npeers;
    transport_peer old_peers[old_npeers];
    COPY_ARRAY(old_peers,peers->peers,old_npeers);

    /* Rebuild: first the new addresses, timestamped now... */
    peers->npeers=0;
    int i;
    for (i=0; i<naddrs; i++) {
        if (!transport_peer_record_one(st,peers, &addrs[i], tv_now))
            break;
    }
    /* ...then the previous entries with their old timestamps;
     * duplicates are skipped inside transport_peer_record_one and any
     * overflow simply falls off the end of the table. */
    for (i=0; i<old_npeers; i++) {
        const transport_peer *old=&old_peers[i];
        if (!transport_peer_record_one(st,peers, &old->addr, &old->last))
            break;
    }

    transport_peers_debug(st,peers,m, naddrs,addrs,0);
}
+
+static void transport_expire_record_peers(struct site *st,
+ transport_peers *peers,
+ const struct comm_addr *addrs,
+ int naddrs, const char *m) {
+ /* Convenience function */
+ transport_peers_expire(st,peers);
+ transport_record_peers(st,peers,addrs,naddrs,m);
+}
+
static bool_t transport_compute_setupinit_peers(struct site *st,
    const struct comm_addr *configured_addrs /* 0 if none or not found */,
    int n_configured_addrs /* 0 if none or not found */,
    const struct comm_addr *incoming_packet_addr /* 0 if none */) {
    /* Populate st->setup_peers with every address worth sending key
     * setup packets to.  Returns False if no address source is
     * available at all. */
    if (!n_configured_addrs && !incoming_packet_addr &&
        !transport_peers_valid(&st->peers))
        return False;

    slog(st,LOG_SETUP_INIT,
         "using: %d configured addr(s);%s %d old peer addrs(es)",
         n_configured_addrs,
         incoming_packet_addr ? " incoming packet address;" : "",
         st->peers.npeers);

    /* Non-mobile peers try addresses until one is plausible.  The
     * effect is that this code always tries first the configured
     * address if supplied, or otherwise the address of the incoming
     * PROD, or finally the existing data peer if one exists; this is
     * as desired. */

    /* Start from the existing data peers, pruned of stale entries... */
    transport_peers_copy(st,&st->setup_peers,&st->peers);
    transport_peers_expire(st,&st->setup_peers);

    /* ...then layer the other sources on top.  Each record call moves
     * its addresses to the front, so the configured addresses
     * (recorded last) end up tried first. */
    if (incoming_packet_addr)
        transport_record_peers(st,&st->setup_peers,
                               incoming_packet_addr,1, "incoming");

    if (n_configured_addrs)
        transport_record_peers(st,&st->setup_peers,
                               configured_addrs,n_configured_addrs,
                               "setupinit");

    assert(transport_peers_valid(&st->setup_peers));
    return True;
}
+
+static void transport_setup_msgok(struct site *st, const struct comm_addr *a) {
+ if (st->peer_mobile)
+ transport_expire_record_peers(st,&st->setup_peers,a,1,"setupmsg");
+}
+static void transport_data_msgok(struct site *st, const struct comm_addr *a) {
+ if (st->peer_mobile)
+ transport_expire_record_peers(st,&st->peers,a,1,"datamsg");
+}
+
+static int transport_peers_valid(transport_peers *peers) {
+ return peers->npeers;
+}
+static void transport_peers_clear(struct site *st, transport_peers *peers) {
+ peers->npeers= 0;
+ transport_peers_debug(st,peers,"clear",0,0,0);
+}
+static void transport_peers_copy(struct site *st, transport_peers *dst,
+ const transport_peers *src) {
+ dst->npeers=src->npeers;
+ COPY_ARRAY(dst->peers, src->peers, dst->npeers);
+ transport_peers_debug(st,dst,"copy",
+ src->npeers, &src->peers->addr, sizeof(*src->peers));
+}
+
+static void transport_resolve_complete(struct site *st,
+ const struct comm_addr *addrs,
+ int naddrs) {
+ transport_expire_record_peers(st,&st->peers,addrs,naddrs,
+ "resolved data");
+ transport_expire_record_peers(st,&st->setup_peers,addrs,naddrs,
+ "resolved setup");
+}
+
+static void transport_resolve_complete_tardy(struct site *st,
+ const struct comm_addr *addrs,
+ int naddrs) {
+ transport_expire_record_peers(st,&st->peers,addrs,naddrs,
+ "resolved tardily");
+}
+
+static void transport_peers__copy_by_mask(transport_peer *out, int *nout_io,
+ unsigned mask,
+ const transport_peers *inp) {
+ /* out and in->peers may be the same region, or nonoverlapping */
+ const transport_peer *in=inp->peers;
+ int slot;
+ for (slot=0; slot<inp->npeers; slot++) {
+ if (!(mask & (1U << slot)))
+ continue;
+ if (!(out==in && slot==*nout_io))
+ COPY_OBJ(out[*nout_io], in[slot]);
+ (*nout_io)++;
+ }
+}
+
void transport_xmit(struct site *st, transport_peers *peers,
                    struct buffer_if *buf, bool_t candebug) {
    /* Transmit buf to the addresses in peers.  For a non-mobile peer
     * we stop after the first successful send; for a mobile peer we
     * send to every address.  Afterwards, failing addresses are
     * demoted to the end of the list (if we are mobile) or deleted
     * (otherwise).  candebug enables per-packet dump logging. */
    int slot;
    transport_peers_expire(st, peers);
    unsigned failed=0; /* bitmask: bit i set <=> send to slot i failed */
    /* We need one mask bit per possible peer slot. */
    assert(MAX_PEER_ADDRS < sizeof(unsigned)*CHAR_BIT);

    int nfailed=0;
    for (slot=0; slot<peers->npeers; slot++) {
        transport_peer *peer=&peers->peers[slot];
        bool_t ok =
            peer->addr.comm->sendmsg(peer->addr.comm->st, buf, &peer->addr);
        if (candebug)
            dump_packet(st, buf, &peer->addr, False, ok);
        if (!ok) {
            failed |= 1U << slot;
            nfailed++;
        }
        /* One successful transmission suffices for a non-mobile
         * peer. */
        if (ok && !st->peer_mobile)
            break;
    }
    /* Now we need to demote/delete failing addrs: if we are mobile we
     * merely demote them; otherwise we delete them. */
    if (st->local_mobile) {
        unsigned expected = ((1U << nfailed)-1) << (peers->npeers-nfailed);
        /* `expected' has all the failures at the end already */
        if (failed != expected) {
            /* Reorder in place: gather the failed entries aside,
             * compact the working ones to the front (copy_by_mask
             * tolerates the aliasing), then append the failures. */
            int fslot=0;
            transport_peer failedpeers[nfailed];
            transport_peers__copy_by_mask(failedpeers, &fslot, failed,peers);
            assert(fslot == nfailed);
            int wslot=0;
            transport_peers__copy_by_mask(peers->peers,&wslot,~failed,peers);
            assert(wslot+nfailed == peers->npeers);
            COPY_ARRAY(peers->peers+wslot, failedpeers, nfailed);
            transport_peers_debug(st,peers,"mobile failure reorder",0,0,0);
        }
    } else {
        /* Delete the failing addresses — unless we have only one
         * address, which we keep regardless. */
        if (failed && peers->npeers > 1) {
            int wslot=0;
            transport_peers__copy_by_mask(peers->peers,&wslot,~failed,peers);
            peers->npeers=wslot;
            transport_peers_debug(st,peers,"non-mobile failure cleanup",0,0,0);
        }
    }
}
+
+/***** END of transport peers declarations *****/