X-Git-Url: http://www.chiark.greenend.org.uk/ucgi/~mdw/git/disorder/blobdiff_plain/dbf24eb489ca9ae6046d1d0048c1f83afeb1e634..50ae38dd3f0fa96b2b50cbb80c18ed7c5c01ec7b:/server/speaker.c
diff --git a/server/speaker.c b/server/speaker.c
index f7e2f59..b270f68 100644
--- a/server/speaker.c
+++ b/server/speaker.c
@@ -17,14 +17,35 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  * USA
  */
-
-/* This program deliberately does not use the garbage collector even though it
- * might be convenient to do so. This is for two reasons. Firstly some libao
- * drivers are implemented using threads and we do not want to have to deal
- * with potential interactions between threading and garbage collection.
- * Secondly this process needs to be able to respond quickly and this is not
- * compatible with the collector hanging the program even relatively
- * briefly. */
+/** @file server/speaker.c
+ * @brief Speaker process
+ *
+ * This program is responsible for transmitting a single coherent audio stream
+ * to its destination (over the network, to some sound API, to some
+ * subprocess). It receives connections from decoders via file descriptor
+ * passing from the main server and plays them in the right order.
+ *
+ * For the ALSA API, 8- and 16-bit
+ * stereo and mono are supported, with any sample rate (within the limits that
+ * ALSA can deal with).
+ *
+ * When communicating with a subprocess, sox is invoked to convert the inbound
+ * data to a single consistent format. The same applies for network (RTP)
+ * play, though in that case currently only 44.1KHz 16-bit stereo is supported.
+ *
+ * The inbound data starts with a structure defining the data format. Note
+ * that this is NOT portable between different platforms or even necessarily
+ * between versions; the speaker is assumed to be built from the same source
+ * and run on the same host as the main server.
+ *
+ * This program deliberately does not use the garbage collector even though it
+ * might be convenient to do so. This is for two reasons. Firstly some sound
+ * APIs use threads and we do not want to have to deal with potential
+ * interactions between threading and garbage collection. Secondly this
+ * process needs to be able to respond quickly and this is not compatible with
+ * the collector hanging the program even relatively briefly.
+ */
 
 #include 
 #include "types.h"
@@ -70,20 +91,32 @@
 # define MACHINE_AO_FMT AO_FMT_LITTLE
 #endif
 
-#define BUFFER_SECONDS 5 /* How many seconds of input to
- * buffer. */
+/** @brief How many seconds of input to buffer
+ *
+ * While any given connection has this much audio buffered, no more reads will
+ * be issued for that connection. The decoder will have to wait.
+ */
+#define BUFFER_SECONDS 5
 
 #define FRAMES 4096 /* Frame batch size */
 
-#define NETWORK_BYTES 1024 /* Bytes to send per network packet */
-/* (don't make this too big or arithmetic will start to overflow) */
+/** @brief Bytes to send per network packet
+ *
+ * Don't make this too big or arithmetic will start to overflow.
+ */
+#define NETWORK_BYTES (1024+sizeof(struct rtp_header))
 
-#define RTP_AHEAD 2 /* Max RTP playahead (seconds) */
+/** @brief Maximum RTP playahead (ms) */
+#define RTP_AHEAD_MS 1000
 
-#define NFDS 256 /* Max FDs to poll for */
+/** @brief Maximum number of FDs to poll for */
+#define NFDS 256
 
-/* Known tracks are kept in a linked list.  We don't normally to have
- * more than two - maybe three at the outside. */
+/** @brief Track structure
+ *
+ * Known tracks are kept in a linked list. 
Usually there will be at most two + * of these but rearranging the queue can cause there to be more. + */ static struct track { struct track *next; /* next track */ int fd; /* input FD */ @@ -106,20 +139,82 @@ static struct pollfd fds[NFDS]; /* if we need more than that */ static int fdno; /* fd number */ static size_t bufsize; /* buffer size */ #if API_ALSA -static snd_pcm_t *pcm; /* current pcm handle */ +/** @brief The current PCM handle */ +static snd_pcm_t *pcm; static snd_pcm_uframes_t last_pcm_bufsize; /* last seen buffer size */ #endif -static int ready; /* ready to send audio */ + +/** @brief Ready to send audio + * + * This is set when the destination is ready to receive audio. Generally + * this implies that the sound device is open. In the ALSA backend it + * does @b not necessarily imply that is has the right sample format. + */ +static int ready; + static int forceplay; /* frames to force play */ static int cmdfd = -1; /* child process input */ static int bfd = -1; /* broadcast FD */ -static uint32_t rtp_time; /* RTP timestamp */ -static struct timeval rtp_time_real; /* corresponding real time */ + +/** @brief RTP timestamp + * + * This counts the number of samples played (NB not the number of frames + * played). + * + * The timestamp in the packet header is only 32 bits wide. With 44100Hz + * stereo, that only gives about half a day before wrapping, which is not + * particularly convenient for certain debugging purposes. Therefore the + * timestamp is maintained as a 64-bit integer, giving around six million years + * before wrapping, and truncated to 32 bits when transmitting. + */ +static uint64_t rtp_time; + +/** @brief RTP base timestamp + * + * This is the real time correspoding to an @ref rtp_time of 0. It is used + * to recalculate the timestamp after idle periods. + */ +static struct timeval rtp_time_0; + static uint16_t rtp_seq; /* frame sequence number */ static uint32_t rtp_id; /* RTP SSRC */ static int idled; /* set when idled */ static int audio_errors; /* audio error counter */ +/** @brief Structure of a backend */ +struct speaker_backend { + /** @brief Which backend this is + * + * @c -1 terminates the list. + */ + int backend; + + /** @brief Initialization + * + * Called once at startup. This is responsible for one-time setup + * operations, for instance opening a network socket to transmit to. + * + * When writing to a native sound API this might @b not imply opening the + * native sound device - that might be done by @c activate below. + */ + void (*init)(void); + + /** @brief Activation + * @return 0 on success, non-0 on error + * + * Called to activate the output device. + * + * After this function succeeds, @ref ready should be non-0. As well as + * opening the audio device, this function is responsible for reconfiguring + * if it necessary to cope with different samples formats (for backends that + * don't demand a single fixed sample format for the lifetime of the server). + */ + int (*activate)(void); +}; + +/** @brief Selected backend */ +static const struct speaker_backend *backend; + static const struct option options[] = { { "help", no_argument, 0, 'h' }, { "version", no_argument, 0, 'V' }, @@ -152,12 +247,12 @@ static void version(void) { exit(0); } -/* Return the number of bytes per frame in FORMAT. */ +/** @brief Return the number of bytes per frame in @p format */ static size_t bytes_per_frame(const ao_sample_format *format) { return format->channels * format->bits / 8; } -/* Find track ID, maybe creating it if not found. 
*/ +/** @brief Find track @p id, maybe creating it if not found */ static struct track *findtrack(const char *id, int create) { struct track *t; @@ -177,7 +272,7 @@ static struct track *findtrack(const char *id, int create) { return t; } -/* Remove track ID (but do not destroy it). */ +/** @brief Remove track @p id (but do not destroy it) */ static struct track *removetrack(const char *id) { struct track *t, **tt; @@ -189,7 +284,7 @@ static struct track *removetrack(const char *id) { return t; } -/* Destroy a track. */ +/** @brief Destroy a track */ static void destroy(struct track *t) { D(("destroy %s", t->id)); if(t->fd != -1) xclose(t->fd); @@ -197,7 +292,7 @@ static void destroy(struct track *t) { free(t); } -/* Notice a new FD. */ +/** @brief Notice a new connection */ static void acquire(struct track *t, int fd) { D(("acquire %s %d", t->id, fd)); if(t->fd != -1) @@ -206,7 +301,110 @@ static void acquire(struct track *t, int fd) { nonblock(fd); } -/* Read data into a sample buffer. Return 0 on success, -1 on EOF. */ +/** @brief Return true if A and B denote identical libao formats, else false */ +static int formats_equal(const ao_sample_format *a, + const ao_sample_format *b) { + return (a->bits == b->bits + && a->rate == b->rate + && a->channels == b->channels + && a->byte_format == b->byte_format); +} + +/** @brief Compute arguments to sox */ +static void soxargs(const char ***pp, char **qq, ao_sample_format *ao) { + int n; + + *(*pp)++ = "-t.raw"; + *(*pp)++ = "-s"; + *(*pp)++ = *qq; n = sprintf(*qq, "-r%d", ao->rate); *qq += n + 1; + *(*pp)++ = *qq; n = sprintf(*qq, "-c%d", ao->channels); *qq += n + 1; + /* sox 12.17.9 insists on -b etc; CVS sox insists on - etc; both are + * deployed! */ + switch(config->sox_generation) { + case 0: + if(ao->bits != 8 + && ao->byte_format != AO_FMT_NATIVE + && ao->byte_format != MACHINE_AO_FMT) { + *(*pp)++ = "-x"; + } + switch(ao->bits) { + case 8: *(*pp)++ = "-b"; break; + case 16: *(*pp)++ = "-w"; break; + case 32: *(*pp)++ = "-l"; break; + case 64: *(*pp)++ = "-d"; break; + default: fatal(0, "cannot handle sample size %d", (int)ao->bits); + } + break; + case 1: + switch(ao->byte_format) { + case AO_FMT_NATIVE: break; + case AO_FMT_BIG: *(*pp)++ = "-B"; break; + case AO_FMT_LITTLE: *(*pp)++ = "-L"; break; + } + *(*pp)++ = *qq; n = sprintf(*qq, "-%d", ao->bits/8); *qq += n + 1; + break; + } +} + +/** @brief Enable format translation + * + * If necessary, replaces a tracks inbound file descriptor with one connected + * to a sox invocation, which performs the required translation. 
+ */ +static void enable_translation(struct track *t) { + switch(config->speaker_backend) { + case BACKEND_COMMAND: + case BACKEND_NETWORK: + /* These backends need a specific sample format */ + break; + case BACKEND_ALSA: + /* ALSA can cope */ + return; + } + if(!formats_equal(&t->format, &config->sample_format)) { + char argbuf[1024], *q = argbuf; + const char *av[18], **pp = av; + int soxpipe[2]; + pid_t soxkid; + + *pp++ = "sox"; + soxargs(&pp, &q, &t->format); + *pp++ = "-"; + soxargs(&pp, &q, &config->sample_format); + *pp++ = "-"; + *pp++ = 0; + if(debugging) { + for(pp = av; *pp; pp++) + D(("sox arg[%d] = %s", pp - av, *pp)); + D(("end args")); + } + xpipe(soxpipe); + soxkid = xfork(); + if(soxkid == 0) { + signal(SIGPIPE, SIG_DFL); + xdup2(t->fd, 0); + xdup2(soxpipe[1], 1); + fcntl(0, F_SETFL, fcntl(0, F_GETFL) & ~O_NONBLOCK); + close(soxpipe[0]); + close(soxpipe[1]); + close(t->fd); + execvp("sox", (char **)av); + _exit(1); + } + D(("forking sox for format conversion (kid = %d)", soxkid)); + close(t->fd); + close(soxpipe[1]); + t->fd = soxpipe[0]; + t->format = config->sample_format; + } +} + +/** @brief Read data into a sample buffer + * @param t Pointer to track + * @return 0 on success, -1 on EOF + * + * This is effectively the read callback on @c t->fd. + */ static int fill(struct track *t) { size_t where, left; int n; @@ -242,6 +440,8 @@ static int fill(struct track *t) { /* Check that our assumptions are met. */ if(t->format.bits & 7) fatal(0, "bits per sample not a multiple of 8"); + /* If the input format is unsuitable, arrange to translate it */ + enable_translation(t); /* Make a new buffer for audio data. */ t->size = bytes_per_frame(&t->format) * t->format.rate * BUFFER_SECONDS; t->buffer = xmalloc(t->size); @@ -253,16 +453,7 @@ static int fill(struct track *t) { return 0; } -/* Return true if A and B denote identical libao formats, else false. */ -static int formats_equal(const ao_sample_format *a, - const ao_sample_format *b) { - return (a->bits == b->bits - && a->rate == b->rate - && a->channels == b->channels - && a->byte_format == b->byte_format); -} - -/* Close the sound device. */ +/** @brief Close the sound device */ static void idle(void) { D(("idle")); #if API_ALSA @@ -284,7 +475,7 @@ static void idle(void) { ready = 0; } -/* Abandon the current track */ +/** @brief Abandon the current track */ static void abandon(void) { struct speaker_message sm; @@ -300,6 +491,7 @@ static void abandon(void) { } #if API_ALSA +/** @brief Log ALSA parameters */ static void log_params(snd_pcm_hw_params_t *hwparams, snd_pcm_sw_params_t *swparams) { snd_pcm_uframes_t f; @@ -326,203 +518,18 @@ static void log_params(snd_pcm_hw_params_t *hwparams, } #endif -static void soxargs(const char ***pp, char **qq, ao_sample_format *ao) { - int n; - - *(*pp)++ = "-t.raw"; - *(*pp)++ = "-s"; - *(*pp)++ = *qq; n = sprintf(*qq, "-r%d", ao->rate); *qq += n + 1; - *(*pp)++ = *qq; n = sprintf(*qq, "-c%d", ao->channels); *qq += n + 1; - /* sox 12.17.9 insists on -b etc; CVS sox insists on - etc; both are - * deployed! 
*/ - switch(config->sox_generation) { - case 0: - if(ao->bits != 8 - && ao->byte_format != AO_FMT_NATIVE - && ao->byte_format != MACHINE_AO_FMT) { - *(*pp)++ = "-x"; - } - switch(ao->bits) { - case 8: *(*pp)++ = "-b"; break; - case 16: *(*pp)++ = "-w"; break; - case 32: *(*pp)++ = "-l"; break; - case 64: *(*pp)++ = "-d"; break; - default: fatal(0, "cannot handle sample size %d", (int)ao->bits); - } - break; - case 1: - switch(ao->byte_format) { - case AO_FMT_NATIVE: break; - case AO_FMT_BIG: *(*pp)++ = "-B"; break; - case AO_FMT_LITTLE: *(*pp)++ = "-L"; break; - } - *(*pp)++ = *qq; n = sprintf(*qq, "-%d", ao->bits/8); *qq += n + 1; - break; - } -} - -/* Make sure the sound device is open and has the right sample format. Return - * 0 on success and -1 on error. */ +/** @brief Enable sound output + * + * Makes sure the sound device is open and has the right sample format. Return + * 0 on success and -1 on error. + */ static int activate(void) { /* If we don't know the format yet we cannot start. */ if(!playing->got_format) { D((" - not got format for %s", playing->id)); return -1; } - switch(config->speaker_backend) { - case BACKEND_COMMAND: - case BACKEND_NETWORK: - /* If we pass audio on to some other agent then we enforce the configured - * sample format on the *inbound* audio data. */ - if(!formats_equal(&playing->format, &config->sample_format)) { - char argbuf[1024], *q = argbuf; - const char *av[18], **pp = av; - int soxpipe[2]; - pid_t soxkid; - *pp++ = "sox"; - soxargs(&pp, &q, &playing->format); - *pp++ = "-"; - soxargs(&pp, &q, &config->sample_format); - *pp++ = "-"; - *pp++ = 0; - if(debugging) { - for(pp = av; *pp; pp++) - D(("sox arg[%d] = %s", pp - av, *pp)); - D(("end args")); - } - xpipe(soxpipe); - soxkid = xfork(); - if(soxkid == 0) { - xdup2(playing->fd, 0); - xdup2(soxpipe[1], 1); - fcntl(0, F_SETFL, fcntl(0, F_GETFL) & ~O_NONBLOCK); - close(soxpipe[0]); - close(soxpipe[1]); - close(playing->fd); - execvp("sox", (char **)av); - _exit(1); - } - D(("forking sox for format conversion (kid = %d)", soxkid)); - close(playing->fd); - close(soxpipe[1]); - playing->fd = soxpipe[0]; - playing->format = config->sample_format; - ready = 0; - } - if(!ready) { - pcm_format = config->sample_format; - bufsize = 3 * FRAMES; - bpf = bytes_per_frame(&config->sample_format); - D(("acquired audio device")); - ready = 1; - } - return 0; - case BACKEND_ALSA: -#if API_ALSA - /* If we need to change format then close the current device. 
*/ - if(pcm && !formats_equal(&playing->format, &pcm_format)) - idle(); - if(!pcm) { - snd_pcm_hw_params_t *hwparams; - snd_pcm_sw_params_t *swparams; - snd_pcm_uframes_t pcm_bufsize; - int err; - int sample_format = 0; - unsigned rate; - - D(("snd_pcm_open")); - if((err = snd_pcm_open(&pcm, - config->device, - SND_PCM_STREAM_PLAYBACK, - SND_PCM_NONBLOCK))) { - error(0, "error from snd_pcm_open: %d", err); - goto error; - } - snd_pcm_hw_params_alloca(&hwparams); - D(("set up hw params")); - if((err = snd_pcm_hw_params_any(pcm, hwparams)) < 0) - fatal(0, "error from snd_pcm_hw_params_any: %d", err); - if((err = snd_pcm_hw_params_set_access(pcm, hwparams, - SND_PCM_ACCESS_RW_INTERLEAVED)) < 0) - fatal(0, "error from snd_pcm_hw_params_set_access: %d", err); - switch(playing->format.bits) { - case 8: - sample_format = SND_PCM_FORMAT_S8; - break; - case 16: - switch(playing->format.byte_format) { - case AO_FMT_NATIVE: sample_format = SND_PCM_FORMAT_S16; break; - case AO_FMT_LITTLE: sample_format = SND_PCM_FORMAT_S16_LE; break; - case AO_FMT_BIG: sample_format = SND_PCM_FORMAT_S16_BE; break; - error(0, "unrecognized byte format %d", playing->format.byte_format); - goto fatal; - } - break; - default: - error(0, "unsupported sample size %d", playing->format.bits); - goto fatal; - } - if((err = snd_pcm_hw_params_set_format(pcm, hwparams, - sample_format)) < 0) { - error(0, "error from snd_pcm_hw_params_set_format (%d): %d", - sample_format, err); - goto fatal; - } - rate = playing->format.rate; - if((err = snd_pcm_hw_params_set_rate_near(pcm, hwparams, &rate, 0)) < 0) { - error(0, "error from snd_pcm_hw_params_set_rate (%d): %d", - playing->format.rate, err); - goto fatal; - } - if(rate != (unsigned)playing->format.rate) - info("want rate %d, got %u", playing->format.rate, rate); - if((err = snd_pcm_hw_params_set_channels(pcm, hwparams, - playing->format.channels)) < 0) { - error(0, "error from snd_pcm_hw_params_set_channels (%d): %d", - playing->format.channels, err); - goto fatal; - } - bufsize = 3 * FRAMES; - pcm_bufsize = bufsize; - if((err = snd_pcm_hw_params_set_buffer_size_near(pcm, hwparams, - &pcm_bufsize)) < 0) - fatal(0, "error from snd_pcm_hw_params_set_buffer_size (%d): %d", - 3 * FRAMES, err); - if(pcm_bufsize != 3 * FRAMES && pcm_bufsize != last_pcm_bufsize) - info("asked for PCM buffer of %d frames, got %d", - 3 * FRAMES, (int)pcm_bufsize); - last_pcm_bufsize = pcm_bufsize; - if((err = snd_pcm_hw_params(pcm, hwparams)) < 0) - fatal(0, "error calling snd_pcm_hw_params: %d", err); - D(("set up sw params")); - snd_pcm_sw_params_alloca(&swparams); - if((err = snd_pcm_sw_params_current(pcm, swparams)) < 0) - fatal(0, "error calling snd_pcm_sw_params_current: %d", err); - if((err = snd_pcm_sw_params_set_avail_min(pcm, swparams, FRAMES)) < 0) - fatal(0, "error calling snd_pcm_sw_params_set_avail_min %d: %d", - FRAMES, err); - if((err = snd_pcm_sw_params(pcm, swparams)) < 0) - fatal(0, "error calling snd_pcm_sw_params: %d", err); - pcm_format = playing->format; - bpf = bytes_per_frame(&pcm_format); - D(("acquired audio device")); - log_params(hwparams, swparams); - ready = 1; - } - return 0; - fatal: - abandon(); - error: - /* We assume the error is temporary and that we'll retry in a bit. 
*/ - if(pcm) { - snd_pcm_close(pcm); - pcm = 0; - } - return -1; -#endif - default: - assert(!"reached"); - } + return backend->activate(); } /* Check to see whether the current track has finished playing */ @@ -541,6 +548,7 @@ static void fork_cmd(void) { xpipe(pfd); cmdpid = xfork(); if(!cmdpid) { + signal(SIGPIPE, SIG_DFL); xdup2(pfd[0], 0); close(pfd[0]); close(pfd[1]); @@ -553,7 +561,7 @@ static void fork_cmd(void) { } static void play(size_t frames) { - size_t avail_bytes, written_frames; + size_t avail_bytes, write_bytes, written_frames; ssize_t written_bytes; struct rtp_header header; struct iovec vec[2]; @@ -642,21 +650,67 @@ static void play(size_t frames) { case BACKEND_NETWORK: /* We transmit using RTP (RFC3550) and attempt to conform to the internet * AVT profile (RFC3551). */ - if(rtp_time_real.tv_sec == 0) - xgettimeofday(&rtp_time_real, 0); + if(idled) { + /* There may have been a gap. Fix up the RTP time accordingly. */ struct timeval now; + uint64_t delta; + uint64_t target_rtp_time; + + /* Find the current time */ xgettimeofday(&now, 0); - /* There's been a gap. Fix up the RTP time accordingly. */ - const long offset = (((now.tv_sec + now.tv_usec /1000000.0) - - (rtp_time_real.tv_sec + rtp_time_real.tv_usec / 1000000.0)) - * playing->format.rate * playing->format.channels); - info("offset RTP timestamp by %ld", offset); - rtp_time += offset; + /* Find the number of microseconds elapsed since rtp_time=0 */ + delta = tvsub_us(now, rtp_time_0); + assert(delta <= UINT64_MAX / 88200); + target_rtp_time = (delta * playing->format.rate + * playing->format.channels) / 1000000; + /* Overflows at ~6 years uptime with 44100Hz stereo */ + + /* rtp_time is the number of samples we've played. NB that we play + * RTP_AHEAD_MS ahead of ourselves, so it may legitimately be ahead of + * the value we deduce from time comparison. + * + * Suppose we have 1s track started at t=0, and another track begins to + * play at t=2s. Suppose RTP_AHEAD_MS=1000 and 44100Hz stereo. In that + * case we'll send 1s of audio as fast as we can, giving rtp_time=88200. + * rtp_time stops at this point. + * + * At t=2s we'll have calculated target_rtp_time=176400. In this case we + * set rtp_time=176400 and the player can correctly conclude that it + * should leave 1s between the tracks. + * + * Suppose instead that the second track arrives at t=0.5s, and that + * we've managed to transmit the whole of the first track already. We'll + * have target_rtp_time=44100. + * + * The desired behaviour is to play the second track back to back with + * first. In this case therefore we do not modify rtp_time. + * + * Is it ever right to reduce rtp_time? No; for that would imply + * transmitting packets with overlapping timestamp ranges, which does not + * make sense. + */ + if(target_rtp_time > rtp_time) { + /* More time has elapsed than we've transmitted samples. That implies + * we've been 'sending' silence. 
*/ + info("advancing rtp_time by %"PRIu64" samples", + target_rtp_time - rtp_time); + rtp_time = target_rtp_time; + } else if(target_rtp_time < rtp_time) { + const int64_t samples_ahead = ((uint64_t)RTP_AHEAD_MS + * config->sample_format.rate + * config->sample_format.channels + / 1000); + + if(target_rtp_time + samples_ahead < rtp_time) { + info("reversing rtp_time by %"PRIu64" samples", + rtp_time - target_rtp_time); + } + } } header.vpxcc = 2 << 6; /* V=2, P=0, X=0, CC=0 */ header.seq = htons(rtp_seq++); - header.timestamp = htonl(rtp_time); + header.timestamp = htonl((uint32_t)rtp_time); header.ssrc = rtp_id; header.mpt = (idled ? 0x80 : 0x00) | 10; /* 10 = L16 = 16-bit x 2 x 44100KHz. We ought to deduce this value from @@ -666,6 +720,7 @@ static void play(size_t frames) { idled = 0; if(avail_bytes > NETWORK_BYTES - sizeof header) { avail_bytes = NETWORK_BYTES - sizeof header; + /* Always send a whole number of frames */ avail_bytes -= avail_bytes % bpf; } /* "The RTP clock rate used for generating the RTP timestamp is independent @@ -676,45 +731,30 @@ static void play(size_t frames) { * generated per second is then the sampling rate times the channel * count.)" */ - vec[0].iov_base = (void *)&header; - vec[0].iov_len = sizeof header; - vec[1].iov_base = playing->buffer + playing->start; - vec[1].iov_len = avail_bytes; -#if 0 - { - char buffer[3 * sizeof header + 1]; - size_t n; - const uint8_t *ptr = (void *)&header; - - for(n = 0; n < sizeof header; ++n) - sprintf(&buffer[3 * n], "%02x ", *ptr++); - info(buffer); - } -#endif - do { - written_bytes = writev(bfd, - vec, - 2); - } while(written_bytes < 0 && errno == EINTR); - if(written_bytes < 0) { - error(errno, "error transmitting audio data"); - ++audio_errors; - if(audio_errors == 10) - fatal(0, "too many audio errors"); + write_bytes = avail_bytes; + if(write_bytes) { + vec[0].iov_base = (void *)&header; + vec[0].iov_len = sizeof header; + vec[1].iov_base = playing->buffer + playing->start; + vec[1].iov_len = avail_bytes; + do { + written_bytes = writev(bfd, + vec, + 2); + } while(written_bytes < 0 && errno == EINTR); + if(written_bytes < 0) { + error(errno, "error transmitting audio data"); + ++audio_errors; + if(audio_errors == 10) + fatal(0, "too many audio errors"); return; - } + } + } else audio_errors /= 2; written_bytes = avail_bytes; written_frames = written_bytes / bpf; /* Advance RTP's notion of the time */ rtp_time += written_frames * playing->format.channels; - /* Advance the corresponding real time */ - assert(NETWORK_BYTES <= 2000); /* else risk overflowing 32 bits */ - rtp_time_real.tv_usec += written_frames * 1000000 / playing->format.rate; - if(rtp_time_real.tv_usec >= 1000000) { - ++rtp_time_real.tv_sec; - rtp_time_real.tv_usec -= 1000000; - } break; default: assert(!"reached"); @@ -764,11 +804,137 @@ static int addfd(int fd, int events) { return -1; } -int main(int argc, char **argv) { - int n, fd, stdin_slot, alsa_slots, cmdfd_slot, bfd_slot, poke, timeout; - struct timeval now, delta; - struct track *t; - struct speaker_message sm; +#if API_ALSA +/** @brief ALSA backend initialization */ +static void alsa_init(void) { + info("selected ALSA backend"); +} + +/** @brief ALSA backend activation */ +static int alsa_activate(void) { + /* If we need to change format then close the current device. 
*/ + if(pcm && !formats_equal(&playing->format, &pcm_format)) + idle(); + if(!pcm) { + snd_pcm_hw_params_t *hwparams; + snd_pcm_sw_params_t *swparams; + snd_pcm_uframes_t pcm_bufsize; + int err; + int sample_format = 0; + unsigned rate; + + D(("snd_pcm_open")); + if((err = snd_pcm_open(&pcm, + config->device, + SND_PCM_STREAM_PLAYBACK, + SND_PCM_NONBLOCK))) { + error(0, "error from snd_pcm_open: %d", err); + goto error; + } + snd_pcm_hw_params_alloca(&hwparams); + D(("set up hw params")); + if((err = snd_pcm_hw_params_any(pcm, hwparams)) < 0) + fatal(0, "error from snd_pcm_hw_params_any: %d", err); + if((err = snd_pcm_hw_params_set_access(pcm, hwparams, + SND_PCM_ACCESS_RW_INTERLEAVED)) < 0) + fatal(0, "error from snd_pcm_hw_params_set_access: %d", err); + switch(playing->format.bits) { + case 8: + sample_format = SND_PCM_FORMAT_S8; + break; + case 16: + switch(playing->format.byte_format) { + case AO_FMT_NATIVE: sample_format = SND_PCM_FORMAT_S16; break; + case AO_FMT_LITTLE: sample_format = SND_PCM_FORMAT_S16_LE; break; + case AO_FMT_BIG: sample_format = SND_PCM_FORMAT_S16_BE; break; + error(0, "unrecognized byte format %d", playing->format.byte_format); + goto fatal; + } + break; + default: + error(0, "unsupported sample size %d", playing->format.bits); + goto fatal; + } + if((err = snd_pcm_hw_params_set_format(pcm, hwparams, + sample_format)) < 0) { + error(0, "error from snd_pcm_hw_params_set_format (%d): %d", + sample_format, err); + goto fatal; + } + rate = playing->format.rate; + if((err = snd_pcm_hw_params_set_rate_near(pcm, hwparams, &rate, 0)) < 0) { + error(0, "error from snd_pcm_hw_params_set_rate (%d): %d", + playing->format.rate, err); + goto fatal; + } + if(rate != (unsigned)playing->format.rate) + info("want rate %d, got %u", playing->format.rate, rate); + if((err = snd_pcm_hw_params_set_channels(pcm, hwparams, + playing->format.channels)) < 0) { + error(0, "error from snd_pcm_hw_params_set_channels (%d): %d", + playing->format.channels, err); + goto fatal; + } + bufsize = 3 * FRAMES; + pcm_bufsize = bufsize; + if((err = snd_pcm_hw_params_set_buffer_size_near(pcm, hwparams, + &pcm_bufsize)) < 0) + fatal(0, "error from snd_pcm_hw_params_set_buffer_size (%d): %d", + 3 * FRAMES, err); + if(pcm_bufsize != 3 * FRAMES && pcm_bufsize != last_pcm_bufsize) + info("asked for PCM buffer of %d frames, got %d", + 3 * FRAMES, (int)pcm_bufsize); + last_pcm_bufsize = pcm_bufsize; + if((err = snd_pcm_hw_params(pcm, hwparams)) < 0) + fatal(0, "error calling snd_pcm_hw_params: %d", err); + D(("set up sw params")); + snd_pcm_sw_params_alloca(&swparams); + if((err = snd_pcm_sw_params_current(pcm, swparams)) < 0) + fatal(0, "error calling snd_pcm_sw_params_current: %d", err); + if((err = snd_pcm_sw_params_set_avail_min(pcm, swparams, FRAMES)) < 0) + fatal(0, "error calling snd_pcm_sw_params_set_avail_min %d: %d", + FRAMES, err); + if((err = snd_pcm_sw_params(pcm, swparams)) < 0) + fatal(0, "error calling snd_pcm_sw_params: %d", err); + pcm_format = playing->format; + bpf = bytes_per_frame(&pcm_format); + D(("acquired audio device")); + log_params(hwparams, swparams); + ready = 1; + } + return 0; +fatal: + abandon(); +error: + /* We assume the error is temporary and that we'll retry in a bit. 
*/ + if(pcm) { + snd_pcm_close(pcm); + pcm = 0; + } + return -1; +} +#endif + +/** @brief Command backend initialization */ +static void command_init(void) { + info("selected command backend"); + fork_cmd(); +} + +/** @brief Command backend activation */ +static int command_activate(void) { + if(!ready) { + pcm_format = config->sample_format; + bufsize = 3 * FRAMES; + bpf = bytes_per_frame(&config->sample_format); + D(("acquired audio device")); + ready = 1; + } + return 0; +} + +/** @brief Network backend initialization */ +static void network_init(void) { struct addrinfo *res, *sres; static const struct addrinfo pref = { 0, @@ -791,7 +957,90 @@ int main(int argc, char **argv) { 0 }; static const int one = 1; + int sndbuf, target_sndbuf = 131072; + socklen_t len; char *sockname, *ssockname; + + res = get_address(&config->broadcast, &pref, &sockname); + if(!res) exit(-1); + if(config->broadcast_from.n) { + sres = get_address(&config->broadcast_from, &prefbind, &ssockname); + if(!sres) exit(-1); + } else + sres = 0; + if((bfd = socket(res->ai_family, + res->ai_socktype, + res->ai_protocol)) < 0) + fatal(errno, "error creating broadcast socket"); + if(setsockopt(bfd, SOL_SOCKET, SO_BROADCAST, &one, sizeof one) < 0) + fatal(errno, "error setting SO_BROADCAST on broadcast socket"); + len = sizeof sndbuf; + if(getsockopt(bfd, SOL_SOCKET, SO_SNDBUF, + &sndbuf, &len) < 0) + fatal(errno, "error getting SO_SNDBUF"); + if(target_sndbuf > sndbuf) { + if(setsockopt(bfd, SOL_SOCKET, SO_SNDBUF, + &target_sndbuf, sizeof target_sndbuf) < 0) + error(errno, "error setting SO_SNDBUF to %d", target_sndbuf); + else + info("changed socket send buffer size from %d to %d", + sndbuf, target_sndbuf); + } else + info("default socket send buffer is %d", + sndbuf); + /* We might well want to set additional broadcast- or multicast-related + * options here */ + if(sres && bind(bfd, sres->ai_addr, sres->ai_addrlen) < 0) + fatal(errno, "error binding broadcast socket to %s", ssockname); + if(connect(bfd, res->ai_addr, res->ai_addrlen) < 0) + fatal(errno, "error connecting broadcast socket to %s", sockname); + /* Select an SSRC */ + gcry_randomize(&rtp_id, sizeof rtp_id, GCRY_STRONG_RANDOM); + info("selected network backend, sending to %s", sockname); + if(config->sample_format.byte_format != AO_FMT_BIG) { + info("forcing big-endian sample format"); + config->sample_format.byte_format = AO_FMT_BIG; + } +} + +/** @brief Network backend activation */ +static int network_activate(void) { + if(!ready) { + pcm_format = config->sample_format; + bufsize = 3 * FRAMES; + bpf = bytes_per_frame(&config->sample_format); + D(("acquired audio device")); + ready = 1; + } + return 0; +} + +/** @brief Table of speaker backends */ +static const struct speaker_backend backends[] = { +#if API_ALSA + { + BACKEND_ALSA, + alsa_init, + alsa_activate + }, +#endif + { + BACKEND_COMMAND, + command_init, + command_activate + }, + { + BACKEND_NETWORK, + network_init, + network_activate + }, + { -1, 0, 0 } +}; + +int main(int argc, char **argv) { + int n, fd, stdin_slot, alsa_slots, cmdfd_slot, bfd_slot, poke, timeout; + struct track *t; + struct speaker_message sm; #if API_ALSA int alsa_nslots = -1, err; #endif @@ -825,44 +1074,15 @@ int main(int argc, char **argv) { become_mortal(); /* make sure we're not root, whatever the config says */ if(getuid() == 0 || geteuid() == 0) fatal(0, "do not run as root"); - switch(config->speaker_backend) { - case BACKEND_ALSA: - info("selected ALSA backend"); - case BACKEND_COMMAND: - info("selected command 
backend"); - fork_cmd(); - break; - case BACKEND_NETWORK: - res = get_address(&config->broadcast, &pref, &sockname); - if(!res) return -1; - if(config->broadcast_from.n) { - sres = get_address(&config->broadcast_from, &prefbind, &ssockname); - if(!sres) return -1; - } else - sres = 0; - if((bfd = socket(res->ai_family, - res->ai_socktype, - res->ai_protocol)) < 0) - fatal(errno, "error creating broadcast socket"); - if(setsockopt(bfd, SOL_SOCKET, SO_BROADCAST, &one, sizeof one) < 0) - fatal(errno, "error settting SO_BROADCAST on broadcast socket"); - /* We might well want to set additional broadcast- or multicast-related - * options here */ - if(sres && bind(bfd, sres->ai_addr, sres->ai_addrlen) < 0) - fatal(errno, "error binding broadcast socket to %s", ssockname); - if(connect(bfd, res->ai_addr, res->ai_addrlen) < 0) - fatal(errno, "error connecting broadcast socket to %s", sockname); - /* Select an SSRC */ - gcry_randomize(&rtp_id, sizeof rtp_id, GCRY_STRONG_RANDOM); - info("selected network backend, sending to %s", sockname); - if(config->sample_format.byte_format != AO_FMT_BIG) { - info("forcing big-endian sample format"); - config->sample_format.byte_format = AO_FMT_BIG; - } - break; - default: - fatal(0, "unknown backend %d", config->speaker_backend); - } + /* identify the backend used to play */ + for(n = 0; backends[n].backend != -1; ++n) + if(backends[n].backend == config->speaker_backend) + break; + if(backends[n].backend == -1) + fatal(0, "unsupported backend %d", config->speaker_backend); + backend = &backends[n]; + /* backend-specific initialization */ + backend->init(); while(getppid() != 1) { fdno = 0; /* Always ready for commands from the main server. */ @@ -889,22 +1109,42 @@ int main(int argc, char **argv) { if(cmdfd >= 0) cmdfd_slot = addfd(cmdfd, POLLOUT); break; - case BACKEND_NETWORK: - /* We want to keep the notional playing point somewhere in the near - * future. If it's too near then clients that attempt even the - * slightest amount of read-ahead will never catch up, and those that - * don't will skip whenever there's a trivial network delay. If it's - * too far ahead then pause latency will be too high. - */ + case BACKEND_NETWORK: { + struct timeval now; + uint64_t target_us; + uint64_t target_rtp_time; + const int64_t samples_ahead = ((uint64_t)RTP_AHEAD_MS + * config->sample_format.rate + * config->sample_format.channels + / 1000); +#if 0 + static unsigned logit; +#endif + + /* If we're starting then initialize the base time */ + if(!rtp_time) + xgettimeofday(&rtp_time_0, 0); + /* We send audio data whenever we get RTP_AHEAD seconds or more + * behind */ xgettimeofday(&now, 0); - delta = tvsub(rtp_time_real, now); - if(delta.tv_sec < RTP_AHEAD) { - D(("delta = %ld.%06ld", (long)delta.tv_sec, (long)delta.tv_usec)); + target_us = tvsub_us(now, rtp_time_0); + assert(target_us <= UINT64_MAX / 88200); + target_rtp_time = (target_us * config->sample_format.rate + * config->sample_format.channels) + + / 1000000; +#if 0 + /* TODO remove logging guff */ + if(!(logit++ & 1023)) + info("rtp_time %llu target %llu difference %lld [%lld]", + rtp_time, target_rtp_time, + rtp_time - target_rtp_time, + samples_ahead); +#endif + if((int64_t)(rtp_time - target_rtp_time) < samples_ahead) bfd_slot = addfd(bfd, POLLOUT); - if(delta.tv_sec < 0) - rtp_time_real = now; /* catch up */ - } break; + } #if API_ALSA case BACKEND_ALSA: { /* We send sample data to ALSA as fast as it can accept it, relying on