X-Git-Url: http://www.chiark.greenend.org.uk/ucgi/~mdw/git/disorder/blobdiff_plain/460b9539a7c15580e41a71bbc0f47ae776238915..3c68b773ae196c2691ed97d987be6221b1cf79b7:/server/speaker.c

diff --git a/server/speaker.c b/server/speaker.c
index ef31931..f97bb69 100644
--- a/server/speaker.c
+++ b/server/speaker.c
@@ -1,6 +1,6 @@
 /*
  * This file is part of DisOrder
- * Copyright (C) 2005, 2006 Richard Kettlewell
+ * Copyright (C) 2005, 2006, 2007 Richard Kettlewell
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -17,14 +17,35 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  * USA
  */
-
-/* This program deliberately does not use the garbage collector even though it
- * might be convenient to do so. This is for two reasons. Firstly some libao
- * drivers are implemented using threads and we do not want to have to deal
- * with potential interactions between threading and garbage collection.
- * Secondly this process needs to be able to respond quickly and this is not
- * compatible with the collector hanging the program even relatively
- * briefly. */
+/** @file server/speaker.c
+ * @brief Speaker process
+ *
+ * This program is responsible for transmitting a single coherent audio stream
+ * to its destination (over the network, to some sound API, to some
+ * subprocess). It receives connections from decoders via file descriptor
+ * passing from the main server and plays them in the right order.
+ *
+ * For the ALSA API, 8- and 16-bit
+ * stereo and mono are supported, with any sample rate (within the limits that
+ * ALSA can deal with).
+ *
+ * When communicating with a subprocess, sox is invoked to convert the inbound
+ * data to a single consistent format. The same applies for network (RTP)
+ * play, though in that case currently only 44.1kHz 16-bit stereo is supported.
+ *
+ * The inbound data starts with a structure defining the data format. Note
+ * that this is NOT portable between different platforms or even necessarily
+ * between versions; the speaker is assumed to be built from the same source
+ * and run on the same host as the main server.
+ *
+ * This program deliberately does not use the garbage collector even though it
+ * might be convenient to do so. This is for two reasons. Firstly some sound
+ * APIs use threads and we do not want to have to deal with potential
+ * interactions between threading and garbage collection. Secondly this
+ * process needs to be able to respond quickly and this is not compatible with
+ * the collector hanging the program even relatively briefly.
+ */
 #include
 #include "types.h"
@@ -40,8 +61,14 @@
 #include
 #include
 #include
+#include
 #include
-#include
+#include
+#include
+#include
+#include
+#include
+#include
 #include "configuration.h"
 #include "syscalls.h"
@@ -50,16 +77,46 @@
 #include "mem.h"
 #include "speaker.h"
 #include "user.h"
+#include "addr.h"
+#include "timeval.h"
+#include "rtp.h"
+
+#if API_ALSA
+#include
+#endif
-#define BUFFER_SECONDS 5 /* How many seconds of input to
- * buffer. */
+#ifdef WORDS_BIGENDIAN
+# define MACHINE_AO_FMT AO_FMT_BIG
+#else
+# define MACHINE_AO_FMT AO_FMT_LITTLE
+#endif
+
+/** @brief How many seconds of input to buffer
+ *
+ * While any given connection has this much audio buffered, no more reads will
+ * be issued for that connection. The decoder will have to wait.
+ */
+#define BUFFER_SECONDS 5
 #define FRAMES 4096 /* Frame batch size */
-#define NFDS 256 /* Max FDs to poll for */
+/** @brief Bytes to send per network packet
+ *
+ * Don't make this too big or arithmetic will start to overflow.
+ */
+#define NETWORK_BYTES (1024+sizeof(struct rtp_header))
-/* Known tracks are kept in a linked list. We don't normally to have
- * more than two - maybe three at the outside. */
+/** @brief Maximum RTP playahead (ms) */
+#define RTP_AHEAD_MS 1000
+
+/** @brief Maximum number of FDs to poll for */
+#define NFDS 256
+
+/** @brief Track structure
+ *
+ * Known tracks are kept in a linked list. Usually there will be at most two
+ * of these but rearranging the queue can cause there to be more.
+ */
 static struct track {
   struct track *next; /* next track */
   int fd; /* input FD */
@@ -76,13 +133,109 @@ static struct track {
 static time_t last_report; /* when we last reported */
 static int paused; /* pause status */
-static snd_pcm_t *pcm; /* current pcm handle */
-static ao_sample_format pcm_format; /* current format if aodev != 0 */
 static size_t bpf; /* bytes per frame */
 static struct pollfd fds[NFDS]; /* if we need more than that */
 static int fdno; /* fd number */
-static snd_pcm_uframes_t pcm_bufsize; /* buffer size */
+static size_t bufsize; /* buffer size */
+#if API_ALSA
+/** @brief The current PCM handle */
+static snd_pcm_t *pcm;
+static snd_pcm_uframes_t last_pcm_bufsize; /* last seen buffer size */
+static ao_sample_format pcm_format; /* current format if aodev != 0 */
+#endif
+
+/** @brief Ready to send audio
+ *
+ * This is set when the destination is ready to receive audio. Generally
+ * this implies that the sound device is open. In the ALSA backend it
+ * does @b not necessarily imply that it has the right sample format.
+ */
+static int ready;
+
 static int forceplay; /* frames to force play */
+static int cmdfd = -1; /* child process input */
+static int bfd = -1; /* broadcast FD */
+
+/** @brief RTP timestamp
+ *
+ * This counts the number of samples played (NB not the number of frames
+ * played).
+ *
+ * The timestamp in the packet header is only 32 bits wide. With 44100Hz
+ * stereo, that only gives about half a day before wrapping, which is not
+ * particularly convenient for certain debugging purposes. Therefore the
+ * timestamp is maintained as a 64-bit integer, giving around six million years
+ * before wrapping, and truncated to 32 bits when transmitting.
+ */
+static uint64_t rtp_time;
+
+/** @brief RTP base timestamp
+ *
+ * This is the real time corresponding to an @ref rtp_time of 0. It is used
+ * to recalculate the timestamp after idle periods.
+ */
+static struct timeval rtp_time_0;
+
+static uint16_t rtp_seq; /* frame sequence number */
+static uint32_t rtp_id; /* RTP SSRC */
+static int idled; /* set when idled */
+static int audio_errors; /* audio error counter */
+
+/** @brief Structure of a backend */
+struct speaker_backend {
+  /** @brief Which backend this is
+   *
+   * @c -1 terminates the list.
+   */
+  int backend;
+
+  /** @brief Flags
+   *
+   * Possible values
+   * - @ref FIXED_FORMAT
+   */
+  unsigned flags;
+/** @brief Lock to configured sample format */
+#define FIXED_FORMAT 0x0001
+
+  /** @brief Initialization
+   *
+   * Called once at startup. This is responsible for one-time setup
+   * operations, for instance opening a network socket to transmit to.
+   *
+   * When writing to a native sound API this might @b not imply opening the
+   * native sound device - that might be done by @c activate below.
+   */
+  void (*init)(void);
+
+  /** @brief Activation
+   * @return 0 on success, non-0 on error
+   *
+   * Called to activate the output device.
+   *
+   * After this function succeeds, @ref ready should be non-0. As well as
+   * opening the audio device, this function is responsible for reconfiguring
+   * it if necessary to cope with different sample formats (for backends that
+   * don't demand a single fixed sample format for the lifetime of the server).
+   */
+  int (*activate)(void);
+
+  /** @brief Play sound
+   * @param frames Number of frames to play
+   * @return Number of frames actually played
+   */
+  size_t (*play)(size_t frames);
+
+  /** @brief Deactivation
+   *
+   * Called to deactivate the sound device. This is the inverse of
+   * @c activate above.
+   */
+  void (*deactivate)(void);
+};
+
+/** @brief Selected backend */
+static const struct speaker_backend *backend;
 static const struct option options[] = {
   { "help", no_argument, 0, 'h' },
@@ -116,12 +269,12 @@ static void version(void) {
   exit(0);
 }
-/* Return the number of bytes per frame in FORMAT. */
+/** @brief Return the number of bytes per frame in @p format */
 static size_t bytes_per_frame(const ao_sample_format *format) {
   return format->channels * format->bits / 8;
 }
-/* Find track ID, maybe creating it if not found. */
+/** @brief Find track @p id, maybe creating it if not found */
 static struct track *findtrack(const char *id, int create) {
   struct track *t;
@@ -141,7 +294,7 @@ static struct track *findtrack(const char *id, int create) {
   return t;
 }
-/* Remove track ID (but do not destroy it). */
+/** @brief Remove track @p id (but do not destroy it) */
 static struct track *removetrack(const char *id) {
   struct track *t, **tt;
@@ -153,7 +306,7 @@ static struct track *removetrack(const char *id) {
   return t;
 }
-/* Destroy a track. */
+/** @brief Destroy a track */
 static void destroy(struct track *t) {
   D(("destroy %s", t->id));
   if(t->fd != -1) xclose(t->fd);
@@ -161,7 +314,7 @@ static void destroy(struct track *t) {
   free(t);
 }
-/* Notice a new FD. */
+/** @brief Notice a new connection */
 static void acquire(struct track *t, int fd) {
   D(("acquire %s %d", t->id, fd));
   if(t->fd != -1)
@@ -170,7 +323,102 @@ static void acquire(struct track *t, int fd) {
   nonblock(fd);
 }
-/* Read data into a sample buffer. Return 0 on success, -1 on EOF. */
+/** @brief Return true if A and B denote identical libao formats, else false */
+static int formats_equal(const ao_sample_format *a,
+                         const ao_sample_format *b) {
+  return (a->bits == b->bits
+          && a->rate == b->rate
+          && a->channels == b->channels
+          && a->byte_format == b->byte_format);
+}
+
+/** @brief Compute arguments to sox */
+static void soxargs(const char ***pp, char **qq, ao_sample_format *ao) {
+  int n;
+
+  *(*pp)++ = "-t.raw";
+  *(*pp)++ = "-s";
+  *(*pp)++ = *qq; n = sprintf(*qq, "-r%d", ao->rate); *qq += n + 1;
+  *(*pp)++ = *qq; n = sprintf(*qq, "-c%d", ao->channels); *qq += n + 1;
+  /* sox 12.17.9 insists on -b etc; CVS sox insists on - etc; both are
+   * deployed!
*/ + switch(config->sox_generation) { + case 0: + if(ao->bits != 8 + && ao->byte_format != AO_FMT_NATIVE + && ao->byte_format != MACHINE_AO_FMT) { + *(*pp)++ = "-x"; + } + switch(ao->bits) { + case 8: *(*pp)++ = "-b"; break; + case 16: *(*pp)++ = "-w"; break; + case 32: *(*pp)++ = "-l"; break; + case 64: *(*pp)++ = "-d"; break; + default: fatal(0, "cannot handle sample size %d", (int)ao->bits); + } + break; + case 1: + switch(ao->byte_format) { + case AO_FMT_NATIVE: break; + case AO_FMT_BIG: *(*pp)++ = "-B"; break; + case AO_FMT_LITTLE: *(*pp)++ = "-L"; break; + } + *(*pp)++ = *qq; n = sprintf(*qq, "-%d", ao->bits/8); *qq += n + 1; + break; + } +} + +/** @brief Enable format translation + * + * If necessary, replaces a tracks inbound file descriptor with one connected + * to a sox invocation, which performs the required translation. + */ +static void enable_translation(struct track *t) { + if((backend->flags & FIXED_FORMAT) + && !formats_equal(&t->format, &config->sample_format)) { + char argbuf[1024], *q = argbuf; + const char *av[18], **pp = av; + int soxpipe[2]; + pid_t soxkid; + + *pp++ = "sox"; + soxargs(&pp, &q, &t->format); + *pp++ = "-"; + soxargs(&pp, &q, &config->sample_format); + *pp++ = "-"; + *pp++ = 0; + if(debugging) { + for(pp = av; *pp; pp++) + D(("sox arg[%d] = %s", pp - av, *pp)); + D(("end args")); + } + xpipe(soxpipe); + soxkid = xfork(); + if(soxkid == 0) { + signal(SIGPIPE, SIG_DFL); + xdup2(t->fd, 0); + xdup2(soxpipe[1], 1); + fcntl(0, F_SETFL, fcntl(0, F_GETFL) & ~O_NONBLOCK); + close(soxpipe[0]); + close(soxpipe[1]); + close(t->fd); + execvp("sox", (char **)av); + _exit(1); + } + D(("forking sox for format conversion (kid = %d)", soxkid)); + close(t->fd); + close(soxpipe[1]); + t->fd = soxpipe[0]; + t->format = config->sample_format; + } +} + +/** @brief Read data into a sample buffer + * @param t Pointer to track + * @return 0 on success, -1 on EOF + * + * This is effectively the read callback on @c t->fd. + */ static int fill(struct track *t) { size_t where, left; int n; @@ -206,6 +454,8 @@ static int fill(struct track *t) { /* Check that our assumptions are met. */ if(t->format.bits & 7) fatal(0, "bits per sample not a multiple of 8"); + /* If the input format is unsuitable, arrange to translate it */ + enable_translation(t); /* Make a new buffer for audio data. */ t->size = bytes_per_frame(&t->format) * t->format.rate * BUFFER_SECONDS; t->buffer = xmalloc(t->size); @@ -217,34 +467,16 @@ static int fill(struct track *t) { return 0; } -/* Return true if A and B denote identical libao formats, else false. */ -static int formats_equal(const ao_sample_format *a, - const ao_sample_format *b) { - return (a->bits == b->bits - && a->rate == b->rate - && a->channels == b->channels - && a->byte_format == b->byte_format); -} - -/* Close the sound device. */ +/** @brief Close the sound device */ static void idle(void) { - int err; - D(("idle")); - if(pcm) { - if((err = snd_pcm_nonblock(pcm, 0)) < 0) - fatal(0, "error calling snd_pcm_nonblock: %d", err); - D(("draining pcm")); - snd_pcm_drain(pcm); - D(("closing pcm")); - snd_pcm_close(pcm); - pcm = 0; - forceplay = 0; - D(("released audio device")); - } + if(backend->deactivate) + backend->deactivate(); + idled = 1; + ready = 0; } -/* Abandon the current track */ +/** @brief Abandon the current track */ static void abandon(void) { struct speaker_message sm; @@ -259,24 +491,181 @@ static void abandon(void) { forceplay = 0; } -/* Make sure the sound device is open and has the right sample format. 
Return - * 0 on success and -1 on error. */ -static int activate(void) { - int err; - snd_pcm_hw_params_t *hwparams; - snd_pcm_sw_params_t *swparams; - int sample_format = 0; - unsigned rate; +#if API_ALSA +/** @brief Log ALSA parameters */ +static void log_params(snd_pcm_hw_params_t *hwparams, + snd_pcm_sw_params_t *swparams) { + snd_pcm_uframes_t f; + unsigned u; + return; /* too verbose */ + if(hwparams) { + /* TODO */ + } + if(swparams) { + snd_pcm_sw_params_get_silence_size(swparams, &f); + info("sw silence_size=%lu", (unsigned long)f); + snd_pcm_sw_params_get_silence_threshold(swparams, &f); + info("sw silence_threshold=%lu", (unsigned long)f); + snd_pcm_sw_params_get_sleep_min(swparams, &u); + info("sw sleep_min=%lu", (unsigned long)u); + snd_pcm_sw_params_get_start_threshold(swparams, &f); + info("sw start_threshold=%lu", (unsigned long)f); + snd_pcm_sw_params_get_stop_threshold(swparams, &f); + info("sw stop_threshold=%lu", (unsigned long)f); + snd_pcm_sw_params_get_xfer_align(swparams, &f); + info("sw xfer_align=%lu", (unsigned long)f); + } +} +#endif + +/** @brief Enable sound output + * + * Makes sure the sound device is open and has the right sample format. Return + * 0 on success and -1 on error. + */ +static int activate(void) { /* If we don't know the format yet we cannot start. */ if(!playing->got_format) { D((" - not got format for %s", playing->id)); return -1; } + return backend->activate(); +} + +/* Check to see whether the current track has finished playing */ +static void maybe_finished(void) { + if(playing + && playing->eof + && (!playing->got_format + || playing->used < bytes_per_frame(&playing->format))) + abandon(); +} + +static void fork_cmd(void) { + pid_t cmdpid; + int pfd[2]; + if(cmdfd != -1) close(cmdfd); + xpipe(pfd); + cmdpid = xfork(); + if(!cmdpid) { + signal(SIGPIPE, SIG_DFL); + xdup2(pfd[0], 0); + close(pfd[0]); + close(pfd[1]); + execl("/bin/sh", "sh", "-c", config->speaker_command, (char *)0); + fatal(errno, "error execing /bin/sh"); + } + close(pfd[0]); + cmdfd = pfd[1]; + D(("forked cmd %d, fd = %d", cmdpid, cmdfd)); +} + +static void play(size_t frames) { + size_t avail_frames, avail_bytes, written_frames; + ssize_t written_bytes; + + /* Make sure the output device is activated */ + if(activate()) { + if(playing) + forceplay = frames; + else + forceplay = 0; /* Must have called abandon() */ + return; + } + D(("play: play %zu/%zu%s %dHz %db %dc", frames, playing->used / bpf, + playing->eof ? " EOF" : "", + playing->format.rate, + playing->format.bits, + playing->format.channels)); + /* If we haven't got enough bytes yet wait until we have. Exception: when + * we are at eof. 
*/ + if(playing->used < frames * bpf && !playing->eof) { + forceplay = frames; + return; + } + /* We have got enough data so don't force play again */ + forceplay = 0; + /* Figure out how many frames there are available to write */ + if(playing->start + playing->used > playing->size) + /* The ring buffer is currently wrapped, only play up to the wrap point */ + avail_bytes = playing->size - playing->start; + else + /* The ring buffer is not wrapped, can play the lot */ + avail_bytes = playing->used; + avail_frames = avail_bytes / bpf; + /* Only play up to the requested amount */ + if(avail_frames > frames) + avail_frames = frames; + if(!avail_frames) + return; + /* Play it, Sam */ + written_frames = backend->play(avail_frames); + written_bytes = written_frames * bpf; + /* written_bytes and written_frames had better both be set and correct by + * this point */ + playing->start += written_bytes; + playing->used -= written_bytes; + playing->played += written_frames; + /* If the pointer is at the end of the buffer (or the buffer is completely + * empty) wrap it back to the start. */ + if(!playing->used || playing->start == playing->size) + playing->start = 0; + frames -= written_frames; +} + +/* Notify the server what we're up to. */ +static void report(void) { + struct speaker_message sm; + + if(playing && playing->buffer != (void *)&playing->format) { + memset(&sm, 0, sizeof sm); + sm.type = paused ? SM_PAUSED : SM_PLAYING; + strcpy(sm.id, playing->id); + sm.data = playing->played / playing->format.rate; + speaker_send(1, &sm, 0); + } + time(&last_report); +} + +static void reap(int __attribute__((unused)) sig) { + pid_t cmdpid; + int st; + + do + cmdpid = waitpid(-1, &st, WNOHANG); + while(cmdpid > 0); + signal(SIGCHLD, reap); +} + +static int addfd(int fd, int events) { + if(fdno < NFDS) { + fds[fdno].fd = fd; + fds[fdno].events = events; + return fdno++; + } else + return -1; +} + +#if API_ALSA +/** @brief ALSA backend initialization */ +static void alsa_init(void) { + info("selected ALSA backend"); +} + +/** @brief ALSA backend activation */ +static int alsa_activate(void) { /* If we need to change format then close the current device. 
*/ if(pcm && !formats_equal(&playing->format, &pcm_format)) - idle(); + idle(); if(!pcm) { + snd_pcm_hw_params_t *hwparams; + snd_pcm_sw_params_t *swparams; + snd_pcm_uframes_t pcm_bufsize; + int err; + int sample_format = 0; + unsigned rate; + D(("snd_pcm_open")); if((err = snd_pcm_open(&pcm, config->device, @@ -329,14 +718,16 @@ static int activate(void) { playing->format.channels, err); goto fatal; } - pcm_bufsize = 3 * FRAMES; + bufsize = 3 * FRAMES; + pcm_bufsize = bufsize; if((err = snd_pcm_hw_params_set_buffer_size_near(pcm, hwparams, &pcm_bufsize)) < 0) fatal(0, "error from snd_pcm_hw_params_set_buffer_size (%d): %d", 3 * FRAMES, err); - if(pcm_bufsize != 3 * FRAMES) + if(pcm_bufsize != 3 * FRAMES && pcm_bufsize != last_pcm_bufsize) info("asked for PCM buffer of %d frames, got %d", 3 * FRAMES, (int)pcm_bufsize); + last_pcm_bufsize = pcm_bufsize; if((err = snd_pcm_hw_params(pcm, hwparams)) < 0) fatal(0, "error calling snd_pcm_hw_params: %d", err); D(("set up sw params")); @@ -351,6 +742,8 @@ static int activate(void) { pcm_format = playing->format; bpf = bytes_per_frame(&pcm_format); D(("acquired audio device")); + log_params(hwparams, swparams); + ready = 1; } return 0; fatal: @@ -364,110 +757,312 @@ error: return -1; } -/* Check to see whether the current track has finished playing */ -static void maybe_finished(void) { - if(playing - && playing->eof - && (!playing->got_format - || playing->used < bytes_per_frame(&playing->format))) - abandon(); -} - -static void play(size_t frames) { - snd_pcm_sframes_t written_frames; - size_t avail_bytes, avail_frames, written_bytes; +/** @brief Play via ALSA */ +static size_t alsa_play(size_t frames) { + snd_pcm_sframes_t pcm_written_frames; int err; - - if(activate()) { - if(playing) - forceplay = frames; - else - forceplay = 0; /* Must have called abandon() */ - return; - } - D(("play: play %zu/%zu%s %dHz %db %dc", frames, playing->used / bpf, - playing->eof ? " EOF" : "", - playing->format.rate, - playing->format.bits, - playing->format.channels)); - /* If we haven't got enough bytes yet wait until we have. Exception: when - * we are at eof. 
*/ - if(playing->used < frames * bpf && !playing->eof) { - forceplay = frames; - return; - } - /* We have got enough data so don't force play again */ - forceplay = 0; - /* Figure out how many frames there are available to write */ - if(playing->start + playing->used > playing->size) - avail_bytes = playing->size - playing->start; - else - avail_bytes = playing->used; - avail_frames = avail_bytes / bpf; - if(avail_frames > frames) - avail_frames = frames; - if(!avail_frames) - return; - written_frames = snd_pcm_writei(pcm, - playing->buffer + playing->start, - avail_frames); + + pcm_written_frames = snd_pcm_writei(pcm, + playing->buffer + playing->start, + frames); D(("actually play %zu frames, wrote %d", - avail_frames, (int)written_frames)); - if(written_frames < 0) { - switch(written_frames) { + frames, (int)pcm_written_frames)); + if(pcm_written_frames < 0) { + switch(pcm_written_frames) { case -EPIPE: /* underrun */ error(0, "snd_pcm_writei reports underrun"); if((err = snd_pcm_prepare(pcm)) < 0) fatal(0, "error calling snd_pcm_prepare: %d", err); - return; + return 0; case -EAGAIN: - return; + return 0; + default: + fatal(0, "error calling snd_pcm_writei: %d", + (int)pcm_written_frames); + } + } else + return pcm_written_frames; +} + +/** @brief ALSA deactivation */ +static void alsa_deactivate(void) { + if(pcm) { + int err; + + if((err = snd_pcm_nonblock(pcm, 0)) < 0) + fatal(0, "error calling snd_pcm_nonblock: %d", err); + D(("draining pcm")); + snd_pcm_drain(pcm); + D(("closing pcm")); + snd_pcm_close(pcm); + pcm = 0; + forceplay = 0; + D(("released audio device")); + } +} +#endif + +/** @brief Command backend initialization */ +static void command_init(void) { + info("selected command backend"); + fork_cmd(); +} + +/** @brief Play to a subprocess */ +static size_t command_play(size_t frames) { + size_t bytes = frames * bpf; + int written_bytes; + + written_bytes = write(cmdfd, playing->buffer + playing->start, bytes); + D(("actually play %zu bytes, wrote %d", + bytes, written_bytes)); + if(written_bytes < 0) { + switch(errno) { + case EPIPE: + error(0, "hmm, command died; trying another"); + fork_cmd(); + return 0; + case EAGAIN: + return 0; default: - fatal(0, "error calling snd_pcm_writei: %d", (int)written_frames); + fatal(errno, "error writing to subprocess"); } + } else + return written_bytes / bpf; +} + +/** @brief Command/network backend activation */ +static int generic_activate(void) { + if(!ready) { + bufsize = 3 * FRAMES; + bpf = bytes_per_frame(&config->sample_format); + D(("acquired audio device")); + ready = 1; } - written_bytes = written_frames * bpf; - playing->start += written_bytes; - playing->used -= written_bytes; - playing->played += written_frames; - /* If the pointer is at the end of the buffer (or the buffer is completely - * empty) wrap it back to the start. */ - if(!playing->used || playing->start == playing->size) - playing->start = 0; - frames -= written_frames; + return 0; } -/* Notify the server what we're up to. 
*/ -static void report(void) { - struct speaker_message sm; +/** @brief Network backend initialization */ +static void network_init(void) { + struct addrinfo *res, *sres; + static const struct addrinfo pref = { + 0, + PF_INET, + SOCK_DGRAM, + IPPROTO_UDP, + 0, + 0, + 0, + 0 + }; + static const struct addrinfo prefbind = { + AI_PASSIVE, + PF_INET, + SOCK_DGRAM, + IPPROTO_UDP, + 0, + 0, + 0, + 0 + }; + static const int one = 1; + int sndbuf, target_sndbuf = 131072; + socklen_t len; + char *sockname, *ssockname; - if(playing && playing->buffer != (void *)&playing->format) { - memset(&sm, 0, sizeof sm); - sm.type = paused ? SM_PAUSED : SM_PLAYING; - strcpy(sm.id, playing->id); - sm.data = playing->played / playing->format.rate; - speaker_send(1, &sm, 0); + res = get_address(&config->broadcast, &pref, &sockname); + if(!res) exit(-1); + if(config->broadcast_from.n) { + sres = get_address(&config->broadcast_from, &prefbind, &ssockname); + if(!sres) exit(-1); + } else + sres = 0; + if((bfd = socket(res->ai_family, + res->ai_socktype, + res->ai_protocol)) < 0) + fatal(errno, "error creating broadcast socket"); + if(setsockopt(bfd, SOL_SOCKET, SO_BROADCAST, &one, sizeof one) < 0) + fatal(errno, "error setting SO_BROADCAST on broadcast socket"); + len = sizeof sndbuf; + if(getsockopt(bfd, SOL_SOCKET, SO_SNDBUF, + &sndbuf, &len) < 0) + fatal(errno, "error getting SO_SNDBUF"); + if(target_sndbuf > sndbuf) { + if(setsockopt(bfd, SOL_SOCKET, SO_SNDBUF, + &target_sndbuf, sizeof target_sndbuf) < 0) + error(errno, "error setting SO_SNDBUF to %d", target_sndbuf); + else + info("changed socket send buffer size from %d to %d", + sndbuf, target_sndbuf); + } else + info("default socket send buffer is %d", + sndbuf); + /* We might well want to set additional broadcast- or multicast-related + * options here */ + if(sres && bind(bfd, sres->ai_addr, sres->ai_addrlen) < 0) + fatal(errno, "error binding broadcast socket to %s", ssockname); + if(connect(bfd, res->ai_addr, res->ai_addrlen) < 0) + fatal(errno, "error connecting broadcast socket to %s", sockname); + /* Select an SSRC */ + gcry_randomize(&rtp_id, sizeof rtp_id, GCRY_STRONG_RANDOM); + info("selected network backend, sending to %s", sockname); + if(config->sample_format.byte_format != AO_FMT_BIG) { + info("forcing big-endian sample format"); + config->sample_format.byte_format = AO_FMT_BIG; } - time(&last_report); } -static int addfd(int fd, int events) { - if(fdno < NFDS) { - fds[fdno].fd = fd; - fds[fdno].events = events; - return fdno++; +/** @brief Play over the network */ +static size_t network_play(size_t frames) { + struct rtp_header header; + struct iovec vec[2]; + size_t bytes = frames * bpf, written_frames; + int written_bytes; + /* We transmit using RTP (RFC3550) and attempt to conform to the internet + * AVT profile (RFC3551). */ + + if(idled) { + /* There may have been a gap. Fix up the RTP time accordingly. */ + struct timeval now; + uint64_t delta; + uint64_t target_rtp_time; + + /* Find the current time */ + xgettimeofday(&now, 0); + /* Find the number of microseconds elapsed since rtp_time=0 */ + delta = tvsub_us(now, rtp_time_0); + assert(delta <= UINT64_MAX / 88200); + target_rtp_time = (delta * playing->format.rate + * playing->format.channels) / 1000000; + /* Overflows at ~6 years uptime with 44100Hz stereo */ + + /* rtp_time is the number of samples we've played. NB that we play + * RTP_AHEAD_MS ahead of ourselves, so it may legitimately be ahead of + * the value we deduce from time comparison. 
+ * + * Suppose we have 1s track started at t=0, and another track begins to + * play at t=2s. Suppose RTP_AHEAD_MS=1000 and 44100Hz stereo. In that + * case we'll send 1s of audio as fast as we can, giving rtp_time=88200. + * rtp_time stops at this point. + * + * At t=2s we'll have calculated target_rtp_time=176400. In this case we + * set rtp_time=176400 and the player can correctly conclude that it + * should leave 1s between the tracks. + * + * Suppose instead that the second track arrives at t=0.5s, and that + * we've managed to transmit the whole of the first track already. We'll + * have target_rtp_time=44100. + * + * The desired behaviour is to play the second track back to back with + * first. In this case therefore we do not modify rtp_time. + * + * Is it ever right to reduce rtp_time? No; for that would imply + * transmitting packets with overlapping timestamp ranges, which does not + * make sense. + */ + if(target_rtp_time > rtp_time) { + /* More time has elapsed than we've transmitted samples. That implies + * we've been 'sending' silence. */ + info("advancing rtp_time by %"PRIu64" samples", + target_rtp_time - rtp_time); + rtp_time = target_rtp_time; + } else if(target_rtp_time < rtp_time) { + const int64_t samples_ahead = ((uint64_t)RTP_AHEAD_MS + * config->sample_format.rate + * config->sample_format.channels + / 1000); + + if(target_rtp_time + samples_ahead < rtp_time) { + info("reversing rtp_time by %"PRIu64" samples", + rtp_time - target_rtp_time); + } + } + } + header.vpxcc = 2 << 6; /* V=2, P=0, X=0, CC=0 */ + header.seq = htons(rtp_seq++); + header.timestamp = htonl((uint32_t)rtp_time); + header.ssrc = rtp_id; + header.mpt = (idled ? 0x80 : 0x00) | 10; + /* 10 = L16 = 16-bit x 2 x 44100KHz. We ought to deduce this value from + * the sample rate (in a library somewhere so that configuration.c can rule + * out invalid rates). + */ + idled = 0; + if(bytes > NETWORK_BYTES - sizeof header) { + bytes = NETWORK_BYTES - sizeof header; + /* Always send a whole number of frames */ + bytes -= bytes % bpf; + } + /* "The RTP clock rate used for generating the RTP timestamp is independent + * of the number of channels and the encoding; it equals the number of + * sampling periods per second. For N-channel encodings, each sampling + * period (say, 1/8000 of a second) generates N samples. 
(This terminology + * is standard, but somewhat confusing, as the total number of samples + * generated per second is then the sampling rate times the channel + * count.)" + */ + vec[0].iov_base = (void *)&header; + vec[0].iov_len = sizeof header; + vec[1].iov_base = playing->buffer + playing->start; + vec[1].iov_len = bytes; + do { + written_bytes = writev(bfd, vec, 2); + } while(written_bytes < 0 && errno == EINTR); + if(written_bytes < 0) { + error(errno, "error transmitting audio data"); + ++audio_errors; + if(audio_errors == 10) + fatal(0, "too many audio errors"); + return 0; } else - return -1; + audio_errors /= 2; + written_bytes -= sizeof (struct rtp_header); + written_frames = written_bytes / bpf; + /* Advance RTP's notion of the time */ + rtp_time += written_frames * playing->format.channels; + return written_frames; } +/** @brief Table of speaker backends */ +static const struct speaker_backend backends[] = { +#if API_ALSA + { + BACKEND_ALSA, + 0, + alsa_init, + alsa_activate, + alsa_play, + alsa_deactivate + }, +#endif + { + BACKEND_COMMAND, + FIXED_FORMAT, + command_init, + generic_activate, + command_play, + 0 /* deactivate */ + }, + { + BACKEND_NETWORK, + FIXED_FORMAT, + network_init, + generic_activate, + network_play, + 0 /* deactivate */ + }, + { -1, 0, 0, 0, 0, 0 } +}; + int main(int argc, char **argv) { - int n, fd, stdin_slot, alsa_slots, alsa_nslots = -1, err; - unsigned short alsa_revents; + int n, fd, stdin_slot, alsa_slots, cmdfd_slot, bfd_slot, poke, timeout; struct track *t; struct speaker_message sm; +#if API_ALSA + int alsa_nslots = -1, err; +#endif set_progname(argv); - mem_init(0); if(!setlocale(LC_CTYPE, "")) fatal(errno, "error calling setlocale"); while((n = getopt_long(argc, argv, "hVc:dD", options, 0)) >= 0) { switch(n) { @@ -488,13 +1083,23 @@ int main(int argc, char **argv) { if(config_read()) fatal(0, "cannot read configuration"); /* ignore SIGPIPE */ signal(SIGPIPE, SIG_IGN); + /* reap kids */ + signal(SIGCHLD, reap); /* set nice value */ xnice(config->nice_speaker); /* change user */ become_mortal(); /* make sure we're not root, whatever the config says */ if(getuid() == 0 || geteuid() == 0) fatal(0, "do not run as root"); - info("started"); + /* identify the backend used to play */ + for(n = 0; backends[n].backend != -1; ++n) + if(backends[n].backend == config->speaker_backend) + break; + if(backends[n].backend == -1) + fatal(0, "unsupported backend %d", config->speaker_backend); + backend = &backends[n]; + /* backend-specific initialization */ + backend->init(); while(getppid() != 1) { fdno = 0; /* Always ready for commands from the main server. */ @@ -507,37 +1112,135 @@ int main(int argc, char **argv) { playing->slot = -1; /* If forceplay is set then wait until it succeeds before waiting on the * sound device. */ - if(pcm && !forceplay) { - alsa_slots = fdno; - alsa_nslots = snd_pcm_poll_descriptors(pcm, &fds[fdno], NFDS - fdno); - fdno += alsa_nslots; - } else - alsa_slots = -1; + alsa_slots = -1; + cmdfd_slot = -1; + bfd_slot = -1; + /* By default we will wait up to a second before thinking about current + * state. */ + timeout = 1000; + if(ready && !forceplay) { + switch(config->speaker_backend) { + case BACKEND_COMMAND: + /* We send sample data to the subprocess as fast as it can accept it. + * This isn't ideal as pause latency can be very high as a result. 
*/ + if(cmdfd >= 0) + cmdfd_slot = addfd(cmdfd, POLLOUT); + break; + case BACKEND_NETWORK: { + struct timeval now; + uint64_t target_us; + uint64_t target_rtp_time; + const int64_t samples_ahead = ((uint64_t)RTP_AHEAD_MS + * config->sample_format.rate + * config->sample_format.channels + / 1000); +#if 0 + static unsigned logit; +#endif + + /* If we're starting then initialize the base time */ + if(!rtp_time) + xgettimeofday(&rtp_time_0, 0); + /* We send audio data whenever we get RTP_AHEAD seconds or more + * behind */ + xgettimeofday(&now, 0); + target_us = tvsub_us(now, rtp_time_0); + assert(target_us <= UINT64_MAX / 88200); + target_rtp_time = (target_us * config->sample_format.rate + * config->sample_format.channels) + + / 1000000; +#if 0 + /* TODO remove logging guff */ + if(!(logit++ & 1023)) + info("rtp_time %llu target %llu difference %lld [%lld]", + rtp_time, target_rtp_time, + rtp_time - target_rtp_time, + samples_ahead); +#endif + if((int64_t)(rtp_time - target_rtp_time) < samples_ahead) + bfd_slot = addfd(bfd, POLLOUT); + break; + } +#if API_ALSA + case BACKEND_ALSA: { + /* We send sample data to ALSA as fast as it can accept it, relying on + * the fact that it has a relatively small buffer to minimize pause + * latency. */ + int retry = 3; + + alsa_slots = fdno; + do { + retry = 0; + alsa_nslots = snd_pcm_poll_descriptors(pcm, &fds[fdno], NFDS - fdno); + if((alsa_nslots <= 0 + || !(fds[alsa_slots].events & POLLOUT)) + && snd_pcm_state(pcm) == SND_PCM_STATE_XRUN) { + error(0, "underrun detected after call to snd_pcm_poll_descriptors()"); + if((err = snd_pcm_prepare(pcm))) + fatal(0, "error calling snd_pcm_prepare: %d", err); + } else + break; + } while(retry-- > 0); + if(alsa_nslots >= 0) + fdno += alsa_nslots; + break; + } +#endif + default: + assert(!"unknown backend"); + } + } /* If any other tracks don't have a full buffer, try to read sample data * from them. 
*/ for(t = tracks; t; t = t->next) if(t != playing) { if(!t->eof && t->used < t->size) { - t->slot = addfd(t->fd, POLLIN); + t->slot = addfd(t->fd, POLLIN | POLLHUP); } else t->slot = -1; } - /* Wait up to a second before thinking about current state */ - n = poll(fds, fdno, 1000); + /* Wait for something interesting to happen */ + n = poll(fds, fdno, timeout); if(n < 0) { if(errno == EINTR) continue; fatal(errno, "error calling poll"); } /* Play some sound before doing anything else */ - if(alsa_slots != -1) { - if((err = snd_pcm_poll_descriptors_revents(pcm, - &fds[alsa_slots], - alsa_nslots, - &alsa_revents)) < 0) - fatal(0, "error calling snd_pcm_poll_descriptors_revents: %d", err); - if(alsa_revents & POLLOUT) - play(3 * FRAMES); - } else { + poke = 0; + switch(config->speaker_backend) { +#if API_ALSA + case BACKEND_ALSA: + if(alsa_slots != -1) { + unsigned short alsa_revents; + + if((err = snd_pcm_poll_descriptors_revents(pcm, + &fds[alsa_slots], + alsa_nslots, + &alsa_revents)) < 0) + fatal(0, "error calling snd_pcm_poll_descriptors_revents: %d", err); + if(alsa_revents & (POLLOUT | POLLERR)) + play(3 * FRAMES); + } else + poke = 1; + break; +#endif + case BACKEND_COMMAND: + if(cmdfd_slot != -1) { + if(fds[cmdfd_slot].revents & (POLLOUT | POLLERR)) + play(3 * FRAMES); + } else + poke = 1; + break; + case BACKEND_NETWORK: + if(bfd_slot != -1) { + if(fds[bfd_slot].revents & (POLLOUT | POLLERR)) + play(3 * FRAMES); + } else + poke = 1; + break; + } + if(poke) { /* Some attempt to play must have failed */ if(playing && !paused) play(forceplay); @@ -561,7 +1264,7 @@ int main(int argc, char **argv) { t = findtrack(sm.id, 1); if(fd != -1) acquire(t, fd); playing = t; - play(pcm_bufsize); + play(bufsize); report(); break; case SM_PAUSE: @@ -574,7 +1277,7 @@ int main(int argc, char **argv) { if(paused) { paused = 0; if(playing) - play(pcm_bufsize); + play(bufsize); } report(); break; @@ -604,16 +1307,16 @@ int main(int argc, char **argv) { } /* Read in any buffered data */ for(t = tracks; t; t = t->next) - if(t->slot != -1 && (fds[t->slot].revents & POLLIN)) + if(t->slot != -1 && (fds[t->slot].revents & (POLLIN | POLLHUP))) fill(t); /* We might be able to play now */ - if(pcm && forceplay && playing && !paused) + if(ready && forceplay && playing && !paused) play(forceplay); /* Maybe we finished playing a track somewhere in the above */ maybe_finished(); /* If we don't need the sound device for now then close it for the benefit * of anyone else who wants it. */ - if((!playing || paused) && pcm) + if((!playing || paused) && ready) idle(); /* If we've not reported out state for a second do so now. */ if(time(0) > last_report) @@ -631,4 +1334,3 @@ fill-column:79 indent-tabs-mode:nil End: */ -/* arch-tag:HQ4ayCGCjeBF97RuRnvcyg */
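
As a reading aid for the rtp_time catch-up rule implemented in network_play() above: rtp_time counts samples already transmitted, the target is derived from the wall-clock time elapsed since rtp_time was zero, and the timestamp is only ever advanced to cover an idle gap, never moved backwards. The following minimal sketch is illustrative only and is not part of the patch; the function name rtp_catchup and its parameters are hypothetical.

/* Illustrative sketch (not from the patch): the rtp_time catch-up rule
 * described in network_play().  rtp_catchup and elapsed_us are hypothetical
 * names. */
#include <stdint.h>
#include <stdio.h>

static uint64_t rtp_catchup(uint64_t rtp_time,   /* samples sent so far */
                            uint64_t elapsed_us, /* us since rtp_time was 0 */
                            unsigned rate,       /* e.g. 44100 */
                            unsigned channels) { /* e.g. 2 */
  /* Samples that would have been sent had we transmitted in real time */
  const uint64_t target = elapsed_us * rate * channels / 1000000;

  if(target > rtp_time)
    return target;                      /* we idled; skip over the silent gap */
  return rtp_time;                      /* never move the timestamp backwards */
}

int main(void) {
  /* A 1s 44.1kHz stereo track ends at rtp_time=88200; the next track starts
   * at t=2s, so the receiver should see a 1s gap: prints 176400. */
  printf("%llu\n", (unsigned long long)rtp_catchup(88200, 2000000, 44100, 2));
  /* The next track starts at t=0.5s but a full second was already sent ahead:
   * the timestamp is left alone, so this prints 88200. */
  printf("%llu\n", (unsigned long long)rtp_catchup(88200, 500000, 44100, 2));
  return 0;
}

These two outputs match the two scenarios worked through in the comment inside network_play().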