1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
/***
4 This file is part of systemd.
6 Copyright (C) 2014 David Herrmann <dh.herrmann@gmail.com>
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
23 #include <libevdev/libevdev.h>
27 #include <systemd/sd-bus.h>
28 #include <systemd/sd-event.h>
31 #include "idev-internal.h"
35 typedef struct idev_evdev idev_evdev;
36 typedef struct unmanaged_evdev unmanaged_evdev;
37 typedef struct managed_evdev managed_evdev;
41 struct libevdev *evdev;
43 sd_event_source *fd_src;
44 sd_event_source *idle_src;
46 bool unsync : 1; /* not in-sync with kernel */
47 bool resync : 1; /* re-syncing with kernel */
51 struct unmanaged_evdev {
56 struct managed_evdev {
59 sd_bus_slot *slot_take_device;
61 bool requested : 1; /* TakeDevice() was sent */
62 bool acquired : 1; /* TakeDevice() was successful */
65 #define idev_evdev_from_element(_e) container_of((_e), idev_evdev, element)
66 #define unmanaged_evdev_from_element(_e) \
67 container_of(idev_evdev_from_element(_e), unmanaged_evdev, evdev)
68 #define managed_evdev_from_element(_e) \
69 container_of(idev_evdev_from_element(_e), managed_evdev, evdev)
71 #define IDEV_EVDEV_INIT(_vtable, _session) ((idev_evdev){ \
72 .element = IDEV_ELEMENT_INIT((_vtable), (_session)), \
76 #define IDEV_EVDEV_NAME_MAX (8 + DECIMAL_STR_MAX(unsigned) * 2)
78 static const idev_element_vtable unmanaged_evdev_vtable;
79 static const idev_element_vtable managed_evdev_vtable;
81 static int idev_evdev_resume(idev_evdev *evdev, int dev_fd);
82 static void idev_evdev_pause(idev_evdev *evdev, bool release);
/*
85 * Virtual Evdev Element
86 * The virtual evdev element is the base class of all other evdev elements. It
87 * uses libevdev to access the kernel evdev API. It supports asynchronous
88 * access revocation, re-syncing if events got dropped and more.
89 * This element cannot be used by itself. There must be a wrapper around it
90 * which opens a file-descriptor and passes it to the virtual evdev element.
 */
93 static void idev_evdev_name(char *out, dev_t devnum) {
94 /* @out must be at least of size IDEV_EVDEV_NAME_MAX */
95 sprintf(out, "evdev/%u:%u", major(devnum), minor(devnum));
98 static int idev_evdev_feed_resync(idev_evdev *evdev) {
100 .type = IDEV_DATA_RESYNC,
101 .resync = evdev->resync,
104 return idev_element_feed(&evdev->element, &data);
107 static int idev_evdev_feed_evdev(idev_evdev *evdev, struct input_event *event) {
109 .type = IDEV_DATA_EVDEV,
110 .resync = evdev->resync,
116 return idev_element_feed(&evdev->element, &data);
119 static void idev_evdev_hup(idev_evdev *evdev) {
121 * On HUP, we close the current fd via idev_evdev_pause(). This drops
122 * the event-sources from the main-loop and effectively puts the
123 * element asleep. If the HUP is part of a hotplug-event, a following
124 * udev-notification will destroy the element. Otherwise, the HUP is
125 * either result of access-revokation or a serious error.
126 * For unmanaged devices, we should never receive HUP (except for
127 * unplug-events). But if we do, something went seriously wrong and we
128 * shouldn't try to be clever.
129 * Instead, we simply stay asleep and wait for the device to be
130 * disabled and then re-enabled (or closed and re-opened). This will
131 * re-open the device node and restart the device.
132 * For managed devices, a HUP usually means our device-access was
133 * revoked. In that case, we simply put the device asleep and wait for
134 * logind to notify us once the device is alive again. logind also
135 * passes us a new fd. Hence, we don't have to re-enable the device.
137 * Long story short: The only thing we have to do here, is close() the
138 * file-descriptor and remove it from the main-loop. Everything else is
139 * handled via additional events we receive.
142 idev_evdev_pause(evdev, true);
145 static int idev_evdev_io(idev_evdev *evdev) {
146 idev_element *e = &evdev->element;
147 struct input_event ev;
152 * Read input-events via libevdev until the input-queue is drained. In
153 * case we're disabled, don't do anything. The input-queue might
154 * overflow, but we don't care as we have to resync after wake-up,
156 * TODO: libevdev should give us a hint how many events to read. We
157 * really want to avoid starvation, so we shouldn't read forever in
158 * case we cannot keep up with the kernel.
159 * TODO: Make sure libevdev always reports SYN_DROPPED to us, regardless
160 * whether any event was synced afterwards.
163 flags = LIBEVDEV_READ_FLAG_NORMAL;
166 /* immediately resync, even if in sync right now */
167 evdev->unsync = false;
168 evdev->resync = false;
169 flags = LIBEVDEV_READ_FLAG_NORMAL;
170 r = libevdev_next_event(evdev->evdev, flags | LIBEVDEV_READ_FLAG_FORCE_SYNC, &ev);
171 if (r < 0 && r != -EAGAIN) {
174 } else if (r != LIBEVDEV_READ_STATUS_SYNC) {
175 log_debug("idev-evdev: %s/%s: cannot force resync: %d",
176 e->session->name, e->name, r);
179 r = libevdev_next_event(evdev->evdev, flags, &ev);
182 if (evdev->resync && r == -EAGAIN) {
184 evdev->resync = false;
185 flags = LIBEVDEV_READ_FLAG_NORMAL;
186 } else if (r == -EAGAIN) {
187 /* no data available */
192 } else if (r == LIBEVDEV_READ_STATUS_SYNC) {
195 r = idev_evdev_feed_evdev(evdev, &ev);
202 evdev->resync = true;
203 flags = LIBEVDEV_READ_FLAG_SYNC;
204 r = idev_evdev_feed_resync(evdev);
212 r = idev_evdev_feed_evdev(evdev, &ev);
221 log_debug_errno(error, "idev-evdev: %s/%s: error on data event: %m",
222 e->session->name, e->name);
226 idev_evdev_hup(evdev);
227 return 0; /* idev_evdev_hup() handles the error so discard it */
230 static int idev_evdev_event_fn(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
231 idev_evdev *evdev = userdata;
233 /* fetch data as long as EPOLLIN is signalled */
234 if (revents & EPOLLIN)
235 return idev_evdev_io(evdev);
237 if (revents & (EPOLLHUP | EPOLLERR))
238 idev_evdev_hup(evdev);
243 static int idev_evdev_idle_fn(sd_event_source *s, void *userdata) {
244 idev_evdev *evdev = userdata;
247 * The idle-event is raised whenever we have to re-sync the libevdev
248 * state from the kernel. We simply call into idev_evdev_io() which
249 * flushes the state and re-syncs it if @unsync is set.
250 * State has to be synced whenever our view of the kernel device is
251 * out of date. This is the case when we open the device, if the
252 * kernel's receive buffer overflows, or on other exceptional
253 * situations. Events during re-syncs must be forwarded to the upper
254 * layers so they can update their view of the device. However, such
255 * events must only be handled passively, as they might be out-of-order
256 * and/or re-ordered. Therefore, we mark them as 'sync' events.
262 return idev_evdev_io(evdev);
265 static void idev_evdev_destroy(idev_evdev *evdev) {
267 assert(evdev->fd < 0);
269 libevdev_free(evdev->evdev);
273 static void idev_evdev_enable(idev_evdev *evdev) {
275 assert(evdev->fd_src);
276 assert(evdev->idle_src);
280 if (evdev->fd < 0 || evdev->element.n_open < 1 || !evdev->element.enabled)
283 evdev->running = true;
284 sd_event_source_set_enabled(evdev->fd_src, SD_EVENT_ON);
285 sd_event_source_set_enabled(evdev->idle_src, SD_EVENT_ONESHOT);
288 static void idev_evdev_disable(idev_evdev *evdev) {
290 assert(evdev->fd_src);
291 assert(evdev->idle_src);
296 evdev->running = false;
297 idev_evdev_feed_resync(evdev);
298 sd_event_source_set_enabled(evdev->fd_src, SD_EVENT_OFF);
299 sd_event_source_set_enabled(evdev->idle_src, SD_EVENT_OFF);
302 static int idev_evdev_resume(idev_evdev *evdev, int dev_fd) {
303 idev_element *e = &evdev->element;
304 _cleanup_close_ int fd = dev_fd;
307 if (fd < 0 || evdev->fd == fd) {
309 idev_evdev_enable(evdev);
313 idev_evdev_pause(evdev, true);
314 log_debug("idev-evdev: %s/%s: resume", e->session->name, e->name);
316 r = fd_nonblock(fd, true);
320 r = fd_cloexec(fd, true);
324 flags = fcntl(fd, F_GETFL, 0);
329 if (flags == O_WRONLY)
332 evdev->element.readable = true;
333 evdev->element.writable = !(flags & O_RDONLY);
336 * TODO: We *MUST* re-sync the device so we get a delta of the changed
337 * state while we didn't read events from the device. This works just
338 * fine with libevdev_change_fd(), however, libevdev_new_from_fd() (or
339 * libevdev_set_fd()) don't pass us events for the initial device
340 * state. So even if we force a re-sync, we will not get the delta for
341 * the initial device state.
342 * We really need to fix libevdev to support that!
345 r = libevdev_change_fd(evdev->evdev, fd);
347 r = libevdev_new_from_fd(fd, &evdev->evdev);
352 r = sd_event_add_io(e->session->context->event,
355 EPOLLHUP | EPOLLERR | EPOLLIN,
361 r = sd_event_add_defer(e->session->context->event,
366 evdev->fd_src = sd_event_source_unref(evdev->fd_src);
370 sd_event_source_set_enabled(evdev->fd_src, SD_EVENT_OFF);
371 sd_event_source_set_enabled(evdev->idle_src, SD_EVENT_OFF);
373 evdev->unsync = true;
377 idev_evdev_enable(evdev);
381 static void idev_evdev_pause(idev_evdev *evdev, bool release) {
382 idev_element *e = &evdev->element;
387 log_debug("idev-evdev: %s/%s: pause", e->session->name, e->name);
389 idev_evdev_disable(evdev);
391 evdev->idle_src = sd_event_source_unref(evdev->idle_src);
392 evdev->fd_src = sd_event_source_unref(evdev->fd_src);
393 evdev->fd = safe_close(evdev->fd);
/*
398 * Unmanaged Evdev Element
399 * The unmanaged evdev element opens the evdev node for a given input device
400 * directly (/dev/input/eventX) and thus needs sufficient privileges. It opens
401 * the device only if we really require it and releases it as soon as we're
402 * disabled or closed.
403 * The unmanaged element can be used in all situations where you have direct
404 * access to input device nodes. Unlike managed evdev elements, it can be used
405 * outside of user sessions and in emergency situations where logind is not
 */
409 static void unmanaged_evdev_resume(idev_element *e) {
410 unmanaged_evdev *eu = unmanaged_evdev_from_element(e);
414 * Unmanaged devices can be acquired on-demand. Therefore, don't
415 * acquire it unless someone opened the device *and* we're enabled.
417 if (e->n_open < 1 || !e->enabled)
422 fd = open(eu->devnode, O_RDWR | O_CLOEXEC | O_NOCTTY | O_NONBLOCK);
424 if (errno != EACCES && errno != EPERM) {
425 log_debug_errno(errno, "idev-evdev: %s/%s: cannot open node %s: %m",
426 e->session->name, e->name, eu->devnode);
430 fd = open(eu->devnode, O_RDONLY | O_CLOEXEC | O_NOCTTY | O_NONBLOCK);
432 log_debug_errno(errno, "idev-evdev: %s/%s: cannot open node %s: %m",
433 e->session->name, e->name, eu->devnode);
445 r = idev_evdev_resume(&eu->evdev, fd);
447 log_debug_errno(r, "idev-evdev: %s/%s: cannot resume: %m",
448 e->session->name, e->name);
451 static void unmanaged_evdev_pause(idev_element *e) {
452 unmanaged_evdev *eu = unmanaged_evdev_from_element(e);
455 * Release the device if the device is disabled or there is no-one who
456 * opened it. This guarantees we stay only available if we're opened
460 idev_evdev_pause(&eu->evdev, true);
463 static int unmanaged_evdev_new(idev_element **out, idev_session *s, struct udev_device *ud) {
464 _cleanup_(idev_element_freep) idev_element *e = NULL;
465 char name[IDEV_EVDEV_NAME_MAX];
471 assert_return(s, -EINVAL);
472 assert_return(ud, -EINVAL);
474 devnode = udev_device_get_devnode(ud);
475 devnum = udev_device_get_devnum(ud);
476 if (!devnode || devnum == 0)
479 idev_evdev_name(name, devnum);
481 eu = new0(unmanaged_evdev, 1);
485 e = &eu->evdev.element;
486 eu->evdev = IDEV_EVDEV_INIT(&unmanaged_evdev_vtable, s);
488 eu->devnode = strdup(devnode);
492 r = idev_element_add(e, name);
502 static void unmanaged_evdev_free(idev_element *e) {
503 unmanaged_evdev *eu = unmanaged_evdev_from_element(e);
505 idev_evdev_destroy(&eu->evdev);
510 static const idev_element_vtable unmanaged_evdev_vtable = {
511 .free = unmanaged_evdev_free,
512 .enable = unmanaged_evdev_resume,
513 .disable = unmanaged_evdev_pause,
514 .open = unmanaged_evdev_resume,
515 .close = unmanaged_evdev_pause,
/*
519 * Managed Evdev Element
520 * The managed evdev element uses systemd-logind to acquire evdev devices. This
521 * means, we do not open the device node /dev/input/eventX directly. Instead,
522 * logind passes us a file-descriptor whenever our session is activated. Thus,
523 * we don't need access to the device node directly.
524 * Furthermore, whenever the session is put asleep, logind revokes the
525 * file-descriptor so we lose access to the device.
526 * Managed evdev elements should be preferred over unmanaged elements whenever
527 * you run inside a user session with exclusive device access.
 */
530 static int managed_evdev_take_device_fn(sd_bus *bus,
531 sd_bus_message *reply,
533 sd_bus_error *ret_error) {
534 managed_evdev *em = userdata;
535 idev_element *e = &em->evdev.element;
536 idev_session *s = e->session;
539 em->slot_take_device = sd_bus_slot_unref(em->slot_take_device);
541 if (sd_bus_message_is_method_error(reply, NULL)) {
542 const sd_bus_error *error = sd_bus_message_get_error(reply);
544 log_debug("idev-evdev: %s/%s: TakeDevice failed: %s: %s",
545 s->name, e->name, error->name, error->message);
551 r = sd_bus_message_read(reply, "hb", &fd, &paused);
553 log_debug("idev-evdev: %s/%s: erroneous TakeDevice reply", s->name, e->name);
557 /* If the device is paused, ignore it; we will get the next fd via
558 * ResumeDevice signals. */
562 fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
564 log_debug_errno(errno, "idev-evdev: %s/%s: cannot duplicate evdev fd: %m", s->name, e->name);
568 r = idev_evdev_resume(&em->evdev, fd);
570 log_debug_errno(r, "idev-evdev: %s/%s: cannot resume: %m",
576 static void managed_evdev_enable(idev_element *e) {
577 _cleanup_bus_message_unref_ sd_bus_message *m = NULL;
578 managed_evdev *em = managed_evdev_from_element(e);
579 idev_session *s = e->session;
580 idev_context *c = s->context;
584 * Acquiring managed devices is heavy, so do it only once we're
585 * enabled *and* opened by someone.
587 if (e->n_open < 1 || !e->enabled)
590 /* bail out if already pending */
594 r = sd_bus_message_new_method_call(c->sysbus,
596 "org.freedesktop.login1",
598 "org.freedesktop.login1.Session",
603 r = sd_bus_message_append(m, "uu", major(em->devnum), minor(em->devnum));
607 r = sd_bus_call_async(c->sysbus,
608 &em->slot_take_device,
610 managed_evdev_take_device_fn,
616 em->requested = true;
620 log_debug_errno(r, "idev-evdev: %s/%s: cannot send TakeDevice request: %m",
624 static void managed_evdev_disable(idev_element *e) {
625 _cleanup_bus_message_unref_ sd_bus_message *m = NULL;
626 managed_evdev *em = managed_evdev_from_element(e);
627 idev_session *s = e->session;
628 idev_context *c = s->context;
632 * Releasing managed devices is heavy. Once acquired, we get
633 * notifications for sleep/wake-up events, so there's no reason to
634 * release it if disabled but opened. However, if a device is closed,
635 * we release it immediately as we don't care for sleep/wake-up events
636 * then (even if we're actually enabled).
639 idev_evdev_pause(&em->evdev, false);
641 if (e->n_open > 0 || !em->requested)
645 * If TakeDevice() is pending or was successful, make sure to
646 * release the device again. We don't care for return-values,
647 * so send it without waiting or callbacks.
648 * If a failed TakeDevice() is pending, but someone else took
649 * the device on the same bus-connection, we might incorrectly
650 * release their device. This is an unlikely race, though.
651 * Furthermore, you really shouldn't have two users of the
652 * controller-API on the same session, on the same devices, *AND* on
653 * the same bus-connection. So we don't care for that race..
656 idev_evdev_pause(&em->evdev, true);
657 em->requested = false;
659 if (!em->acquired && !em->slot_take_device)
662 em->slot_take_device = sd_bus_slot_unref(em->slot_take_device);
663 em->acquired = false;
665 r = sd_bus_message_new_method_call(c->sysbus,
667 "org.freedesktop.login1",
669 "org.freedesktop.login1.Session",
672 r = sd_bus_message_append(m, "uu", major(em->devnum), minor(em->devnum));
674 r = sd_bus_send(c->sysbus, m, NULL);
677 if (r < 0 && r != -ENOTCONN)
678 log_debug_errno(r, "idev-evdev: %s/%s: cannot send ReleaseDevice: %m",
682 static void managed_evdev_resume(idev_element *e, int fd) {
683 managed_evdev *em = managed_evdev_from_element(e);
684 idev_session *s = e->session;
688 * We get ResumeDevice signals whenever logind resumed a previously
689 * paused device. The arguments contain the major/minor number of the
690 * related device and a new file-descriptor for the freshly opened
691 * device-node. We take the file-descriptor and immediately resume the
695 fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
697 log_debug_errno(errno, "idev-evdev: %s/%s: cannot duplicate evdev fd: %m",
702 r = idev_evdev_resume(&em->evdev, fd);
704 log_debug_errno(r, "idev-evdev: %s/%s: cannot resume: %m",
710 static void managed_evdev_pause(idev_element *e, const char *mode) {
711 managed_evdev *em = managed_evdev_from_element(e);
712 idev_session *s = e->session;
713 idev_context *c = s->context;
717 * We get PauseDevice() signals from logind whenever a device we
718 * requested was, or is about to be, paused. Arguments are major/minor
719 * number of the device and the mode of the operation.
720 * We treat it as asynchronous access-revocation (as if we got HUP on
721 * the device fd). Note that we might have already treated the HUP
722 * event via EPOLLHUP, whichever comes first.
724 * @mode can be one of the following:
725 * "pause": The device is about to be paused. We must react
726 * immediately and respond with PauseDeviceComplete(). Once
727 * we replied, logind will pause the device. Note that
728 * logind might apply any kind of timeout and force pause
729 * the device if we don't respond in a timely manner. In
730 * this case, we will receive a second PauseDevice event
731 * with @mode set to "force" (or similar).
732 * "force": The device was disabled forecfully by logind. Access is
733 * already revoked. This is just an asynchronous
734 * notification so we can put the device asleep (in case
735 * we didn't already notice the access revocation).
736 * "gone": This is like "force" but is sent if the device was
737 * paused due to a device-removal event.
739 * We always handle PauseDevice signals as "force" as we properly
740 * support asynchronous access revocation, anyway. But in case logind
741 * sent mode "pause", we also call PauseDeviceComplete() to immediately
742 * acknowledge the request.
745 idev_evdev_pause(&em->evdev, true);
747 if (streq(mode, "pause")) {
748 _cleanup_bus_message_unref_ sd_bus_message *m = NULL;
751 * Sending PauseDeviceComplete() is racy if logind triggers the
752 * timeout. That is, if we take too long and logind pauses the
753 * device by sending a forced PauseDevice, our
754 * PauseDeviceComplete call will be stray. That's fine, though.
755 * logind ignores such stray calls. Only if logind also sent a
756 * further PauseDevice() signal, it might match our call
757 * incorrectly to the newer PauseDevice(). That's fine, too, as
758 * we handle that event asynchronously, anyway. Therefore,
759 * whatever happens, we're fine. Yay!
762 r = sd_bus_message_new_method_call(c->sysbus,
764 "org.freedesktop.login1",
766 "org.freedesktop.login1.Session",
767 "PauseDeviceComplete");
769 r = sd_bus_message_append(m, "uu", major(em->devnum), minor(em->devnum));
771 r = sd_bus_send(c->sysbus, m, NULL);
775 log_debug_errno(r, "idev-evdev: %s/%s: cannot send PauseDeviceComplete: %m",
780 static int managed_evdev_new(idev_element **out, idev_session *s, struct udev_device *ud) {
781 _cleanup_(idev_element_freep) idev_element *e = NULL;
782 char name[IDEV_EVDEV_NAME_MAX];
787 assert_return(s, -EINVAL);
788 assert_return(s->managed, -EINVAL);
789 assert_return(s->context->sysbus, -EINVAL);
790 assert_return(ud, -EINVAL);
792 devnum = udev_device_get_devnum(ud);
796 idev_evdev_name(name, devnum);
798 em = new0(managed_evdev, 1);
802 e = &em->evdev.element;
803 em->evdev = IDEV_EVDEV_INIT(&managed_evdev_vtable, s);
806 r = idev_element_add(e, name);
816 static void managed_evdev_free(idev_element *e) {
817 managed_evdev *em = managed_evdev_from_element(e);
819 idev_evdev_destroy(&em->evdev);
823 static const idev_element_vtable managed_evdev_vtable = {
824 .free = managed_evdev_free,
825 .enable = managed_evdev_enable,
826 .disable = managed_evdev_disable,
827 .open = managed_evdev_enable,
828 .close = managed_evdev_disable,
829 .resume = managed_evdev_resume,
830 .pause = managed_evdev_pause,
/*
834 * Generic Constructor
835 * Instead of relying on the caller to choose between managed and unmanaged
836 * evdev devices, the idev_evdev_new() constructor does that for you (by
837 * looking at s->managed).
 */
840 bool idev_is_evdev(idev_element *e) {
841 return e && (e->vtable == &unmanaged_evdev_vtable ||
842 e->vtable == &managed_evdev_vtable);
845 idev_element *idev_find_evdev(idev_session *s, dev_t devnum) {
846 char name[IDEV_EVDEV_NAME_MAX];
848 assert_return(s, NULL);
849 assert_return(devnum != 0, NULL);
851 idev_evdev_name(name, devnum);
852 return idev_find_element(s, name);
855 int idev_evdev_new(idev_element **out, idev_session *s, struct udev_device *ud) {
856 assert_return(s, -EINVAL);
857 assert_return(ud, -EINVAL);
859 return s->managed ? managed_evdev_new(out, s, ud) : unmanaged_evdev_new(out, s, ud);