1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright (C) 2014 David Herrmann <dh.herrmann@gmail.com>
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
27 #include <sys/ioctl.h>
29 #include <sys/types.h>
30 #include <systemd/sd-bus.h>
31 #include <systemd/sd-event.h>
34 /* Yuck! DRM headers need system headers included first.. but we have to
35 * include it before shared/missing.h to avoid redefining ioctl bits */
37 #include <drm_fourcc.h>
43 #include "grdev-internal.h"
45 #include "udev-util.h"
48 #define GRDRM_MAX_TRIES (16)
50 typedef struct grdrm_object grdrm_object;
51 typedef struct grdrm_plane grdrm_plane;
52 typedef struct grdrm_connector grdrm_connector;
53 typedef struct grdrm_encoder grdrm_encoder;
54 typedef struct grdrm_crtc grdrm_crtc;
56 typedef struct grdrm_fb grdrm_fb;
57 typedef struct grdrm_pipe grdrm_pipe;
58 typedef struct grdrm_card grdrm_card;
59 typedef struct unmanaged_card unmanaged_card;
60 typedef struct managed_card managed_card;
79 void (*free_fn) (grdrm_object *object);
102 struct grdrm_connector {
108 uint32_t used_encoder;
115 uint32_t max_encoders;
119 struct drm_mode_modeinfo *modes;
123 uint64_t *prop_values;
127 struct grdrm_encoder {
148 uint32_t fb_offset_x;
149 uint32_t fb_offset_y;
152 uint32_t n_used_connectors;
153 uint32_t max_used_connectors;
154 uint32_t *used_connectors;
157 struct drm_mode_modeinfo mode;
167 uint32_t n_connectors;
168 uint32_t *connectors;
171 struct drm_mode_modeinfo mode;
175 struct drm_mode_modeinfo mode;
176 uint32_t n_connectors;
177 uint32_t max_connectors;
178 uint32_t *connectors;
186 #define GRDRM_OBJECT_INIT(_card, _id, _index, _type, _free_fn) ((grdrm_object){ \
191 .free_fn = (_free_fn), \
194 grdrm_object *grdrm_find_object(grdrm_card *card, uint32_t id);
195 int grdrm_object_add(grdrm_object *object);
196 grdrm_object *grdrm_object_free(grdrm_object *object);
198 DEFINE_TRIVIAL_CLEANUP_FUNC(grdrm_object*, grdrm_object_free);
200 int grdrm_plane_new(grdrm_plane **out, grdrm_card *card, uint32_t id, uint32_t index);
201 int grdrm_connector_new(grdrm_connector **out, grdrm_card *card, uint32_t id, uint32_t index);
202 int grdrm_encoder_new(grdrm_encoder **out, grdrm_card *card, uint32_t id, uint32_t index);
203 int grdrm_crtc_new(grdrm_crtc **out, grdrm_card *card, uint32_t id, uint32_t index);
205 #define plane_from_object(_obj) container_of((_obj), grdrm_plane, object)
206 #define connector_from_object(_obj) container_of((_obj), grdrm_connector, object)
207 #define encoder_from_object(_obj) container_of((_obj), grdrm_encoder, object)
208 #define crtc_from_object(_obj) container_of((_obj), grdrm_crtc, object)
224 static int grdrm_fb_new(grdrm_fb **out, grdrm_card *card, const struct drm_mode_modeinfo *mode);
225 grdrm_fb *grdrm_fb_free(grdrm_fb *fb);
227 DEFINE_TRIVIAL_CLEANUP_FUNC(grdrm_fb*, grdrm_fb_free);
229 #define fb_from_base(_fb) container_of((_fb), grdrm_fb, base)
241 #define grdrm_pipe_from_base(_e) container_of((_e), grdrm_pipe, base)
243 #define GRDRM_PIPE_NAME_MAX (GRDRM_CARD_NAME_MAX + 1 + DECIMAL_STR_MAX(uint32_t))
245 static const grdev_pipe_vtable grdrm_pipe_vtable;
247 static int grdrm_pipe_new(grdrm_pipe **out, grdrm_crtc *crtc, struct drm_mode_modeinfo *mode, size_t n_fbs);
257 sd_event_source *fd_src;
261 uint32_t n_connectors;
266 bool async_hotplug : 1;
271 bool cap_monotonic : 1;
274 struct unmanaged_card {
279 struct managed_card {
283 sd_bus_slot *slot_pause_device;
284 sd_bus_slot *slot_resume_device;
285 sd_bus_slot *slot_take_device;
287 bool requested : 1; /* TakeDevice() was sent */
288 bool acquired : 1; /* TakeDevice() was successful */
289 bool master : 1; /* we are DRM-Master */
292 #define grdrm_card_from_base(_e) container_of((_e), grdrm_card, base)
293 #define unmanaged_card_from_base(_e) \
294 container_of(grdrm_card_from_base(_e), unmanaged_card, card)
295 #define managed_card_from_base(_e) \
296 container_of(grdrm_card_from_base(_e), managed_card, card)
298 #define GRDRM_CARD_INIT(_vtable, _session) ((grdrm_card){ \
299 .base = GRDEV_CARD_INIT((_vtable), (_session)), \
304 #define GRDRM_CARD_NAME_MAX (6 + DECIMAL_STR_MAX(unsigned) * 2)
306 static const grdev_card_vtable unmanaged_card_vtable;
307 static const grdev_card_vtable managed_card_vtable;
309 static int grdrm_card_open(grdrm_card *card, int dev_fd);
310 static void grdrm_card_close(grdrm_card *card);
311 static bool grdrm_card_async(grdrm_card *card, int r);
314 * The page-flip event of the kernel provides 64bit of arbitrary user-data. As
315 * drivers tend to drop events on intermediate deep mode-sets or because we
316 might receive events during session activation, we try to avoid allocating
317 dynamic data on those events. Instead, we save the CRTC id plus a 32bit
318 * counter in there. This way, we only get 32bit counters, not 64bit, but that
319 * should be more than enough. On the bright side, we no longer care whether we
320 * lose events. No memory leaks will occur.
321 * Modern DRM drivers might be fixed to no longer leak events, but we want to
322 * be safe. And associating dynamically allocated data with those events is
323 * kinda ugly, anyway.
/* Pack a CRTC object id and a wrapping 32bit flip counter into the 64bit
 * user-data field of a DRM page-flip event: the id occupies the low 32
 * bits, the counter the high 32 bits. Counterpart of
 * grdrm_decode_vblank_data(). */
static uint64_t grdrm_encode_vblank_data(uint32_t id, uint32_t counter) {
        return id | ((uint64_t)counter << 32);
}
/* Unpack the 64bit page-flip user-data produced by
 * grdrm_encode_vblank_data(). Either output pointer may be NULL if the
 * caller is not interested in that half of the payload. */
static void grdrm_decode_vblank_data(uint64_t data, uint32_t *out_id, uint32_t *out_counter) {
        if (out_id)
                *out_id = data & 0xffffffffU;
        if (out_counter)
                *out_counter = (data >> 32) & 0xffffffffU;
}
337 static bool grdrm_modes_compatible(const struct drm_mode_modeinfo *a, const struct drm_mode_modeinfo *b) {
341 /* Test whether both modes are compatible according to our internal
342 * assumptions on modes. This comparison is highly dependent on how
343 * we treat modes in grdrm. If we export mode details, we need to
344 * make this comparison much stricter. */
346 if (a->hdisplay != b->hdisplay)
348 if (a->vdisplay != b->vdisplay)
350 if (a->vrefresh != b->vrefresh)
360 grdrm_object *grdrm_find_object(grdrm_card *card, uint32_t id) {
361 assert_return(card, NULL);
363 return id > 0 ? hashmap_get(card->object_map, UINT32_TO_PTR(id)) : NULL;
/* Register @object in its card's object map, keyed by the kernel object
 * id. Objects with index >= 32 cannot be represented in the kernel's
 * 32bit possible_* masks, so we log a warning but continue.
 * NOTE(review): this excerpt appears truncated — variable declarations
 * and the error/return path are elided. */
366 int grdrm_object_add(grdrm_object *object) {
370 assert(object->card);
371 assert(object->id > 0);
372 assert(IN_SET(object->type, GRDRM_TYPE_CRTC, GRDRM_TYPE_ENCODER, GRDRM_TYPE_CONNECTOR, GRDRM_TYPE_PLANE));
373 assert(object->free_fn);
375 if (object->index >= 32)
376 log_debug("grdrm: %s: object index exceeds 32bit masks: type=%u, index=%" PRIu32,
377 object->card->base.name, object->type, object->index);
379 r = hashmap_put(object->card->object_map, UINT32_TO_PTR(object->id), object);
/* Destructor: unlink @object from its card's object map, then dispatch
 * to the type-specific free_fn (plane_free/connector_free/encoder_free/
 * crtc_free), which releases the containing structure. */
386 grdrm_object *grdrm_object_free(grdrm_object *object) {
390 assert(object->card);
391 assert(object->id > 0);
392 assert(IN_SET(object->type, GRDRM_TYPE_CRTC, GRDRM_TYPE_ENCODER, GRDRM_TYPE_CONNECTOR, GRDRM_TYPE_PLANE));
393 assert(object->free_fn);
395 hashmap_remove_value(object->card->object_map, UINT32_TO_PTR(object->id), object);
397 object->free_fn(object);
405 static void plane_free(grdrm_object *object) {
406 grdrm_plane *plane = plane_from_object(object);
408 free(plane->kern.formats);
409 free(plane->kern.crtcs);
/* Allocate a new plane object for @card with kernel id @id and probe
 * index @index, pre-sizing the crtc and format arrays to 32 entries,
 * and register it on the card via grdrm_object_add().
 * NOTE(review): OOM/return paths are elided in this excerpt. */
413 int grdrm_plane_new(grdrm_plane **out, grdrm_card *card, uint32_t id, uint32_t index) {
414 _cleanup_(grdrm_object_freep) grdrm_object *object = NULL;
420 plane = new0(grdrm_plane, 1);
424 object = &plane->object;
425 *object = GRDRM_OBJECT_INIT(card, id, index, GRDRM_TYPE_PLANE, plane_free);
427 plane->kern.max_crtcs = 32;
428 plane->kern.crtcs = new0(uint32_t, plane->kern.max_crtcs);
429 if (!plane->kern.crtcs)
432 plane->kern.max_formats = 32;
433 plane->kern.formats = new0(uint32_t, plane->kern.max_formats);
434 if (!plane->kern.formats)
437 r = grdrm_object_add(object);
/* Re-read plane state from the kernel via DRM_IOCTL_MODE_GETPLANE.
 * If the kernel reports more formats than our buffer holds, grow the
 * buffer (next power of two) and retry, up to GRDRM_MAX_TRIES times. */
447 static int grdrm_plane_resync(grdrm_plane *plane) {
448 grdrm_card *card = plane->object.card;
454 for (tries = 0; tries < GRDRM_MAX_TRIES; ++tries) {
455 struct drm_mode_get_plane res;
456 grdrm_object *object;
457 bool resized = false;
461 res.plane_id = plane->object.id;
462 res.format_type_ptr = PTR_TO_UINT64(plane->kern.formats);
463 res.count_format_types = plane->kern.max_formats;
465 r = ioctl(card->fd, DRM_IOCTL_MODE_GETPLANE, &res);
469 card->async_hotplug = true;
471 log_debug("grdrm: %s: plane %u removed during resync", card->base.name, plane->object.id);
473 log_debug("grdrm: %s: cannot retrieve plane %u: %m", card->base.name, plane->object.id);
/* translate the kernel's possible_crtcs index bitmask into a list of
 * crtc object ids; indices >= 32 cannot be represented in the mask */
479 plane->kern.n_crtcs = 0;
480 memzero(plane->kern.crtcs, sizeof(uint32_t) * plane->kern.max_crtcs);
482 HASHMAP_FOREACH(object, card->object_map, iter) {
483 if (object->type != GRDRM_TYPE_CRTC || object->index >= 32)
485 if (!(res.possible_crtcs & (1 << object->index)))
487 if (plane->kern.n_crtcs >= 32) {
488 log_debug("grdrm: %s: possible_crtcs of plane %" PRIu32 " exceeds 32bit mask",
489 card->base.name, plane->object.id);
493 plane->kern.crtcs[plane->kern.n_crtcs++] = object->id;
/* format buffer too small: grow to next power of two and retry */
496 if (res.count_format_types > plane->kern.max_formats) {
499 max = ALIGN_POWER2(res.count_format_types);
500 if (!max || max > UINT16_MAX) {
501 log_debug("grdrm: %s: excessive plane resource limit: %" PRIu32, card->base.name, max);
505 t = realloc(plane->kern.formats, sizeof(*t) * max);
509 plane->kern.formats = t;
510 plane->kern.max_formats = max;
/* buffers were large enough: cache the returned state */
517 plane->kern.n_formats = res.count_format_types;
518 plane->kern.used_crtc = res.crtc_id;
519 plane->kern.used_fb = res.fb_id;
520 plane->kern.gamma_size = res.gamma_size;
525 if (tries >= GRDRM_MAX_TRIES) {
526 log_debug("grdrm: %s: plane %u not settled for retrieval", card->base.name, plane->object.id);
537 static void connector_free(grdrm_object *object) {
538 grdrm_connector *connector = connector_from_object(object);
540 free(connector->kern.prop_values);
541 free(connector->kern.prop_ids);
542 free(connector->kern.modes);
543 free(connector->kern.encoders);
/* Allocate a new connector object for @card (kernel id @id, probe index
 * @index), pre-sizing the encoder, mode and property arrays to 32
 * entries, and register it on the card via grdrm_object_add().
 * NOTE(review): OOM/return paths are elided in this excerpt. */
547 int grdrm_connector_new(grdrm_connector **out, grdrm_card *card, uint32_t id, uint32_t index) {
548 _cleanup_(grdrm_object_freep) grdrm_object *object = NULL;
549 grdrm_connector *connector;
554 connector = new0(grdrm_connector, 1);
558 object = &connector->object;
559 *object = GRDRM_OBJECT_INIT(card, id, index, GRDRM_TYPE_CONNECTOR, connector_free);
561 connector->kern.max_encoders = 32;
562 connector->kern.encoders = new0(uint32_t, connector->kern.max_encoders);
563 if (!connector->kern.encoders)
566 connector->kern.max_modes = 32;
567 connector->kern.modes = new0(struct drm_mode_modeinfo, connector->kern.max_modes);
568 if (!connector->kern.modes)
571 connector->kern.max_props = 32;
572 connector->kern.prop_ids = new0(uint32_t, connector->kern.max_props);
573 connector->kern.prop_values = new0(uint64_t, connector->kern.max_props);
574 if (!connector->kern.prop_ids || !connector->kern.prop_values)
577 r = grdrm_object_add(object);
/* Re-read connector state from the kernel via
 * DRM_IOCTL_MODE_GETCONNECTOR, growing the encoder/mode/property
 * buffers and retrying (up to GRDRM_MAX_TRIES) whenever the kernel
 * reports more entries than we have room for. */
587 static int grdrm_connector_resync(grdrm_connector *connector) {
588 grdrm_card *card = connector->object.card;
594 for (tries = 0; tries < GRDRM_MAX_TRIES; ++tries) {
595 struct drm_mode_get_connector res;
596 bool resized = false;
600 res.connector_id = connector->object.id;
601 res.encoders_ptr = PTR_TO_UINT64(connector->kern.encoders);
602 res.props_ptr = PTR_TO_UINT64(connector->kern.prop_ids);
603 res.prop_values_ptr = PTR_TO_UINT64(connector->kern.prop_values);
604 res.count_encoders = connector->kern.max_encoders;
605 res.count_props = connector->kern.max_props;
607 /* The kernel reads modes from the EDID information only if we
608 * pass count_modes==0. This is a legacy hack for libdrm (which
609 * called every ioctl twice). Now we have to adapt.. *sigh*.
610 * If we never received a hotplug event, there's no reason to
611 * sync modes. EDID reads are heavy, so skip that if not
615 res.modes_ptr = PTR_TO_UINT64(connector->kern.modes);
616 res.count_modes = connector->kern.max_modes;
622 r = ioctl(card->fd, DRM_IOCTL_MODE_GETCONNECTOR, &res);
626 card->async_hotplug = true;
628 log_debug("grdrm: %s: connector %u removed during resync", card->base.name, connector->object.id);
630 log_debug("grdrm: %s: cannot retrieve connector %u: %m", card->base.name, connector->object.id);
/* grow the encoder id buffer if the kernel reported more entries */
636 if (res.count_encoders > connector->kern.max_encoders) {
639 max = ALIGN_POWER2(res.count_encoders);
640 if (!max || max > UINT16_MAX) {
641 log_debug("grdrm: %s: excessive connector resource limit: %" PRIu32, card->base.name, max);
645 t = realloc(connector->kern.encoders, sizeof(*t) * max);
649 connector->kern.encoders = t;
650 connector->kern.max_encoders = max;
/* grow the mode buffer if the kernel reported more entries */
654 if (res.count_modes > connector->kern.max_modes) {
655 struct drm_mode_modeinfo *t;
657 max = ALIGN_POWER2(res.count_modes);
658 if (!max || max > UINT16_MAX) {
659 log_debug("grdrm: %s: excessive connector resource limit: %" PRIu32, card->base.name, max);
663 t = realloc(connector->kern.modes, sizeof(*t) * max);
667 connector->kern.modes = t;
668 connector->kern.max_modes = max;
/* grow both property buffers in lock-step (ids + values) */
672 if (res.count_props > connector->kern.max_props) {
676 max = ALIGN_POWER2(res.count_props);
677 if (!max || max > UINT16_MAX) {
678 log_debug("grdrm: %s: excessive connector resource limit: %" PRIu32, card->base.name, max);
682 tids = realloc(connector->kern.prop_ids, sizeof(*tids) * max);
685 connector->kern.prop_ids = tids;
687 tvals = realloc(connector->kern.prop_values, sizeof(*tvals) * max);
690 connector->kern.prop_values = tvals;
692 connector->kern.max_props = max;
/* buffers were large enough: cache the returned state */
699 connector->kern.n_encoders = res.count_encoders;
700 connector->kern.n_props = res.count_props;
701 connector->kern.type = res.connector_type;
702 connector->kern.type_id = res.connector_type_id;
703 connector->kern.used_encoder = res.encoder_id;
704 connector->kern.connection = res.connection;
705 connector->kern.mm_width = res.mm_width;
706 connector->kern.mm_height = res.mm_height;
707 connector->kern.subpixel = res.subpixel;
/* modes are only valid if the kernel actually filled our buffer */
708 if (res.modes_ptr == PTR_TO_UINT64(connector->kern.modes))
709 connector->kern.n_modes = res.count_modes;
714 if (tries >= GRDRM_MAX_TRIES) {
715 log_debug("grdrm: %s: connector %u not settled for retrieval", card->base.name, connector->object.id);
726 static void encoder_free(grdrm_object *object) {
727 grdrm_encoder *encoder = encoder_from_object(object);
729 free(encoder->kern.clones);
730 free(encoder->kern.crtcs);
/* Allocate a new encoder object for @card (kernel id @id, probe index
 * @index), pre-sizing the crtc and clone arrays to 32 entries, and
 * register it on the card via grdrm_object_add().
 * NOTE(review): OOM/return paths are elided in this excerpt. */
734 int grdrm_encoder_new(grdrm_encoder **out, grdrm_card *card, uint32_t id, uint32_t index) {
735 _cleanup_(grdrm_object_freep) grdrm_object *object = NULL;
736 grdrm_encoder *encoder;
741 encoder = new0(grdrm_encoder, 1);
745 object = &encoder->object;
746 *object = GRDRM_OBJECT_INIT(card, id, index, GRDRM_TYPE_ENCODER, encoder_free);
748 encoder->kern.max_crtcs = 32;
749 encoder->kern.crtcs = new0(uint32_t, encoder->kern.max_crtcs);
750 if (!encoder->kern.crtcs)
753 encoder->kern.max_clones = 32;
754 encoder->kern.clones = new0(uint32_t, encoder->kern.max_clones);
755 if (!encoder->kern.clones)
758 r = grdrm_object_add(object);
/* Re-read encoder state from the kernel via DRM_IOCTL_MODE_GETENCODER
 * and translate the possible_crtcs/possible_clones index bitmasks into
 * lists of object ids by walking the card's object map. */
768 static int grdrm_encoder_resync(grdrm_encoder *encoder) {
769 grdrm_card *card = encoder->object.card;
770 struct drm_mode_get_encoder res;
771 grdrm_object *object;
778 res.encoder_id = encoder->object.id;
780 r = ioctl(card->fd, DRM_IOCTL_MODE_GETENCODER, &res);
784 card->async_hotplug = true;
786 log_debug("grdrm: %s: encoder %u removed during resync", card->base.name, encoder->object.id);
788 log_debug("grdrm: %s: cannot retrieve encoder %u: %m", card->base.name, encoder->object.id);
794 encoder->kern.type = res.encoder_type;
795 encoder->kern.used_crtc = res.crtc_id;
/* resolve possible_crtcs bitmask to crtc object ids */
797 encoder->kern.n_crtcs = 0;
798 memzero(encoder->kern.crtcs, sizeof(uint32_t) * encoder->kern.max_crtcs);
800 HASHMAP_FOREACH(object, card->object_map, iter) {
801 if (object->type != GRDRM_TYPE_CRTC || object->index >= 32)
803 if (!(res.possible_crtcs & (1 << object->index)))
805 if (encoder->kern.n_crtcs >= 32) {
806 log_debug("grdrm: %s: possible_crtcs exceeds 32bit mask", card->base.name);
810 encoder->kern.crtcs[encoder->kern.n_crtcs++] = object->id;
/* resolve possible_clones bitmask to encoder object ids */
813 encoder->kern.n_clones = 0;
814 memzero(encoder->kern.clones, sizeof(uint32_t) * encoder->kern.max_clones);
816 HASHMAP_FOREACH(object, card->object_map, iter) {
817 if (object->type != GRDRM_TYPE_ENCODER || object->index >= 32)
819 if (!(res.possible_clones & (1 << object->index)))
821 if (encoder->kern.n_clones >= 32) {
822 log_debug("grdrm: %s: possible_encoders exceeds 32bit mask", card->base.name);
826 encoder->kern.clones[encoder->kern.n_clones++] = object->id;
836 static void crtc_free(grdrm_object *object) {
837 grdrm_crtc *crtc = crtc_from_object(object);
840 grdev_pipe_free(&crtc->pipe->base);
841 free(crtc->set.connectors);
842 free(crtc->old.connectors);
843 free(crtc->kern.used_connectors);
/* Allocate a new crtc object for @card (kernel id @id, probe index
 * @index), pre-sizing the used-connector caches to 32 entries, and
 * register it on the card via grdrm_object_add().
 * NOTE(review): OOM/return paths are elided in this excerpt. */
847 int grdrm_crtc_new(grdrm_crtc **out, grdrm_card *card, uint32_t id, uint32_t index) {
848 _cleanup_(grdrm_object_freep) grdrm_object *object = NULL;
854 crtc = new0(grdrm_crtc, 1);
858 object = &crtc->object;
859 *object = GRDRM_OBJECT_INIT(card, id, index, GRDRM_TYPE_CRTC, crtc_free);
861 crtc->kern.max_used_connectors = 32;
862 crtc->kern.used_connectors = new0(uint32_t, crtc->kern.max_used_connectors);
863 if (!crtc->kern.used_connectors)
/* the "old" cache mirrors the kernel cache so the original setup can be
 * restored later */
866 crtc->old.connectors = new0(uint32_t, crtc->kern.max_used_connectors);
867 if (!crtc->old.connectors)
870 r = grdrm_object_add(object);
/* Re-read crtc state from the kernel via DRM_IOCTL_MODE_GETCRTC.
 * The ioctl does not report connectors, so the connector list is
 * deduced elsewhere from encoder state; here we only grow the caches
 * so any later combination fits, then store the returned fb/mode. */
880 static int grdrm_crtc_resync(grdrm_crtc *crtc) {
881 grdrm_card *card = crtc->object.card;
882 struct drm_mode_crtc res = { .crtc_id = crtc->object.id };
887 /* make sure we can cache any combination later */
888 if (card->n_connectors > crtc->kern.max_used_connectors) {
891 max = ALIGN_POWER2(card->n_connectors);
895 t = realloc_multiply(crtc->kern.used_connectors, sizeof(*t), max);
899 crtc->kern.used_connectors = t;
900 crtc->kern.max_used_connectors = max;
/* only replace the "old" cache while no old state is recorded */
902 if (!crtc->old.set) {
903 crtc->old.connectors = calloc(sizeof(*t), max);
904 if (!crtc->old.connectors)
909 /* GETCRTC doesn't return connectors. We have to read all
910 * encoder-state and deduce the setup ourselves.. */
911 crtc->kern.n_used_connectors = 0;
913 r = ioctl(card->fd, DRM_IOCTL_MODE_GETCRTC, &res);
917 card->async_hotplug = true;
919 log_debug("grdrm: %s: crtc %u removed during resync", card->base.name, crtc->object.id);
921 log_debug("grdrm: %s: cannot retrieve crtc %u: %m", card->base.name, crtc->object.id);
927 crtc->kern.used_fb = res.fb_id;
928 crtc->kern.fb_offset_x = res.x;
929 crtc->kern.fb_offset_y = res.y;
930 crtc->kern.gamma_size = res.gamma_size;
931 crtc->kern.mode_set = res.mode_valid;
932 crtc->kern.mode = res.mode;
/* Assign @connector (may be NULL for "none") to @crtc and select the
 * best mode for it: PREFERRED modes win, then DRIVER modes, then the
 * highest resolution; 3D modes are skipped. Both objects are marked
 * assigned even if no usable configuration is found. */
937 static void grdrm_crtc_assign(grdrm_crtc *crtc, grdrm_connector *connector) {
938 uint32_t n_connectors;
942 assert(!crtc->object.assigned);
943 assert(!connector || !connector->object.assigned);
945 /* always mark both as assigned; even if assignments cannot be set */
946 crtc->object.assigned = true;
948 connector->object.assigned = true;
950 /* we will support hw clone mode in the future */
951 n_connectors = connector ? 1 : 0;
953 /* bail out if configuration is preserved */
954 if (crtc->set.n_connectors == n_connectors &&
955 (n_connectors == 0 || crtc->set.connectors[0] == connector->object.id))
/* configuration changed: invalidate and rebuild the "set" state */
958 crtc->applied = false;
959 crtc->set.n_connectors = 0;
961 if (n_connectors > crtc->set.max_connectors) {
964 max = ALIGN_POWER2(n_connectors);
970 t = realloc(crtc->set.connectors, sizeof(*t) * max);
976 crtc->set.connectors = t;
977 crtc->set.max_connectors = max;
/* pick the preferred mode from the connector's kernel mode list */
981 struct drm_mode_modeinfo *m, *pref = NULL;
984 for (i = 0; i < connector->kern.n_modes; ++i) {
985 m = &connector->kern.modes[i];
987 /* ignore 3D modes by default */
988 if (m->flags & DRM_MODE_FLAG_3D_MASK)
996 /* use PREFERRED over non-PREFERRED */
997 if ((pref->type & DRM_MODE_TYPE_PREFERRED) &&
998 !(m->type & DRM_MODE_TYPE_PREFERRED))
1001 /* use DRIVER over non-PREFERRED|DRIVER */
1002 if ((pref->type & DRM_MODE_TYPE_DRIVER) &&
1003 !(m->type & (DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED)))
1006 /* always prefer higher resolution */
1007 if (pref->hdisplay > m->hdisplay ||
1008 (pref->hdisplay == m->hdisplay && pref->vdisplay > m->vdisplay))
/* commit the chosen mode + connector into the "set" state */
1015 crtc->set.mode = *pref;
1016 crtc->set.n_connectors = 1;
1017 crtc->set.connectors[0] = connector->object.id;
1018 log_debug("grdrm: %s: assigned connector %" PRIu32 " to crtc %" PRIu32 " with mode %s",
1019 crtc->object.card->base.name, connector->object.id, crtc->object.id, pref->name);
1021 log_debug("grdrm: %s: connector %" PRIu32 " to be assigned but has no valid mode",
1022 crtc->object.card->base.name, connector->object.id);
1029 log_debug("grdrm: %s: cannot assign crtc %" PRIu32 ": %s",
1030 crtc->object.card->base.name, crtc->object.id, strerror(-r));
/* Expose the assigned crtc as a grdev pipe: reuse an existing pipe if
 * its geometry still matches the selected mode, otherwise destroy it
 * and create a fresh pipe with newly allocated dumb framebuffers. */
1033 static void grdrm_crtc_expose(grdrm_crtc *crtc) {
1040 assert(crtc->object.assigned);
/* no connectors assigned: drop the pipe entirely */
1042 if (crtc->set.n_connectors < 1) {
1044 grdev_pipe_free(&crtc->pipe->base);
/* existing pipe: keep it only if geometry/refresh are unchanged */
1051 if (pipe->base.width != crtc->set.mode.hdisplay ||
1052 pipe->base.height != crtc->set.mode.vdisplay ||
1053 pipe->base.vrefresh != crtc->set.mode.vrefresh) {
1054 grdev_pipe_free(&pipe->base);
/* re-derive front/back buffers from the kernel's current fb id */
1061 pipe->base.front = NULL;
1062 pipe->base.back = NULL;
1063 for (i = 0; i < pipe->base.max_fbs; ++i) {
1064 fb = fb_from_base(pipe->base.fbs[i]);
1065 if (fb->id == crtc->kern.used_fb)
1066 pipe->base.front = &fb->base;
1067 else if (!fb->flipid)
1068 pipe->base.back = &fb->base;
/* no reusable pipe: create a new double-buffered one */
1071 r = grdrm_pipe_new(&pipe, crtc, &crtc->set.mode, 2);
1073 log_debug("grdrm: %s: cannot create pipe for crtc %" PRIu32 ": %s",
1074 crtc->object.card->base.name, crtc->object.id, strerror(-r));
1078 for (i = 0; i < pipe->base.max_fbs; ++i) {
1079 r = grdrm_fb_new(&fb, crtc->object.card, &crtc->set.mode);
1081 log_debug("grdrm: %s: cannot allocate framebuffer for crtc %" PRIu32 ": %s",
1082 crtc->object.card->base.name, crtc->object.id, strerror(-r));
1083 grdev_pipe_free(&pipe->base);
1087 pipe->base.fbs[i] = &fb->base;
1090 pipe->base.front = NULL;
1091 pipe->base.back = pipe->base.fbs[0];
1095 grdev_pipe_ready(&crtc->pipe->base, true);
/* Apply @basefb to @crtc via a full ("deep") modeset using
 * DRM_IOCTL_MODE_SETCRTC: sets connectors, mode and framebuffer in one
 * go. Used when a plain page-flip is not possible. */
1098 static void grdrm_crtc_commit_deep(grdrm_crtc *crtc, grdev_fb *basefb) {
1099 struct drm_mode_crtc set_crtc = { .crtc_id = crtc->object.id };
1100 grdrm_card *card = crtc->object.card;
1101 grdrm_pipe *pipe = crtc->pipe;
1109 fb = fb_from_base(basefb);
1111 set_crtc.set_connectors_ptr = PTR_TO_UINT64(crtc->set.connectors);
1112 set_crtc.count_connectors = crtc->set.n_connectors;
1113 set_crtc.fb_id = fb->id;
1116 set_crtc.mode_valid = 1;
1117 set_crtc.mode = crtc->set.mode;
1119 r = ioctl(card->fd, DRM_IOCTL_MODE_SETCRTC, &set_crtc);
1122 log_debug("grdrm: %s: cannot set crtc %" PRIu32 ": %m",
1123 card->base.name, crtc->object.id);
/* on failure, maybe schedule an async resync of the card */
1125 grdrm_card_async(card, r);
1129 if (!crtc->applied) {
1130 log_debug("grdrm: %s: crtc %" PRIu32 " applied via deep modeset",
1131 card->base.name, crtc->object.id);
1132 crtc->applied = true;
/* a deep modeset shows the fb immediately: it becomes the front buffer */
1135 pipe->base.back = NULL;
1136 pipe->base.front = &fb->base;
1139 pipe->base.flipping = false;
1140 pipe->base.flip = false;
1142 /* We cannot schedule dummy page-flips on pipes, hence, the
1143 * application would have to schedule their own frame-timers.
1144 * To avoid duplicating that everywhere, we schedule our own
1145 * timer and raise a fake FRAME event when it fires. */
1146 grdev_pipe_schedule(&pipe->base, 1);
/* Apply @basefb to @crtc via an async page-flip
 * (DRM_IOCTL_MODE_PAGE_FLIP). Requires the kernel mode to already be
 * compatible with our target mode; otherwise the caller must fall back
 * to grdrm_crtc_commit_deep(). Returns <= 0 on failure. */
1149 static int grdrm_crtc_commit_flip(grdrm_crtc *crtc, grdev_fb *basefb) {
1150 struct drm_mode_crtc_page_flip page_flip = { .crtc_id = crtc->object.id };
1151 grdrm_card *card = crtc->object.card;
1152 grdrm_pipe *pipe = crtc->pipe;
/* flipping is only possible if no modeset is pending */
1161 if (!crtc->applied && !grdrm_modes_compatible(&crtc->kern.mode, &crtc->set.mode))
1164 fb = fb_from_base(basefb);
/* bump the flip counter, skipping the reserved value 0 on wrap-around */
1166 cnt = ++pipe->counter ? : ++pipe->counter;
1167 page_flip.fb_id = fb->id;
1168 page_flip.flags = DRM_MODE_PAGE_FLIP_EVENT;
1169 page_flip.user_data = grdrm_encode_vblank_data(crtc->object.id, cnt);
1171 r = ioctl(card->fd, DRM_IOCTL_MODE_PAGE_FLIP, &page_flip);
1174 /* Avoid excessive logging on EINVAL; it is currently not
1175 * possible to see whether cards support page-flipping, so
1176 * avoid logging on each frame. */
1178 log_debug("grdrm: %s: cannot schedule page-flip on crtc %" PRIu32 ": %m",
1179 card->base.name, crtc->object.id);
1181 if (grdrm_card_async(card, r))
1187 if (!crtc->applied) {
1188 log_debug("grdrm: %s: crtc %" PRIu32 " applied via page flip",
1189 card->base.name, crtc->object.id);
1190 crtc->applied = true;
1193 pipe->base.flipping = true;
1194 pipe->base.flip = false;
1195 pipe->counter = cnt;
1197 pipe->base.back = NULL;
1199 /* Raise fake FRAME event if it takes longer than 2
1200 * frames to receive the pageflip event. We assume the
1201 * queue ran over or some other error happened. */
1202 grdev_pipe_schedule(&pipe->base, 2);
/* Commit the assigned state of @crtc: shut the crtc down if it has no
 * connectors, skip disabled pipes, otherwise try a cheap page-flip and
 * fall back to a deep modeset when flipping is not possible. */
1207 static void grdrm_crtc_commit(grdrm_crtc *crtc) {
1208 struct drm_mode_crtc set_crtc = { .crtc_id = crtc->object.id };
1209 grdrm_card *card = crtc->object.card;
1215 assert(crtc->object.assigned);
1219 /* If a crtc is not assigned any connector, we want any
1220 * previous setup to be cleared, so make sure the CRTC is
1221 * disabled. Otherwise, there might be content on the CRTC
1222 * while we run, which is not what we want.
1223 * If you want to avoid modesets on specific CRTCs, you should
1224 * still keep their assignment, but never enable the resulting
1225 * pipe. This way, we wouldn't touch it at all. */
1226 if (!crtc->applied) {
1227 crtc->applied = true;
/* set_crtc has no fb/mode set here, which disables the crtc */
1228 r = ioctl(card->fd, DRM_IOCTL_MODE_SETCRTC, &set_crtc);
1231 log_debug("grdrm: %s: cannot shutdown crtc %" PRIu32 ": %m",
1232 card->base.name, crtc->object.id);
1234 grdrm_card_async(card, r);
1238 log_debug("grdrm: %s: crtc %" PRIu32 " applied via shutdown",
1239 card->base.name, crtc->object.id);
1245 /* we always fully ignore disabled pipes */
1246 if (!pipe->base.enabled)
1249 assert(crtc->set.n_connectors > 0);
/* choose which buffer to show: a requested flip shows the back buffer,
 * a pending modeset re-shows the front buffer */
1251 if (pipe->base.flip)
1252 fb = pipe->base.back;
1253 else if (!crtc->applied)
1254 fb = pipe->base.front;
1261 r = grdrm_crtc_commit_flip(crtc, fb);
1263 /* in case we couldn't page-flip, perform deep modeset */
1264 grdrm_crtc_commit_deep(crtc, fb);
/* Restore the crtc to the configuration recorded in crtc->old (the
 * state found before we took over), via DRM_IOCTL_MODE_SETCRTC. */
1268 static void grdrm_crtc_restore(grdrm_crtc *crtc) {
1269 struct drm_mode_crtc set_crtc = { .crtc_id = crtc->object.id };
1270 grdrm_card *card = crtc->object.card;
1276 set_crtc.set_connectors_ptr = PTR_TO_UINT64(crtc->old.connectors);
1277 set_crtc.count_connectors = crtc->old.n_connectors;
1278 set_crtc.fb_id = crtc->old.fb;
1279 set_crtc.x = crtc->old.fb_x;
1280 set_crtc.y = crtc->old.fb_y;
1281 set_crtc.gamma_size = crtc->old.gamma;
1282 set_crtc.mode_valid = crtc->old.mode_set;
1283 set_crtc.mode = crtc->old.mode;
1285 r = ioctl(card->fd, DRM_IOCTL_MODE_SETCRTC, &set_crtc);
1288 log_debug("grdrm: %s: cannot restore crtc %" PRIu32 ": %m",
1289 card->base.name, crtc->object.id);
1291 grdrm_card_async(card, r);
/* invalidate any in-flight flip on the attached pipe */
1296 ++crtc->pipe->counter;
1297 crtc->pipe->base.front = NULL;
1298 crtc->pipe->base.flipping = false;
1301 log_debug("grdrm: %s: crtc %" PRIu32 " restored", card->base.name, crtc->object.id);
/* Handle a kernel page-flip completion event for @crtc. @counter is the
 * flip counter decoded from the event's user-data. */
1304 static void grdrm_crtc_flip_complete(grdrm_crtc *crtc, uint32_t counter, struct drm_event_vblank *event) {
1305 bool flipped = false;
1316 /* We got a page-flip event. To be safe, we reset all FBs on the same
1317 * pipe that have smaller flipids than the flip we got as we know they
1318 * are executed in order. We need to do this to guarantee
1319 * queue-overflows or other missed events don't cause starvation.
1320 * Furthermore, if we find the exact FB this event is for, *and* this
1321 * is the most recent event, we mark it as front FB and raise a
1324 for (i = 0; i < pipe->base.max_fbs; ++i) {
1327 if (!pipe->base.fbs[i])
1330 fb = fb_from_base(pipe->base.fbs[i]);
1331 if (counter != 0 && counter == pipe->counter && fb->flipid == counter) {
1332 pipe->base.front = &fb->base;
/* unsigned wrap-around comparison: treat flipids up to UINT16_MAX
 * behind @counter as already executed */
1335 } else if (counter - fb->flipid < UINT16_MAX) {
1341 crtc->pipe->base.flipping = false;
1342 grdev_pipe_frame(&pipe->base);
/* Allocate a new framebuffer sized for @mode on @card: create a dumb
 * buffer (32bpp XRGB8888), map it into our address space, zero it, and
 * register it with the kernel via ADDFB2. Returns the new fb in @out.
 * NOTE(review): error/cleanup paths are elided in this excerpt. */
1350 static int grdrm_fb_new(grdrm_fb **out, grdrm_card *card, const struct drm_mode_modeinfo *mode) {
1351 _cleanup_(grdrm_fb_freep) grdrm_fb *fb = NULL;
1352 struct drm_mode_create_dumb create_dumb = { };
1353 struct drm_mode_map_dumb map_dumb = { };
1354 struct drm_mode_fb_cmd2 add_fb = { };
1358 assert_return(out, -EINVAL);
1359 assert_return(card, -EINVAL);
1361 fb = new0(grdrm_fb, 1);
1365 /* TODO: we should choose a compatible format of the previous CRTC
1366 * setting to allow page-flip to it. Only choose fallback if the
1367 * previous setting was crap (non xrgb32'ish). */
1370 fb->base.format = DRM_FORMAT_XRGB8888;
1371 fb->base.width = mode->hdisplay;
1372 fb->base.height = mode->vdisplay;
/* MAP_FAILED marks unmapped planes so grdrm_fb_free() can tell them apart */
1374 for (i = 0; i < ELEMENTSOF(fb->base.maps); ++i)
1375 fb->base.maps[i] = MAP_FAILED;
1377 create_dumb.width = fb->base.width;
1378 create_dumb.height = fb->base.height;
1379 create_dumb.bpp = 32;
1381 r = ioctl(card->fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_dumb);
1384 log_debug("grdrm: %s: cannot create dumb buffer %" PRIu32 "x%" PRIu32": %m",
1385 card->base.name, fb->base.width, fb->base.height);
1389 fb->handles[0] = create_dumb.handle;
1390 fb->base.strides[0] = create_dumb.pitch;
1391 fb->sizes[0] = create_dumb.size;
1393 map_dumb.handle = fb->handles[0];
1395 r = ioctl(card->fd, DRM_IOCTL_MODE_MAP_DUMB, &map_dumb);
1398 log_debug("grdrm: %s: cannot map dumb buffer %" PRIu32 "x%" PRIu32": %m",
1399 card->base.name, fb->base.width, fb->base.height);
1403 fb->base.maps[0] = mmap(0, fb->sizes[0], PROT_WRITE, MAP_SHARED, card->fd, map_dumb.offset);
1404 if (fb->base.maps[0] == MAP_FAILED) {
1406 log_debug("grdrm: %s: cannot memory-map dumb buffer %" PRIu32 "x%" PRIu32": %m",
1407 card->base.name, fb->base.width, fb->base.height);
/* clear the buffer so we never scan out stale memory */
1411 memzero(fb->base.maps[0], fb->sizes[0]);
1413 add_fb.width = fb->base.width;
1414 add_fb.height = fb->base.height;
1415 add_fb.pixel_format = fb->base.format;
1417 memcpy(add_fb.handles, fb->handles, sizeof(fb->handles));
1418 memcpy(add_fb.pitches, fb->base.strides, sizeof(fb->base.strides));
1419 memcpy(add_fb.offsets, fb->offsets, sizeof(fb->offsets));
1421 r = ioctl(card->fd, DRM_IOCTL_MODE_ADDFB2, &add_fb);
1424 log_debug("grdrm: %s: cannot add framebuffer %" PRIu32 "x%" PRIu32": %m",
1425 card->base.name, fb->base.width, fb->base.height);
1429 fb->id = add_fb.fb_id;
/* Destructor for framebuffers: run the user's free_fn, remove the fb
 * from the kernel (RMFB), unmap and destroy all dumb buffers. Kernel
 * cleanup is skipped if the card fd is already closed, since the kernel
 * reclaims the resources on close anyway. */
1436 grdrm_fb *grdrm_fb_free(grdrm_fb *fb) {
1445 if (fb->base.free_fn)
1446 fb->base.free_fn(fb->base.data.ptr);
1448 if (fb->id > 0 && fb->card->fd >= 0) {
1449 r = ioctl(fb->card->fd, DRM_IOCTL_MODE_RMFB, fb->id);
1451 log_debug("grdrm: %s: cannot delete framebuffer %" PRIu32 ": %m",
1452 fb->card->base.name, fb->id);
1455 for (i = 0; i < ELEMENTSOF(fb->handles); ++i) {
1456 struct drm_mode_destroy_dumb destroy_dumb = { };
/* MAP_FAILED marks planes that were never mapped (see grdrm_fb_new) */
1458 if (fb->base.maps[i] != MAP_FAILED)
1459 munmap(fb->base.maps[i], fb->sizes[i]);
1461 if (fb->handles[i] > 0 && fb->card->fd >= 0) {
1462 destroy_dumb.handle = fb->handles[i];
1463 r = ioctl(fb->card->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_dumb);
1465 log_debug("grdrm: %s: cannot destroy dumb-buffer %" PRIu32 ": %m",
1466 fb->card->base.name, fb->handles[i]);
1479 static void grdrm_pipe_name(char *out, grdrm_crtc *crtc) {
1480 /* @out must be at least of size GRDRM_PIPE_NAME_MAX */
1481 sprintf(out, "%s/%" PRIu32, crtc->object.card->base.name, crtc->object.id);
/* Create a new grdev pipe for @crtc with geometry taken from @mode and
 * room for @n_fbs framebuffers, and register it under the name
 * "<card>/<crtc-id>". NOTE(review): OOM/return paths are elided. */
1484 static int grdrm_pipe_new(grdrm_pipe **out, grdrm_crtc *crtc, struct drm_mode_modeinfo *mode, size_t n_fbs) {
1485 _cleanup_(grdev_pipe_freep) grdev_pipe *basepipe = NULL;
1486 grdrm_card *card = crtc->object.card;
1487 char name[GRDRM_PIPE_NAME_MAX];
1491 assert_return(crtc, -EINVAL);
1492 assert_return(grdev_is_drm_card(&card->base), -EINVAL);
1494 pipe = new0(grdrm_pipe, 1);
1498 basepipe = &pipe->base;
1499 pipe->base = GRDEV_PIPE_INIT(&grdrm_pipe_vtable, &card->base);
1501 pipe->base.width = mode->hdisplay;
1502 pipe->base.height = mode->vdisplay;
/* fall back to 25Hz if the mode reports no refresh rate */
1503 pipe->base.vrefresh = mode->vrefresh ? : 25;
1505 grdrm_pipe_name(name, crtc);
1506 r = grdev_pipe_add(&pipe->base, name, n_fbs);
/* Vtable .free callback: release every framebuffer owned by the pipe. */
1516 static void grdrm_pipe_free(grdev_pipe *basepipe) {
1517 grdrm_pipe *pipe = grdrm_pipe_from_base(basepipe);
/* fbs[] may be sparsely populated; skip empty slots */
1522 for (i = 0; i < pipe->base.max_fbs; ++i)
1523 if (pipe->base.fbs[i])
1524 grdrm_fb_free(fb_from_base(pipe->base.fbs[i]));
/*
 * Vtable .target callback: pick the back-buffer to render into next.
 * If none is cached, scan fbs[] for one that is neither the current
 * front-buffer nor still in-flight during a flip.
 * NOTE(review): the skip/continue statements of this loop are elided in
 * this excerpt.
 */
1529 static grdev_fb *grdrm_pipe_target(grdev_pipe *basepipe) {
1533 if (!basepipe->back) {
1534 for (i = 0; i < basepipe->max_fbs; ++i) {
1535 if (!basepipe->fbs[i])
1538 fb = fb_from_base(basepipe->fbs[i]);
/* never hand out the buffer currently on screen */
1539 if (&fb->base == basepipe->front)
/* a non-zero flipid means a page-flip on this fb is still pending */
1541 if (basepipe->flipping && fb->flipid)
1544 basepipe->back = &fb->base;
/* may be NULL if every fb is busy */
1549 return basepipe->back;
/* Pipe operations implemented by the DRM backend. */
1552 static const grdev_pipe_vtable grdrm_pipe_vtable = {
1553 .free = grdrm_pipe_free,
1554 .target = grdrm_pipe_target,
/* Compose the card name "drm/<major>:<minor>" for @devnum into @out. */
1561 static void grdrm_name(char *out, dev_t devnum) {
1562 /* @out must be at least of size GRDRM_CARD_NAME_MAX */
1563 sprintf(out, "drm/%u:%u", major(devnum), minor(devnum));
/*
 * grdrm_card_print - dump the cached card state (CRTCs, encoders,
 * connectors, planes) to the debug log. Purely diagnostic; no state is
 * modified. NOTE(review): loop-skip and error-path lines are elided in
 * this excerpt.
 */
1566 static void grdrm_card_print(grdrm_card *card) {
1567 grdrm_object *object;
1569 grdrm_encoder *encoder;
1570 grdrm_connector *connector;
1576 log_debug("grdrm: %s: state dump", card->base.name);
/* pass 1: CRTCs and their currently-set mode */
1578 log_debug(" crtcs:");
1579 HASHMAP_FOREACH(object, card->object_map, iter) {
1580 if (object->type != GRDRM_TYPE_CRTC)
1583 crtc = crtc_from_object(object);
1584 log_debug(" (id: %u index: %d)", object->id, object->index);
1586 if (crtc->kern.mode_set)
1587 log_debug(" mode: %dx%d", crtc->kern.mode.hdisplay, crtc->kern.mode.vdisplay);
1589 log_debug(" mode: <none>");
/* pass 2: encoders, their active CRTC and possible CRTC/clone lists */
1592 log_debug(" encoders:");
1593 HASHMAP_FOREACH(object, card->object_map, iter) {
1594 if (object->type != GRDRM_TYPE_ENCODER)
1597 encoder = encoder_from_object(object);
1598 log_debug(" (id: %u index: %d)", object->id, object->index);
1600 if (encoder->kern.used_crtc)
1601 log_debug(" crtc: %u", encoder->kern.used_crtc);
1603 log_debug(" crtc: <none>");
/* worst-case buffer: one decimal uint32 plus separator per entry */
1605 buf = malloc((DECIMAL_STR_MAX(uint32_t) + 1) * encoder->kern.n_crtcs + 1);
1610 for (i = 0; i < encoder->kern.n_crtcs; ++i)
1611 p += sprintf(p, " %" PRIu32, encoder->kern.crtcs[i]);
1613 log_debug(" possible crtcs:%s", buf);
1617 buf = malloc((DECIMAL_STR_MAX(uint32_t) + 1) * encoder->kern.n_clones + 1);
1622 for (i = 0; i < encoder->kern.n_clones; ++i)
1623 p += sprintf(p, " %" PRIu32, encoder->kern.clones[i]);
1625 log_debug(" possible clones:%s", buf);
/* pass 3: connectors, their encoder, possible encoders, and mode list */
1630 log_debug(" connectors:");
1631 HASHMAP_FOREACH(object, card->object_map, iter) {
1632 if (object->type != GRDRM_TYPE_CONNECTOR)
1635 connector = connector_from_object(object);
1636 log_debug(" (id: %u index: %d)", object->id, object->index);
1637 log_debug(" type: %" PRIu32 "-%" PRIu32 " connection: %" PRIu32 " subpixel: %" PRIu32 " extents: %" PRIu32 "x%" PRIu32,
1638 connector->kern.type, connector->kern.type_id, connector->kern.connection, connector->kern.subpixel,
1639 connector->kern.mm_width, connector->kern.mm_height);
1641 if (connector->kern.used_encoder)
1642 log_debug(" encoder: %" PRIu32, connector->kern.used_encoder);
1644 log_debug(" encoder: <none>");
1646 buf = malloc((DECIMAL_STR_MAX(uint32_t) + 1) * connector->kern.n_encoders + 1);
1651 for (i = 0; i < connector->kern.n_encoders; ++i)
1652 p += sprintf(p, " %" PRIu32, connector->kern.encoders[i]);
1654 log_debug(" possible encoders:%s", buf);
1658 for (i = 0; i < connector->kern.n_modes; ++i) {
1659 struct drm_mode_modeinfo *mode = &connector->kern.modes[i];
1660 log_debug(" mode: %" PRIu32 "x%" PRIu32, mode->hdisplay, mode->vdisplay);
/* pass 4: planes, their CRTC binding and possible CRTC/format lists */
1664 log_debug(" planes:");
1665 HASHMAP_FOREACH(object, card->object_map, iter) {
1666 if (object->type != GRDRM_TYPE_PLANE)
1669 plane = plane_from_object(object);
1670 log_debug(" (id: %u index: %d)", object->id, object->index);
1671 log_debug(" gamma-size: %" PRIu32, plane->kern.gamma_size);
1673 if (plane->kern.used_crtc)
1674 log_debug(" crtc: %" PRIu32, plane->kern.used_crtc);
1676 log_debug(" crtc: <none>");
1678 buf = malloc((DECIMAL_STR_MAX(uint32_t) + 1) * plane->kern.n_crtcs + 1);
1683 for (i = 0; i < plane->kern.n_crtcs; ++i)
1684 p += sprintf(p, " %" PRIu32, plane->kern.crtcs[i]);
1686 log_debug(" possible crtcs:%s", buf);
/* formats are printed as "0xNNNN" fourcc values: up to 2 extra chars + space */
1690 buf = malloc((DECIMAL_STR_MAX(unsigned int) + 3) * plane->kern.n_formats + 1);
1695 for (i = 0; i < plane->kern.n_formats; ++i)
1696 p += sprintf(p, " 0x%x", (unsigned int)plane->kern.formats[i]);
1698 log_debug(" possible formats:%s", buf);
/*
 * grdrm_card_resync - re-read the full object state of the card from the
 * kernel and reconcile it with our cached object_map.
 *
 * Phases (retried up to GRDRM_MAX_TRIES times to survive concurrent
 * hotplug storms):
 *   1. fetch resource/plane-resource ID lists, growing buffers on demand
 *   2. mark cached objects present/absent and drop removed ones
 *   3. create cache entries for newly appeared objects
 *   4. resync each object's kernel state
 *   5. cache the crtc<->connector wiring and, first time around, the old
 *      CRTC settings so they can be restored later (e.g. for VTs)
 *
 * Returns 0 on success, negative errno on failure. NOTE(review): many
 * error-path/continue/brace lines are elided in this excerpt.
 */
1704 static int grdrm_card_resync(grdrm_card *card) {
1705 _cleanup_free_ uint32_t *crtc_ids = NULL, *encoder_ids = NULL, *connector_ids = NULL, *plane_ids = NULL;
1706 uint32_t allocated = 0;
1707 grdrm_object *object;
/* a fresh full resync supersedes any pending async hotplug notification */
1714 card->async_hotplug = false;
1717 /* mark existing objects for possible removal */
1718 HASHMAP_FOREACH(object, card->object_map, iter)
1719 object->present = false;
1721 for (tries = 0; tries < GRDRM_MAX_TRIES; ++tries) {
1722 struct drm_mode_get_plane_res pres;
1723 struct drm_mode_card_res res;
/* (re)allocate the four ID arrays whenever the needed size grew */
1726 if (allocated < card->max_ids) {
1729 free(connector_ids);
1731 crtc_ids = new0(uint32_t, card->max_ids);
1732 encoder_ids = new0(uint32_t, card->max_ids);
1733 connector_ids = new0(uint32_t, card->max_ids);
1734 plane_ids = new0(uint32_t, card->max_ids);
1736 if (!crtc_ids || !encoder_ids || !connector_ids || !plane_ids)
1739 allocated = card->max_ids;
/* ask the kernel to fill our arrays; counts come back clamped/updated */
1743 res.crtc_id_ptr = PTR_TO_UINT64(crtc_ids);
1744 res.connector_id_ptr = PTR_TO_UINT64(connector_ids);
1745 res.encoder_id_ptr = PTR_TO_UINT64(encoder_ids);
1746 res.count_crtcs = allocated;
1747 res.count_encoders = allocated;
1748 res.count_connectors = allocated;
1750 r = ioctl(card->fd, DRM_IOCTL_MODE_GETRESOURCES, &res);
1753 log_debug("grdrm: %s: cannot retrieve drm resources: %m", card->base.name);
1758 pres.plane_id_ptr = PTR_TO_UINT64(plane_ids);
1759 pres.count_planes = allocated;
1761 r = ioctl(card->fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &pres);
1764 log_debug("grdrm: %s: cannot retrieve drm plane-resources: %m", card->base.name);
/* if any list outgrew our buffers, round up and retry */
1768 max = MAX(MAX(res.count_crtcs, res.count_encoders),
1769 MAX(res.count_connectors, pres.count_planes));
1770 if (max > allocated) {
1773 n = ALIGN_POWER2(max);
1774 if (!n || n > UINT16_MAX) {
1775 log_debug("grdrm: %s: excessive DRM resource limit: %" PRIu32, card->base.name, max);
1779 /* retry with resized buffers */
1784 /* mark available objects as present */
1786 for (i = 0; i < res.count_crtcs; ++i) {
1787 object = grdrm_find_object(card, crtc_ids[i]);
1788 if (object && object->type == GRDRM_TYPE_CRTC) {
1789 object->present = true;
1795 for (i = 0; i < res.count_encoders; ++i) {
1796 object = grdrm_find_object(card, encoder_ids[i]);
1797 if (object && object->type == GRDRM_TYPE_ENCODER) {
1798 object->present = true;
1804 for (i = 0; i < res.count_connectors; ++i) {
1805 object = grdrm_find_object(card, connector_ids[i]);
1806 if (object && object->type == GRDRM_TYPE_CONNECTOR) {
1807 object->present = true;
/* zero the ID so the "add new objects" pass below skips it */
1809 connector_ids[i] = 0;
1813 for (i = 0; i < pres.count_planes; ++i) {
1814 object = grdrm_find_object(card, plane_ids[i]);
1815 if (object && object->type == GRDRM_TYPE_PLANE) {
1816 object->present = true;
1822 /* drop removed objects */
1824 HASHMAP_FOREACH(object, card->object_map, iter)
1825 if (!object->present)
1826 grdrm_object_free(object);
1828 /* add new objects */
1830 card->n_crtcs = res.count_crtcs;
1831 for (i = 0; i < res.count_crtcs; ++i) {
1832 if (crtc_ids[i] < 1)
1835 r = grdrm_crtc_new(NULL, card, crtc_ids[i], i);
1840 card->n_encoders = res.count_encoders;
1841 for (i = 0; i < res.count_encoders; ++i) {
1842 if (encoder_ids[i] < 1)
1845 r = grdrm_encoder_new(NULL, card, encoder_ids[i], i);
1850 card->n_connectors = res.count_connectors;
1851 for (i = 0; i < res.count_connectors; ++i) {
1852 if (connector_ids[i] < 1)
1855 r = grdrm_connector_new(NULL, card, connector_ids[i], i);
1860 card->n_planes = pres.count_planes;
1861 for (i = 0; i < pres.count_planes; ++i) {
1862 if (plane_ids[i] < 1)
1865 r = grdrm_plane_new(NULL, card, plane_ids[i], i);
1870 /* re-sync objects after object_map is synced */
1872 HASHMAP_FOREACH(object, card->object_map, iter) {
1873 switch (object->type) {
1874 case GRDRM_TYPE_CRTC:
1875 r = grdrm_crtc_resync(crtc_from_object(object));
1877 case GRDRM_TYPE_ENCODER:
1878 r = grdrm_encoder_resync(encoder_from_object(object));
1880 case GRDRM_TYPE_CONNECTOR:
1881 r = grdrm_connector_resync(connector_from_object(object));
1883 case GRDRM_TYPE_PLANE:
1884 r = grdrm_plane_resync(plane_from_object(object));
1887 assert_not_reached("grdrm: invalid object type");
1894 if (card->async_hotplug)
1898 /* if modeset objects change during sync, start over */
1899 if (card->async_hotplug) {
1900 card->async_hotplug = false;
1904 /* cache crtc/connector relationship */
1905 HASHMAP_FOREACH(object, card->object_map, iter) {
1906 grdrm_connector *connector;
1907 grdrm_encoder *encoder;
/* walk connector -> its encoder -> its CRTC; skip unconnected links */
1910 if (object->type != GRDRM_TYPE_CONNECTOR)
1913 connector = connector_from_object(object);
1914 if (connector->kern.connection != 1 || connector->kern.used_encoder < 1)
1917 object = grdrm_find_object(card, connector->kern.used_encoder);
1918 if (!object || object->type != GRDRM_TYPE_ENCODER)
1921 encoder = encoder_from_object(object);
1922 if (encoder->kern.used_crtc < 1)
1925 object = grdrm_find_object(card, encoder->kern.used_crtc);
1926 if (!object || object->type != GRDRM_TYPE_CRTC)
1929 crtc = crtc_from_object(object);
1930 assert(crtc->kern.n_used_connectors < crtc->kern.max_used_connectors);
1931 crtc->kern.used_connectors[crtc->kern.n_used_connectors++] = connector->object.id;
1934 /* cache old crtc settings for later restore */
1935 HASHMAP_FOREACH(object, card->object_map, iter) {
1938 if (object->type != GRDRM_TYPE_CRTC)
1941 crtc = crtc_from_object(object);
1943 /* Save data if it is the first time we refresh the CRTC. This data can
1944 * be used optionally to restore any previous configuration. For
1945 * instance, it allows us to restore VT configurations after we close
1946 * our session again. */
1947 if (!crtc->old.set) {
1948 crtc->old.fb = crtc->kern.used_fb;
1949 crtc->old.fb_x = crtc->kern.fb_offset_x;
1950 crtc->old.fb_y = crtc->kern.fb_offset_y;
1951 crtc->old.gamma = crtc->kern.gamma_size;
1952 crtc->old.n_connectors = crtc->kern.n_used_connectors;
1953 if (crtc->old.n_connectors)
1954 memcpy(crtc->old.connectors, crtc->kern.used_connectors, sizeof(uint32_t) * crtc->old.n_connectors);
1955 crtc->old.mode_set = crtc->kern.mode_set;
1956 crtc->old.mode = crtc->kern.mode;
1957 crtc->old.set = true;
1961 /* everything synced */
1965 if (tries >= GRDRM_MAX_TRIES) {
1967 * Ugh! We were unable to sync the DRM card state due to heavy
1968 * hotplugging. This should never happen, so print a debug
1969 * message and bail out. The next uevent will trigger
1973 log_debug("grdrm: %s: hotplug-storm when syncing card", card->base.name);
/*
 * card_configure_crtc - try to assign @connector to @crtc.
 * Succeeds only if both are still unassigned, the connector is connected
 * (connection == 1), and some encoder of the connector can drive this
 * CRTC. Returns true on assignment (return statements are elided in this
 * excerpt).
 */
1980 static bool card_configure_crtc(grdrm_crtc *crtc, grdrm_connector *connector) {
1981 grdrm_card *card = crtc->object.card;
1982 grdrm_encoder *encoder;
1983 grdrm_object *object;
1986 if (crtc->object.assigned || connector->object.assigned)
1988 if (connector->kern.connection != 1)
/* check every encoder the connector supports for a path to this CRTC */
1991 for (i = 0; i < connector->kern.n_encoders; ++i) {
1992 object = grdrm_find_object(card, connector->kern.encoders[i]);
1993 if (!object || object->type != GRDRM_TYPE_ENCODER)
1996 encoder = encoder_from_object(object);
1997 for (j = 0; j < encoder->kern.n_crtcs; ++j) {
1998 if (encoder->kern.crtcs[j] == crtc->object.id) {
1999 grdrm_crtc_assign(crtc, connector);
/*
 * grdrm_card_configure - (re)compute the connector->CRTC assignments for
 * the whole card and expose the resulting pipes. See the long rationale
 * below; loop-skip/brace lines are elided in this excerpt.
 */
2008 static void grdrm_card_configure(grdrm_card *card) {
2010 * Modeset Configuration
2011 * This is where we update our modeset configuration and assign
2012 * connectors to CRTCs. This means, each connector that we want to
2013 * enable needs a CRTC, disabled (or unavailable) connectors are left
2014 * alone in the dark. Once all CRTCs are assigned, the remaining CRTCs
2016 * Sounds trivial, but there are several caveats:
2018 * * Multiple connectors can be driven by the same CRTC. This is
2019 * known as 'hardware clone mode'. Advantage over software clone
2020 * mode is that only a single CRTC is needed to drive multiple
2021 * displays. However, few hardware supports this and it's a huge
2022 * headache to configure on dynamic demands. Therefore, we only
2023 * support it if configured statically beforehand.
2025 * * CRTCs are not created equal. Some might be much more powerful
2026 * than others, including more advanced plane support. So far, our
2027 * CRTC selection is random. You need to supply static
2028 * configuration if you want special setups. So far, there is no
2029 * proper way to do advanced CRTC selection on dynamic demands. It
2030 * is not really clear which demands require what CRTC, so, like
2031 * everyone else, we do random CRTC selection unless explicitly
2034 * * Each Connector has a list of possible encoders that can drive
2035 * it, and each encoder has a list of possible CRTCs. If this graph
2036 * is a tree, assignment is trivial. However, if not, we cannot
2037 * reliably decide on configurations beforehand. The encoder is
2038 * always selected by the kernel, so we have to actually set a mode
2039 * to know which encoder is used. There is no way to ask the kernel
2040 * whether a given configuration is possible. This will change with
2041 * atomic-modesetting, but until then, we keep our configurations
2042 * simple and assume they work all just fine. If one fails
2043 * unexpectedly, we print a warning and disable it.
2045 * Configuring a card consists of several steps:
2047 * 1) First of all, we apply any user-configuration. If a user wants
2048 * a fixed configuration, we apply it and preserve it.
2049 * So far, we don't support user configuration files, so this step
2052 * 2) Secondly, we need to apply any quirks from hwdb. Some hardware
2053 * might only support limited configurations or require special
2054 * CRTC/Connector mappings. We read this from hwdb and apply it, if
2056 * So far, we don't support this as there is no known quirk, so
2057 * this step is skipped.
2059 * 3) As deep modesets are expensive, we try to avoid them if
2060 * possible. Therefore, we read the current configuration from the
2061 * kernel and try to preserve it, if compatible with our demands.
2062 * If not, we break it and reassign it in a following step.
2064 * 4) The main step involves configuring all remaining objects. By
2065 * default, all available connectors are enabled, except for those
2066 * disabled by user-configuration. We lookup a suitable CRTC for
2067 * each connector and assign them. As there might be more
2068 * connectors than CRTCs, we apply some ordering so users can
2069 * select which connectors are more important right now.
2070 * So far, we only apply the default ordering, more might be added
2074 grdrm_object *object;
2078 /* clear assignments */
2079 HASHMAP_FOREACH(object, card->object_map, i)
2080 object->assigned = false;
2082 /* preserve existing configurations */
2083 HASHMAP_FOREACH(object, card->object_map, i) {
2084 if (object->type != GRDRM_TYPE_CRTC || object->assigned)
2087 crtc = crtc_from_object(object);
2089 if (crtc->applied) {
2090 /* If our mode is set, preserve it. If no connector is
2091 * set, modeset either failed or the pipe is unused. In
2092 * both cases, leave it alone. It might be tried again
2093 * below in case there're remaining connectors.
2094 * Otherwise, try restoring the assignments. If they
2095 * are no longer valid, leave the pipe untouched. */
2097 if (crtc->set.n_connectors < 1)
/* we only ever program a single connector per CRTC */
2100 assert(crtc->set.n_connectors == 1);
2102 object = grdrm_find_object(card, crtc->set.connectors[0]);
2103 if (!object || object->type != GRDRM_TYPE_CONNECTOR)
2106 card_configure_crtc(crtc, connector_from_object(object));
2107 } else if (crtc->kern.mode_set && crtc->kern.n_used_connectors != 1) {
2108 /* If our mode is not set on the pipe, we know the kern
2109 * information is valid. Try keeping it. If it's not
2110 * possible, leave the pipe untouched for later
2113 object = grdrm_find_object(card, crtc->kern.used_connectors[0]);
2114 if (!object || object->type != GRDRM_TYPE_CONNECTOR)
2117 card_configure_crtc(crtc, connector_from_object(object));
2121 /* assign remaining objects */
2122 HASHMAP_FOREACH(object, card->object_map, i) {
2123 if (object->type != GRDRM_TYPE_CRTC || object->assigned)
2126 crtc = crtc_from_object(object);
/* inner scan: first connector this CRTC can drive wins */
2128 HASHMAP_FOREACH(object, card->object_map, j) {
2129 if (object->type != GRDRM_TYPE_CONNECTOR)
2132 if (card_configure_crtc(crtc, connector_from_object(object)))
/* no connector found: explicitly mark the CRTC unused */
2136 if (!crtc->object.assigned)
2137 grdrm_crtc_assign(crtc, NULL);
2140 /* expose configuration */
2141 HASHMAP_FOREACH(object, card->object_map, i) {
2142 if (object->type != GRDRM_TYPE_CRTC)
2145 grdrm_crtc_expose(crtc_from_object(object));
/*
 * grdrm_card_hotplug - full reaction to a hotplug event: resync card
 * state from the kernel, dump it for debugging, and reconfigure pipes.
 * The session is pinned around configure so freed objects don't vanish
 * mid-operation.
 */
2149 static void grdrm_card_hotplug(grdrm_card *card) {
2157 log_debug("grdrm: %s/%s: reconfigure card", card->base.session->name, card->base.name);
/* card is not usable until resync + configure completed */
2159 card->ready = false;
2160 r = grdrm_card_resync(card);
2162 log_debug("grdrm: %s/%s: cannot re-sync card: %s",
2163 card->base.session->name, card->base.name, strerror(-r));
2167 grdev_session_pin(card->base.session);
2169 /* debug statement to print card information */
2171 grdrm_card_print(card);
2173 grdrm_card_configure(card);
2175 card->hotplug = false;
2177 grdev_session_unpin(card->base.session);
/*
 * grdrm_card_io_fn - sd-event I/O callback for the DRM fd.
 * Closes the card on HUP/ERR; on readable input, parses the stream of
 * struct drm_event records and dispatches page-flip completions to the
 * owning CRTC. NOTE(review): some control-flow lines (continue/return,
 * buffer pointer setup) are elided in this excerpt.
 */
2180 static int grdrm_card_io_fn(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
2181 grdrm_card *card = userdata;
2182 struct drm_event_vblank *vblank;
2183 struct drm_event *event;
2184 uint32_t id, counter;
2185 grdrm_object *object;
2190 if (revents & (EPOLLHUP | EPOLLERR)) {
2191 /* Immediately close device on HUP; no need to flush pending
2192 * data.. there're no events we care about here. */
2193 log_debug("grdrm: %s/%s: HUP", card->base.session->name, card->base.name);
2194 grdrm_card_close(card);
2198 if (revents & (EPOLLIN)) {
2199 l = read(card->fd, buf, sizeof(buf));
/* transient read errors are fine; anything else closes the card */
2201 if (errno == EAGAIN || errno == EINTR)
2204 log_debug("grdrm: %s/%s: read error: %m", card->base.session->name, card->base.name);
2205 grdrm_card_close(card);
/* walk the variable-length event records in the read buffer */
2209 for (len = l; len > 0; len -= event->length) {
2212 if (len < sizeof(*event) || len < event->length) {
2213 log_debug("grdrm: %s/%s: truncated event", card->base.session->name, card->base.name);
2217 switch (event->type) {
2218 case DRM_EVENT_FLIP_COMPLETE:
2219 vblank = (void*)event;
2220 if (event->length < sizeof(*vblank)) {
2221 log_debug("grdrm: %s/%s: truncated vblank event", card->base.session->name, card->base.name);
/* user_data encodes the CRTC id + flip counter we stashed at flip time */
2225 grdrm_decode_vblank_data(vblank->user_data, &id, &counter);
2226 object = grdrm_find_object(card, id);
2227 if (!object || object->type != GRDRM_TYPE_CRTC)
2230 grdrm_crtc_flip_complete(crtc_from_object(object), counter, vblank);
/*
 * grdrm_card_add - finish construction of a grdrm card: allocate its
 * object map (keyed by DRM object id) and register it under @name.
 * Returns 0 on success, negative errno on failure.
 */
2239 static int grdrm_card_add(grdrm_card *card, const char *name) {
/* the fd is opened later, on enable/open */
2241 assert(card->fd < 0);
2243 card->object_map = hashmap_new(&trivial_hash_ops);
2244 if (!card->object_map)
2247 return grdev_card_add(&card->base, name);
/*
 * grdrm_card_destroy - final teardown of a grdrm card.
 * Callers must have stopped and closed the card first; the object map
 * must already be empty.
 */
2250 static void grdrm_card_destroy(grdrm_card *card) {
2252 assert(!card->running);
2253 assert(card->fd < 0);
2254 assert(hashmap_size(card->object_map) == 0);
2256 hashmap_free(card->object_map);
/* Vtable .commit callback: flush pending state to every CRTC of the card. */
2259 static void grdrm_card_commit(grdev_card *basecard) {
2260 grdrm_card *card = grdrm_card_from_base(basecard);
2261 grdrm_object *object;
2264 HASHMAP_FOREACH(object, card->object_map, iter) {
2268 if (object->type != GRDRM_TYPE_CRTC)
2271 grdrm_crtc_commit(crtc_from_object(object));
/* Vtable .restore callback: restore the saved pre-session configuration
 * (crtc->old.*) on every CRTC of the card. */
2275 static void grdrm_card_restore(grdev_card *basecard) {
2276 grdrm_card *card = grdrm_card_from_base(basecard);
2277 grdrm_object *object;
2280 HASHMAP_FOREACH(object, card->object_map, iter) {
2284 if (object->type != GRDRM_TYPE_CRTC)
2287 grdrm_crtc_restore(crtc_from_object(object));
/*
 * grdrm_card_enable - start event processing on an opened card and run
 * an initial hotplug/configure cycle. No-op if the card is not open,
 * already running, or lacks DUMB_BUFFER support.
 */
2291 static void grdrm_card_enable(grdrm_card *card) {
2294 if (card->fd < 0 || card->running)
2297 /* ignore cards without DUMB_BUFFER capability */
2298 if (!card->cap_dumb)
2301 assert(card->fd_src);
2303 log_debug("grdrm: %s/%s: enable", card->base.session->name, card->base.name);
2305 card->running = true;
/* re-arm the fd event source that open() left disabled */
2306 sd_event_source_set_enabled(card->fd_src, SD_EVENT_ON);
2307 grdrm_card_hotplug(card);
/*
 * grdrm_card_disable - stop event processing on a running card, keeping
 * the fd open so it can be re-enabled later. All pipes are marked
 * not-ready and their applied state is invalidated.
 */
2310 static void grdrm_card_disable(grdrm_card *card) {
2311 grdrm_object *object;
2316 if (card->fd < 0 || !card->running)
2319 assert(card->fd_src);
2321 log_debug("grdrm: %s/%s: disable", card->base.session->name, card->base.name);
2323 card->running = false;
2324 card->ready = false;
2325 sd_event_source_set_enabled(card->fd_src, SD_EVENT_OFF);
2327 /* stop all pipes */
2328 HASHMAP_FOREACH(object, card->object_map, iter) {
2331 if (object->type != GRDRM_TYPE_CRTC)
2334 crtc = crtc_from_object(object);
/* force a fresh modeset once the card is enabled again */
2335 crtc->applied = false;
2337 grdev_pipe_ready(&crtc->pipe->base, false);
/*
 * grdrm_card_open - adopt @dev_fd as the card's DRM fd: validate it
 * (non-blocking, cloexec, opened read-write), hook it into the event
 * loop (initially disabled), and cache the DUMB_BUFFER and
 * TIMESTAMP_MONOTONIC capabilities. Takes ownership of @dev_fd in all
 * cases (cleanup attribute closes it on error paths). Returns 0 on
 * success, negative errno on failure. NOTE(review): several error-return
 * lines and the fd handover are elided in this excerpt.
 */
2341 static int grdrm_card_open(grdrm_card *card, int dev_fd) {
2342 _cleanup_(grdev_session_unpinp) grdev_session *pin = NULL;
2343 _cleanup_close_ int fd = dev_fd;
2344 struct drm_get_cap cap;
2348 assert(dev_fd >= 0);
2349 assert(card->fd != dev_fd);
/* pin the session so closing the old fd cannot free us mid-call */
2351 pin = grdev_session_pin(card->base.session);
2352 grdrm_card_close(card);
2354 log_debug("grdrm: %s/%s: open", card->base.session->name, card->base.name);
2356 r = fd_nonblock(fd, true);
2360 r = fd_cloexec(fd, true);
/* the fd must have been opened O_RDWR for modesetting ioctls */
2364 flags = fcntl(fd, F_GETFL, 0);
2367 if ((flags & O_ACCMODE) != O_RDWR)
2370 r = sd_event_add_io(card->base.session->context->event,
2373 EPOLLHUP | EPOLLERR | EPOLLIN,
/* keep the source off until grdrm_card_enable() arms it */
2379 sd_event_source_set_enabled(card->fd_src, SD_EVENT_OFF);
2381 card->hotplug = true;
2385 /* cache DUMB_BUFFER capability */
2386 cap.capability = DRM_CAP_DUMB_BUFFER;
2388 r = ioctl(card->fd, DRM_IOCTL_GET_CAP, &cap);
2389 card->cap_dumb = r >= 0 && cap.value;
2391 log_debug("grdrm: %s/%s: cannot retrieve DUMB_BUFFER capability: %s",
2392 card->base.session->name, card->base.name, strerror(-r));
2393 else if (!card->cap_dumb)
2394 log_debug("grdrm: %s/%s: DUMB_BUFFER capability not supported",
2395 card->base.session->name, card->base.name);
2397 /* cache TIMESTAMP_MONOTONIC capability */
2398 cap.capability = DRM_CAP_TIMESTAMP_MONOTONIC;
2400 r = ioctl(card->fd, DRM_IOCTL_GET_CAP, &cap);
2401 card->cap_monotonic = r >= 0 && cap.value;
2403 log_debug("grdrm: %s/%s: cannot retrieve TIMESTAMP_MONOTONIC capability: %s",
2404 card->base.session->name, card->base.name, strerror(-r));
2405 else if (!card->cap_monotonic)
2406 log_debug("grdrm: %s/%s: TIMESTAMP_MONOTONIC is disabled globally, fix this NOW!",
2407 card->base.session->name, card->base.name);
/*
 * grdrm_card_close - disable the card, drop its event source and fd, and
 * free every cached DRM object. The session is pinned while objects are
 * freed so callbacks cannot destroy the session underneath us.
 */
2412 static void grdrm_card_close(grdrm_card *card) {
2413 grdrm_object *object;
2418 log_debug("grdrm: %s/%s: close", card->base.session->name, card->base.name);
2420 grdrm_card_disable(card);
2422 card->fd_src = sd_event_source_unref(card->fd_src);
2423 card->fd = safe_close(card->fd);
2425 grdev_session_pin(card->base.session);
2426 while ((object = hashmap_first(card->object_map)))
2427 grdrm_object_free(object);
2428 grdev_session_unpin(card->base.session);
/*
 * grdrm_card_async - classify an ioctl error @r from a runtime DRM call.
 * EACCES means we lost DRM-Master -> disable the card; object-gone style
 * errors mark async_hotplug for a later resync. Returns whether the card
 * is currently not ready. NOTE(review): the error-code dispatch
 * (switch/if lines) is elided in this excerpt.
 */
2431 static bool grdrm_card_async(grdrm_card *card, int r) {
2434 /* If we get EACCES on runtime DRM calls, we lost DRM-Master
2435 * (or we did something terribly wrong). Immediately disable
2436 * the card, so we stop all pipes and wait to be activated
2438 grdrm_card_disable(card);
2441 /* DRM objects can be hotplugged at any time. If an object is
2442 * removed that we use, we remember that state so a following
2443 * call can test for this.
2444 * Note that we also get a uevent as followup, this will resync
2445 * the whole device. */
2446 card->async_hotplug = true;
2450 return !card->ready;
2455 * The unmanaged DRM card opens the device node for a given DRM device
2456 * directly (/dev/dri/cardX) and thus needs sufficient privileges. It opens
2457 * the device only if we really require it and releases it as soon as we're
2458 * disabled or closed.
2459 * The unmanaged element can be used in all situations where you have direct
2460 * access to DRM device nodes. Unlike managed DRM elements, it can be used
2461 * outside of user sessions and in emergency situations where logind is not
/*
 * Vtable .enable callback for unmanaged cards: (re)open the device node
 * if needed, acquire DRM-Master, and start the card. Failures are logged
 * and the device is simply left disabled.
 */
2465 static void unmanaged_card_enable(grdev_card *basecard) {
2466 unmanaged_card *cu = unmanaged_card_from_base(basecard);
2469 if (cu->card.fd < 0) {
2470 /* try open on activation if it failed during allocation */
2471 fd = open(cu->devnode, O_RDWR | O_CLOEXEC | O_NOCTTY | O_NONBLOCK);
2473 /* not fatal; simply ignore the device */
2474 log_debug("grdrm: %s/%s: cannot open node %s: %m",
2475 basecard->session->name, basecard->name, cu->devnode);
2479 /* we might already be DRM-Master by open(); that's fine */
2481 r = grdrm_card_open(&cu->card, fd);
2483 log_debug("grdrm: %s/%s: cannot open: %s",
2484 basecard->session->name, basecard->name, strerror(-r));
/* acquiring DRM-Master is required before any modesetting */
2489 r = ioctl(cu->card.fd, DRM_IOCTL_SET_MASTER, 0);
2491 log_debug("grdrm: %s/%s: cannot acquire DRM-Master: %m",
2492 basecard->session->name, basecard->name);
2496 grdrm_card_enable(&cu->card);
/* Vtable .disable callback for unmanaged cards: stop card management. */
2499 static void unmanaged_card_disable(grdev_card *basecard) {
2500 unmanaged_card *cu = unmanaged_card_from_base(basecard);
2502 grdrm_card_disable(&cu->card);
/*
 * unmanaged_card_new - create an unmanaged card from udev device @ud:
 * derive the card name from the device number, allocate the object,
 * register it, and best-effort open the node (dropping any implicit
 * DRM-Master). Returns 0 on success, negative errno on failure.
 * NOTE(review): allocation checks and error returns are elided in this
 * excerpt.
 */
2505 static int unmanaged_card_new(grdev_card **out, grdev_session *session, struct udev_device *ud) {
2506 _cleanup_(grdev_card_freep) grdev_card *basecard = NULL;
2507 char name[GRDRM_CARD_NAME_MAX];
2509 const char *devnode;
2513 assert_return(session, -EINVAL);
2514 assert_return(ud, -EINVAL);
2516 devnode = udev_device_get_devnode(ud);
2517 devnum = udev_device_get_devnum(ud);
2518 if (!devnode || devnum == 0)
2521 grdrm_name(name, devnum);
2523 cu = new0(unmanaged_card, 1);
/* basecard now owns cu via the cleanup attribute until success */
2527 basecard = &cu->card.base;
2528 cu->card = GRDRM_CARD_INIT(&unmanaged_card_vtable, session);
2530 cu->devnode = strdup(devnode);
2534 r = grdrm_card_add(&cu->card, name);
2538 /* try to open but ignore errors */
2539 fd = open(cu->devnode, O_RDWR | O_CLOEXEC | O_NOCTTY | O_NONBLOCK);
2541 /* not fatal; allow uaccess based control on activation */
2542 log_debug("grdrm: %s/%s: cannot open node %s: %m",
2543 basecard->session->name, basecard->name, cu->devnode);
2545 /* We might get DRM-Master implicitly on open(); drop it immediately
2546 * so we acquire it only once we're actually enabled. We don't
2547 * really care whether this call fails or not, but lets log any
2548 * weird errors, anyway. */
2549 r = ioctl(fd, DRM_IOCTL_DROP_MASTER, 0);
2550 if (r < 0 && errno != EACCES && errno != EINVAL)
2551 log_debug("grdrm: %s/%s: cannot drop DRM-Master: %m",
2552 basecard->session->name, basecard->name);
2554 r = grdrm_card_open(&cu->card, fd);
2556 log_debug("grdrm: %s/%s: cannot open: %s",
2557 basecard->session->name, basecard->name, strerror(-r));
/* Vtable .free callback for unmanaged cards: close and destroy the card.
 * The card must already be disabled. */
2566 static void unmanaged_card_free(grdev_card *basecard) {
2567 unmanaged_card *cu = unmanaged_card_from_base(basecard);
2569 assert(!basecard->enabled);
2571 grdrm_card_close(&cu->card);
2572 grdrm_card_destroy(&cu->card);
/* Card operations for directly-opened (unmanaged) DRM devices;
 * commit/restore are shared with the managed implementation. */
2577 static const grdev_card_vtable unmanaged_card_vtable = {
2578 .free = unmanaged_card_free,
2579 .enable = unmanaged_card_enable,
2580 .disable = unmanaged_card_disable,
2581 .commit = grdrm_card_commit,
2582 .restore = grdrm_card_restore,
2587 * The managed DRM card uses systemd-logind to acquire DRM devices. This
2588 * means, we do not open the device node /dev/dri/cardX directly. Instead,
2589 * logind passes us a file-descriptor whenever our session is activated. Thus,
2590 * we don't need access to the device node directly.
2591 * Furthermore, whenever the session is put asleep, logind revokes the
2592 * file-descriptor so we lose access to the device.
2593 * Managed DRM cards should be preferred over unmanaged DRM cards whenever
2594 * you run inside a user session with exclusive device access.
/* Vtable .enable callback for managed cards: resume card management;
 * DRM-Master and the fd are controlled by logind, not by us. */
2597 static void managed_card_enable(grdev_card *card) {
2598 managed_card *cm = managed_card_from_base(card);
2600 /* If the device is manually re-enabled, we try to resume our card
2601 * management. Note that we have no control over DRM-Master and the fd,
2602 * so we have to take over the state from the last logind event. */
2605 grdrm_card_enable(&cm->card);
/* Vtable .disable callback for managed cards: pause management but keep
 * the logind-provided fd for a later wake-up. */
2608 static void managed_card_disable(grdev_card *card) {
2609 managed_card *cm = managed_card_from_base(card);
2611 /* If the device is manually disabled, we keep the FD but put our card
2612 * management asleep. This way, we can wake up at any time, but don't
2613 * touch the device while asleep. */
2615 grdrm_card_disable(&cm->card);
/*
 * D-Bus match callback for logind's PauseDevice signal. Disables the
 * card (treating every pause as "force") and, for mode "pause",
 * acknowledges with PauseDeviceComplete. See the long rationale below.
 * NOTE(review): some error-return lines are elided in this excerpt.
 */
2618 static int managed_card_pause_device_fn(sd_bus *bus,
2619 sd_bus_message *signal,
2621 sd_bus_error *ret_error) {
2622 managed_card *cm = userdata;
2623 grdev_session *session = cm->card.base.session;
2624 uint32_t major, minor;
2629 * We get PauseDevice() signals from logind whenever a device we
2630 * requested was, or is about to be, paused. Arguments are major/minor
2631 * number of the device and the mode of the operation.
2632 * In case the event is not about our device, we ignore it. Otherwise,
2633 * we treat it as asynchronous DRM-DROP-MASTER. Note that we might have
2634 * already handled an EACCES error from a modeset ioctl, in which case
2635 * we already disabled the device.
2637 * @mode can be one of the following:
2638 * "pause": The device is about to be paused. We must react
2639 * immediately and respond with PauseDeviceComplete(). Once
2640 * we replied, logind will pause the device. Note that
2641 * logind might apply any kind of timeout and force pause
2642 * the device if we don't respond in a timely manner. In
2643 * this case, we will receive a second PauseDevice event
2644 * with @mode set to "force" (or similar).
2645 * "force": The device was disabled forcefully by logind. DRM-Master
2646 * was already dropped. This is just an asynchronous
2647 * notification so we can put the device asleep (in case
2648 * we didn't already notice the dropped DRM-Master).
2649 * "gone": This is like "force" but is sent if the device was
2650 * paused due to a device-removal event.
2652 * We always handle PauseDevice signals as "force" as we properly
2653 * support asynchronously dropping DRM-Master, anyway. But in case
2654 * logind sent mode "pause", we also call PauseDeviceComplete() to
2655 * immediately acknowledge the request.
2658 r = sd_bus_message_read(signal, "uus", &major, &minor, &mode);
2660 log_debug("grdrm: %s/%s: erroneous PauseDevice signal",
2661 session->name, cm->card.base.name);
2665 /* not our device? */
2666 if (makedev(major, minor) != cm->devnum)
/* pause unconditionally; mode only affects whether we must reply */
2670 grdrm_card_disable(&cm->card);
2672 if (streq(mode, "pause")) {
2673 _cleanup_bus_message_unref_ sd_bus_message *m = NULL;
2676 * Sending PauseDeviceComplete() is racy if logind triggers the
2677 * timeout. That is, if we take too long and logind pauses the
2678 * device by sending a forced PauseDevice, our
2679 * PauseDeviceComplete call will be stray. That's fine, though.
2680 * logind ignores such stray calls. Only if logind also sent a
2681 * further PauseDevice() signal, it might match our call
2682 * incorrectly to the newer PauseDevice(). That's fine, too, as
2683 * we handle that event asynchronously, anyway. Therefore,
2684 * whatever happens, we're fine. Yay!
2687 r = sd_bus_message_new_method_call(session->context->sysbus,
2689 "org.freedesktop.login1",
2691 "org.freedesktop.login1.Session",
2692 "PauseDeviceComplete");
2694 r = sd_bus_message_append(m, "uu", major, minor);
/* fire-and-forget: no reply callback needed */
2696 r = sd_bus_send(session->context->sysbus, m, NULL);
2700 log_debug("grdrm: %s/%s: cannot send PauseDeviceComplete: %s",
2701 session->name, cm->card.base.name, strerror(-r));
/*
 * D-Bus match callback for logind's ResumeDevice signal. Re-opens (if
 * necessary) and re-enables the card using the fd logind passed along.
 * See the rationale below on why the new fd is normally dropped.
 * NOTE(review): some error-return lines are elided in this excerpt.
 */
2707 static int managed_card_resume_device_fn(sd_bus *bus,
2708 sd_bus_message *signal,
2710 sd_bus_error *ret_error) {
2711 managed_card *cm = userdata;
2712 grdev_session *session = cm->card.base.session;
2713 uint32_t major, minor;
2717 * We get ResumeDevice signals whenever logind resumed a previously
2718 * paused device. The arguments contain the major/minor number of the
2719 * related device and a new file-descriptor for the freshly opened
2721 * If the signal is not about our device, we simply ignore it.
2722 * Otherwise, we immediately resume the device. Note that we drop the
2723 * new file-descriptor as we already have one from TakeDevice(). logind
2724 * preserves the file-context across pause/resume for DRM but only
2725 * drops/acquires DRM-Master accordingly. This way, our context (like
2726 * DRM-FBs and BOs) is preserved.
2729 r = sd_bus_message_read(signal, "uuh", &major, &minor, &fd);
2731 log_debug("grdrm: %s/%s: erroneous ResumeDevice signal",
2732 session->name, cm->card.base.name);
2736 /* not our device? */
2737 if (makedev(major, minor) != cm->devnum)
2740 if (cm->card.fd < 0) {
2741 /* This shouldn't happen. We should already own an FD from
2742 * TakeDevice(). However, lets be safe and use this FD in case
2743 * we really don't have one. There is no harm in doing this
2744 * and our code works fine this way. */
/* dup because the bus message owns the unix-fd it carries */
2745 fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
2747 log_debug("grdrm: %s/%s: cannot duplicate fd: %m",
2748 session->name, cm->card.base.name);
2752 r = grdrm_card_open(&cm->card, fd);
2754 log_debug("grdrm: %s/%s: cannot open: %s",
2755 session->name, cm->card.base.name, strerror(-r));
/* only restart management if the user left the card enabled */
2761 if (cm->card.base.enabled)
2762 grdrm_card_enable(&cm->card);
/* Subscribe to logind's PauseDevice and ResumeDevice signals, scoped to
 * our session's object path on the system bus. The match slots are stored
 * in cm->slot_pause_device / cm->slot_resume_device so they can be
 * released in managed_card_free().
 * NOTE(review): the strjoin() NULL terminators, "int r;", error checks
 * and the trailing sd_bus_add_match() arguments are missing from this
 * excerpt. */
static int managed_card_setup_bus(managed_card *cm) {
        grdev_session *session = cm->card.base.session;
        _cleanup_free_ char *match = NULL;

        /* only signals emitted on *our* session object are interesting */
        match = strjoin("type='signal',"
                        "sender='org.freedesktop.login1',"
                        "interface='org.freedesktop.login1.Session',"
                        "member='PauseDevice',"
                        "path='", session->path, "'",

        r = sd_bus_add_match(session->context->sysbus,
                             &cm->slot_pause_device,
                             managed_card_pause_device_fn,

        /* same match, but for the ResumeDevice member */
        match = strjoin("type='signal',"
                        "sender='org.freedesktop.login1',"
                        "interface='org.freedesktop.login1.Session',"
                        "member='ResumeDevice',"
                        "path='", session->path, "'",

        r = sd_bus_add_match(session->context->sysbus,
                             &cm->slot_resume_device,
                             managed_card_resume_device_fn,
/* Async reply callback for the TakeDevice() call issued in
 * managed_card_take_device(): on success, duplicate the returned fd, open
 * the card with it and enable it unless logind handed it over paused.
 * NOTE(review): the "void *userdata" parameter, "int r, fd; int paused;"
 * declarations and the error-branch braces/returns are missing from this
 * excerpt. */
static int managed_card_take_device_fn(sd_bus *bus,
                                       sd_bus_message *reply,
                                       sd_bus_error *ret_error) {
        managed_card *cm = userdata;
        grdev_session *session = cm->card.base.session;

        /* the pending call completed; drop its slot */
        cm->slot_take_device = sd_bus_slot_unref(cm->slot_take_device);

        if (sd_bus_message_is_method_error(reply, NULL)) {
                const sd_bus_error *error = sd_bus_message_get_error(reply);

                log_debug("grdrm: %s/%s: TakeDevice failed: %s: %s",
                          session->name, cm->card.base.name, error->name, error->message);

        cm->acquired = true;

        /* "hb": device fd plus whether the device is currently paused */
        r = sd_bus_message_read(reply, "hb", &fd, &paused);
                log_debug("grdrm: %s/%s: erroneous TakeDevice reply",
                          session->name, cm->card.base.name);

        /* the reply owns its fd; keep our own duplicate at >= 3 */
        fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
                log_debug("grdrm: %s/%s: cannot duplicate fd: %m",
                          session->name, cm->card.base.name);

        r = grdrm_card_open(&cm->card, fd);
                log_debug("grdrm: %s/%s: cannot open: %s",
                          session->name, cm->card.base.name, strerror(-r));

        /* don't enable while paused; the resume handler does that later */
        if (!paused && cm->card.base.enabled)
                grdrm_card_enable(&cm->card);
/* Ask logind asynchronously for the device fd via
 * org.freedesktop.login1.Session.TakeDevice(major, minor); the reply is
 * handled in managed_card_take_device_fn(). Failures are only logged —
 * the card simply stays unopened.
 * NOTE(review): the "int r;" declaration, the method-call destination
 * path/member arguments, error checks and the "error:" label are missing
 * from this excerpt. */
static void managed_card_take_device(managed_card *cm) {
        _cleanup_bus_message_unref_ sd_bus_message *m = NULL;
        grdev_session *session = cm->card.base.session;

        r = sd_bus_message_new_method_call(session->context->sysbus,
                                           "org.freedesktop.login1",
                                           "org.freedesktop.login1.Session",

        r = sd_bus_message_append(m, "uu", major(cm->devnum), minor(cm->devnum));

        r = sd_bus_call_async(session->context->sysbus,
                              &cm->slot_take_device,
                              managed_card_take_device_fn,

        /* remember that a TakeDevice() request is now in flight */
        cm->requested = true;

        log_debug("grdrm: %s/%s: cannot send TakeDevice request: %s",
                  session->name, cm->card.base.name, strerror(-r));
/* Counterpart to managed_card_take_device(): close the card and, if a
 * TakeDevice() call succeeded or is still pending, fire-and-forget a
 * ReleaseDevice() call back to logind.
 * NOTE(review): the "int r;" declaration, the early "return", the
 * method-call path/member arguments and error checks are missing from
 * this excerpt. */
static void managed_card_release_device(managed_card *cm) {
        _cleanup_bus_message_unref_ sd_bus_message *m = NULL;
        grdev_session *session = cm->card.base.session;

        /*
         * If TakeDevice() is pending or was successful, make sure to
         * release the device again. We don't care for return-values,
         * so send it without waiting or callbacks.
         * If a failed TakeDevice() is pending, but someone else took
         * the device on the same bus-connection, we might incorrectly
         * release their device. This is an unlikely race, though.
         * Furthermore, you really shouldn't have two users of the
         * controller-API on the same session, on the same devices, *AND* on
         * the same bus-connection. So we don't care for that race..
         */

        grdrm_card_close(&cm->card);
        cm->requested = false;

        /* neither acquired nor in flight: nothing to release */
        if (!cm->acquired && !cm->slot_take_device)

        cm->slot_take_device = sd_bus_slot_unref(cm->slot_take_device);
        cm->acquired = false;

        r = sd_bus_message_new_method_call(session->context->sysbus,
                                           "org.freedesktop.login1",
                                           "org.freedesktop.login1.Session",

        r = sd_bus_message_append(m, "uu", major(cm->devnum), minor(cm->devnum));

        /* fire-and-forget: no reply slot, no callback */
        r = sd_bus_send(session->context->sysbus, m, NULL);

        /* -ENOTCONN is expected during shutdown, so don't log it */
        if (r < 0 && r != -ENOTCONN)
                log_debug("grdrm: %s/%s: cannot send ReleaseDevice: %s",
                          session->name, cm->card.base.name, strerror(-r));
/* Allocate and register a logind-managed DRM card for udev device @ud:
 * derive the canonical card name from the device number, set up the
 * PauseDevice/ResumeDevice bus matches, add the card to the session and
 * kick off the asynchronous TakeDevice() request. Returns 0 on success,
 * negative errno-style error on failure.
 * NOTE(review): the "dev_t devnum;"/"int r;"/"managed_card *cm;"
 * declarations, devnum validation, OOM check, error handling and the
 * "*out = basecard" hand-over are missing from this excerpt. */
static int managed_card_new(grdev_card **out, grdev_session *session, struct udev_device *ud) {
        _cleanup_(grdev_card_freep) grdev_card *basecard = NULL;
        char name[GRDRM_CARD_NAME_MAX];

        assert_return(session, -EINVAL);
        assert_return(session->managed, -EINVAL);
        assert_return(session->context->sysbus, -EINVAL);
        assert_return(ud, -EINVAL);

        devnum = udev_device_get_devnum(ud);

        /* canonical card name is derived from the device number */
        grdrm_name(name, devnum);

        cm = new0(managed_card, 1);

        basecard = &cm->card.base;
        cm->card = GRDRM_CARD_INIT(&managed_card_vtable, session);
        cm->devnum = devnum;

        r = managed_card_setup_bus(cm);

        r = grdrm_card_add(&cm->card, name);

        /* asynchronous; the card is opened once logind replies */
        managed_card_take_device(cm);
/* vtable .free callback: give the device back to logind, drop our
 * bus-signal slots and destroy the embedded grdrm card state. The card
 * must already be disabled when this runs. */
static void managed_card_free(grdev_card *basecard) {
        managed_card *cm = managed_card_from_base(basecard);

        assert(!basecard->enabled);

        managed_card_release_device(cm);
        cm->slot_resume_device = sd_bus_slot_unref(cm->slot_resume_device);
        cm->slot_pause_device = sd_bus_slot_unref(cm->slot_pause_device);
        grdrm_card_destroy(&cm->card);
/* Card operations for logind-managed DRM cards; commit/restore are the
 * generic grdrm implementations shared with the unmanaged variant. */
static const grdev_card_vtable managed_card_vtable = {
        .free = managed_card_free,
        .enable = managed_card_enable,
        .disable = managed_card_disable,
        .commit = grdrm_card_commit,
        .restore = grdrm_card_restore,
2998 * Generic Constructor
2999 * Instead of relying on the caller to choose between managed and unmanaged
3000 * DRM devices, the grdev_drm_new() constructor does that for you (by
3001 * looking at session->managed).
3004 bool grdev_is_drm_card(grdev_card *basecard) {
3005 return basecard && (basecard->vtable == &unmanaged_card_vtable ||
3006 basecard->vtable == &managed_card_vtable);
3009 grdev_card *grdev_find_drm_card(grdev_session *session, dev_t devnum) {
3010 char name[GRDRM_CARD_NAME_MAX];
3012 assert_return(session, NULL);
3013 assert_return(devnum != 0, NULL);
3015 grdrm_name(name, devnum);
3016 return grdev_find_card(session, name);
3019 int grdev_drm_card_new(grdev_card **out, grdev_session *session, struct udev_device *ud) {
3020 assert_return(session, -EINVAL);
3021 assert_return(ud, -EINVAL);
3023 return session->managed ? managed_card_new(out, session, ud) : unmanaged_card_new(out, session, ud);
3026 void grdev_drm_card_hotplug(grdev_card *basecard, struct udev_device *ud) {
3027 const char *p, *action;
3032 assert(grdev_is_drm_card(basecard));
3035 card = grdrm_card_from_base(basecard);
3037 action = udev_device_get_action(ud);
3038 if (!action || streq(action, "add") || streq(action, "remove")) {
3039 /* If we get add/remove events on DRM nodes without devnum, we
3040 * got hotplugged DRM objects so refresh the device. */
3041 devnum = udev_device_get_devnum(ud);
3043 card->hotplug = true;
3044 grdrm_card_hotplug(card);
3046 } else if (streq_ptr(action, "change")) {
3047 /* A change event with HOTPLUG=1 is sent whenever a connector
3048 * changed state. Refresh the device to update our state. */
3049 p = udev_device_get_property_value(ud, "HOTPLUG");
3050 if (streq_ptr(p, "1")) {
3051 card->hotplug = true;
3052 grdrm_card_hotplug(card);