backend-drm: extract device from backend

Extract the kms device from the backend to allow a better separation of the
backend and the kms device. This will allow handling multiple kms devices with
a single drm backend.

Signed-off-by: Michael Tretter <m.tretter@pengutronix.de>
Authored by Michael Tretter 3 years ago; committed by Daniel Stone
parent 2860933ded
commit 0d967bd7f4
  1. libweston/backend-drm/drm-gbm.c (11 lines changed)
  2. libweston/backend-drm/drm-internal.h (70 lines changed)
  3. libweston/backend-drm/drm-virtual.c (9 lines changed)
  4. libweston/backend-drm/drm.c (298 lines changed)
  5. libweston/backend-drm/fb.c (39 lines changed)
  6. libweston/backend-drm/kms-color.c (5 lines changed)
  7. libweston/backend-drm/kms.c (140 lines changed)
  8. libweston/backend-drm/modes.c (24 lines changed)
  9. libweston/backend-drm/state-helpers.c (8 lines changed)
  10. libweston/backend-drm/state-propose.c (50 lines changed)
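
To make the pattern in the hunks below easier to follow, here is a minimal, self-contained sketch of the split the commit message describes: per-device KMS state moves into struct drm_device, the backend keeps compositor-wide state plus a pointer to its (for now single) device, and functions fetch the device up front instead of dereferencing backend fields. The types and fields here are simplified stand-ins (a plain fd instead of the nested drm struct, only a few of the moved fields), not the actual Weston definitions.

```c
/* Simplified stand-in types, not the real Weston structs. */
#include <stdbool.h>
#include <stdio.h>

struct drm_backend;

struct drm_device {
	struct drm_backend *backend;	/* back-pointer, as in the diff */
	int fd;				/* stand-in for device->drm.fd */
	bool atomic_modeset;
	int cursor_width, cursor_height;
};

struct drm_backend {
	struct drm_device *drm;		/* was embedded state, now a pointer */
	bool use_pixman;
};

/* Code that used to read b->cursor_width etc. now goes through the device. */
static void print_cursor_size(struct drm_backend *b)
{
	struct drm_device *device = b->drm;

	printf("cursor: %dx%d (atomic: %d)\n",
	       device->cursor_width, device->cursor_height,
	       device->atomic_modeset);
}

int main(void)
{
	struct drm_device dev = { .fd = -1, .atomic_modeset = true,
				  .cursor_width = 64, .cursor_height = 64 };
	struct drm_backend backend = { .drm = &dev };

	dev.backend = &backend;
	print_cursor_size(&backend);
	return 0;
}
```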

@ -118,8 +118,9 @@ drm_backend_create_gl_renderer(struct drm_backend *b)
int
init_egl(struct drm_backend *b)
{
b->gbm = create_gbm_device(b->drm.fd);
struct drm_device *device = b->drm;
b->gbm = create_gbm_device(device->drm.fd);
if (!b->gbm)
return -1;
@ -145,6 +146,7 @@ static void drm_output_fini_cursor_egl(struct drm_output *output)
static int
drm_output_init_cursor_egl(struct drm_output *output, struct drm_backend *b)
{
struct drm_device *device = b->drm;
unsigned int i;
/* No point creating cursors if we don't have a plane for them. */
@ -154,7 +156,7 @@ drm_output_init_cursor_egl(struct drm_output *output, struct drm_backend *b)
for (i = 0; i < ARRAY_LENGTH(output->gbm_cursor_fb); i++) {
struct gbm_bo *bo;
bo = gbm_bo_create(b->gbm, b->cursor_width, b->cursor_height,
bo = gbm_bo_create(b->gbm, device->cursor_width, device->cursor_height,
GBM_FORMAT_ARGB8888,
GBM_BO_USE_CURSOR | GBM_BO_USE_WRITE);
if (!bo)
@ -173,7 +175,7 @@ drm_output_init_cursor_egl(struct drm_output *output, struct drm_backend *b)
err:
weston_log("cursor buffers unavailable, using gl cursors\n");
b->cursors_are_broken = true;
device->cursors_are_broken = true;
drm_output_fini_cursor_egl(output);
return -1;
}
@ -317,6 +319,7 @@ drm_output_render_gl(struct drm_output_state *state, pixman_region32_t *damage)
static void
switch_to_gl_renderer(struct drm_backend *b)
{
struct drm_device *device = b->drm;
struct drm_output *output;
bool dmabuf_support_inited;
bool linux_explicit_sync_inited;
@ -330,7 +333,7 @@ switch_to_gl_renderer(struct drm_backend *b)
weston_log("Switching to GL renderer\n");
b->gbm = create_gbm_device(b->drm.fd);
b->gbm = create_gbm_device(device->drm.fd);
if (!b->gbm) {
weston_log("Failed to create gbm device. "
"Aborting renderer switch\n");

@ -259,15 +259,8 @@ enum actions_needed_dmabuf_feedback {
ACTION_NEEDED_REMOVE_SCANOUT_TRANCHE = (1 << 1),
};
struct drm_backend {
struct weston_backend base;
struct weston_compositor *compositor;
struct udev *udev;
struct wl_event_source *drm_source;
struct udev_monitor *udev_monitor;
struct wl_event_source *udev_drm_source;
struct drm_device {
struct drm_backend *backend;
struct {
int id;
@ -275,50 +268,63 @@ struct drm_backend {
char *filename;
dev_t devnum;
} drm;
struct gbm_device *gbm;
struct wl_listener session_listener;
uint32_t gbm_format;
/* we need these parameters in order to not fail drmModeAddFB2()
* due to out of bounds dimensions, and then mistakenly set
* sprites_are_broken:
*/
int min_width, max_width;
int min_height, max_height;
/* drm_crtc::link */
struct wl_list crtc_list;
struct wl_list plane_list;
void *repaint_data;
/* drm_writeback::link */
struct wl_list writeback_connector_list;
bool state_invalid;
/* drm_crtc::link */
struct wl_list crtc_list;
bool atomic_modeset;
/* drm_writeback::link */
struct wl_list writeback_connector_list;
bool aspect_ratio_supported;
int32_t cursor_width;
int32_t cursor_height;
bool sprites_are_broken;
bool cursors_are_broken;
bool sprites_are_broken;
bool atomic_modeset;
void *repaint_data;
bool fb_modifiers;
/* we need these parameters in order to not fail drmModeAddFB2()
* due to out of bounds dimensions, and then mistakenly set
* sprites_are_broken:
*/
int min_width, max_width;
int min_height, max_height;
};
struct drm_backend {
struct weston_backend base;
struct weston_compositor *compositor;
struct udev *udev;
struct wl_event_source *drm_source;
struct udev_monitor *udev_monitor;
struct wl_event_source *udev_drm_source;
struct drm_device *drm;
struct gbm_device *gbm;
struct wl_listener session_listener;
uint32_t gbm_format;
bool use_pixman;
bool use_pixman_shadow;
struct udev_input input;
int32_t cursor_width;
int32_t cursor_height;
uint32_t pageflip_timeout;
bool shutting_down;
bool aspect_ratio_supported;
bool fb_modifiers;
struct weston_log_scope *debug;
};

@ -92,6 +92,7 @@ drm_virtual_crtc_destroy(struct drm_crtc *crtc)
static struct drm_plane *
drm_virtual_plane_create(struct drm_backend *b, struct drm_output *output)
{
struct drm_device *device = b->drm;
struct drm_plane *plane;
struct weston_drm_format *fmt;
uint64_t mod;
@ -115,7 +116,7 @@ drm_virtual_plane_create(struct drm_backend *b, struct drm_output *output)
/* If output supports linear modifier, we add it to the plane.
* Otherwise we add DRM_FORMAT_MOD_INVALID, as explicit modifiers
* are not supported. */
if ((output->gbm_bo_flags & GBM_BO_USE_LINEAR) && b->fb_modifiers)
if ((output->gbm_bo_flags & GBM_BO_USE_LINEAR) && device->fb_modifiers)
mod = DRM_FORMAT_MOD_LINEAR;
else
mod = DRM_FORMAT_MOD_INVALID;
@ -124,7 +125,7 @@ drm_virtual_plane_create(struct drm_backend *b, struct drm_output *output)
goto err;
weston_plane_init(&plane->base, b->compositor, 0, 0);
wl_list_insert(&b->plane_list, &plane->link);
wl_list_insert(&device->plane_list, &plane->link);
return plane;
@ -192,11 +193,13 @@ drm_virtual_output_repaint(struct weston_output *output_base,
struct drm_plane_state *scanout_state;
struct drm_pending_state *pending_state;
struct drm_backend *backend;
struct drm_device *device;
assert(output->virtual);
backend = output->backend;
pending_state = backend->repaint_data;
device = backend->drm;
pending_state = device->repaint_data;
if (output->disable_pending || output->destroy_pending)
goto err;

File diff for libweston/backend-drm/drm.c suppressed because it is too large.

@ -71,13 +71,14 @@ drm_fb_destroy_dumb(struct drm_fb *fb)
static int
drm_fb_addfb(struct drm_backend *b, struct drm_fb *fb)
{
struct drm_device *device = b->drm;
int ret = -EINVAL;
uint64_t mods[4] = { };
size_t i;
/* If we have a modifier set, we must only use the WithModifiers
* entrypoint; we cannot import it through legacy ioctls. */
if (b->fb_modifiers && fb->modifier != DRM_FORMAT_MOD_INVALID) {
if (device->fb_modifiers && fb->modifier != DRM_FORMAT_MOD_INVALID) {
/* KMS demands that if a modifier is set, it must be the same
* for all planes. */
for (i = 0; i < ARRAY_LENGTH(mods) && fb->handles[i]; i++)
@ -115,6 +116,7 @@ struct drm_fb *
drm_fb_create_dumb(struct drm_backend *b, int width, int height,
uint32_t format)
{
struct drm_device *device = b->drm;
struct drm_fb *fb;
int ret;
@ -145,7 +147,7 @@ drm_fb_create_dumb(struct drm_backend *b, int width, int height,
create_arg.width = width;
create_arg.height = height;
ret = drmIoctl(b->drm.fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_arg);
ret = drmIoctl(device->drm.fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_arg);
if (ret)
goto err_fb;
@ -157,7 +159,7 @@ drm_fb_create_dumb(struct drm_backend *b, int width, int height,
fb->size = create_arg.size;
fb->width = width;
fb->height = height;
fb->fd = b->drm.fd;
fb->fd = device->drm.fd;
if (drm_fb_addfb(b, fb) != 0) {
weston_log("failed to create kms fb: %s\n", strerror(errno));
@ -171,18 +173,18 @@ drm_fb_create_dumb(struct drm_backend *b, int width, int height,
goto err_add_fb;
fb->map = mmap(NULL, fb->size, PROT_WRITE,
MAP_SHARED, b->drm.fd, map_arg.offset);
MAP_SHARED, device->drm.fd, map_arg.offset);
if (fb->map == MAP_FAILED)
goto err_add_fb;
return fb;
err_add_fb:
drmModeRmFB(b->drm.fd, fb->fb_id);
drmModeRmFB(device->drm.fd, fb->fb_id);
err_bo:
memset(&destroy_arg, 0, sizeof(destroy_arg));
destroy_arg.handle = create_arg.handle;
drmIoctl(b->drm.fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_arg);
drmIoctl(device->drm.fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_arg);
err_fb:
free(fb);
return NULL;
@ -227,6 +229,7 @@ drm_fb_get_from_dmabuf(struct linux_dmabuf_buffer *dmabuf,
* of GBM_BO_IMPORT_FD_MODIFIER. */
return NULL;
#else
struct drm_device *device = backend->drm;
struct drm_fb *fb;
int i;
struct gbm_import_fd_modifier_data import_mod = {
@ -287,7 +290,7 @@ drm_fb_get_from_dmabuf(struct linux_dmabuf_buffer *dmabuf,
fb->height = dmabuf->attributes.height;
fb->modifier = dmabuf->attributes.modifier[0];
fb->size = 0;
fb->fd = backend->drm.fd;
fb->fd = device->drm.fd;
ARRAY_COPY(fb->strides, dmabuf->attributes.stride);
ARRAY_COPY(fb->offsets, dmabuf->attributes.offset);
@ -302,10 +305,10 @@ drm_fb_get_from_dmabuf(struct linux_dmabuf_buffer *dmabuf,
if (is_opaque)
fb->format = pixel_format_get_opaque_substitute(fb->format);
if (backend->min_width > fb->width ||
fb->width > backend->max_width ||
backend->min_height > fb->height ||
fb->height > backend->max_height) {
if (device->min_width > fb->width ||
fb->width > device->max_width ||
device->min_height > fb->height ||
fb->height > device->max_height) {
weston_log("bo geometry out of bounds\n");
goto err_free;
}
@ -342,6 +345,7 @@ struct drm_fb *
drm_fb_get_from_bo(struct gbm_bo *bo, struct drm_backend *backend,
bool is_opaque, enum drm_fb_type type)
{
struct drm_device *device = backend->drm;
struct drm_fb *fb = gbm_bo_get_user_data(bo);
#ifdef HAVE_GBM_MODIFIERS
int i;
@ -359,7 +363,7 @@ drm_fb_get_from_bo(struct gbm_bo *bo, struct drm_backend *backend,
fb->type = type;
fb->refcnt = 1;
fb->bo = bo;
fb->fd = backend->drm.fd;
fb->fd = device->drm.fd;
fb->width = gbm_bo_get_width(bo);
fb->height = gbm_bo_get_height(bo);
@ -392,10 +396,10 @@ drm_fb_get_from_bo(struct gbm_bo *bo, struct drm_backend *backend,
if (is_opaque)
fb->format = pixel_format_get_opaque_substitute(fb->format);
if (backend->min_width > fb->width ||
fb->width > backend->max_width ||
backend->min_height > fb->height ||
fb->height > backend->max_height) {
if (device->min_width > fb->width ||
fb->width > device->max_width ||
device->min_height > fb->height ||
fb->height > device->max_height) {
weston_log("bo geometry out of bounds\n");
goto err_free;
}
@ -529,6 +533,7 @@ drm_fb_get_from_view(struct drm_output_state *state, struct weston_view *ev,
{
struct drm_output *output = state->output;
struct drm_backend *b = to_drm_backend(output->base.compositor);
struct drm_device *device = b->drm;
struct weston_buffer *buffer = ev->surface->buffer_ref.buffer;
struct drm_buffer_fb *buf_fb;
bool is_opaque = weston_view_is_opaque(ev, &ev->transform.boundingbox);
@ -603,7 +608,7 @@ drm_fb_get_from_view(struct drm_output_state *state, struct weston_view *ev,
/* Check if this buffer can ever go on any planes. If it can't, we have
* no reason to ever have a drm_fb, so we fail it here. */
wl_list_for_each(plane, &b->plane_list, link) {
wl_list_for_each(plane, &device->plane_list, link) {
/* only SHM buffers can go into cursor planes */
if (plane->type == WDRM_PLANE_TYPE_CURSOR)
continue;

@ -108,6 +108,7 @@ weston_hdr_metadata_type1_to_kms(struct hdr_metadata_infoframe *dst,
int
drm_output_ensure_hdr_output_metadata_blob(struct drm_output *output)
{
struct drm_device *device = output->backend->drm;
const struct weston_hdr_metadata_type1 *src;
struct hdr_output_metadata meta;
uint32_t blob_id = 0;
@ -160,7 +161,7 @@ drm_output_ensure_hdr_output_metadata_blob(struct drm_output *output)
return -1;
}
ret = drmModeCreatePropertyBlob(output->backend->drm.fd,
ret = drmModeCreatePropertyBlob(device->drm.fd,
&meta, sizeof meta, &blob_id);
if (ret != 0) {
weston_log("Error: failed to create KMS blob for HDR metadata on output '%s': %s\n",
@ -168,7 +169,7 @@ drm_output_ensure_hdr_output_metadata_blob(struct drm_output *output)
return -1;
}
drmModeDestroyPropertyBlob(output->backend->drm.fd,
drmModeDestroyPropertyBlob(device->drm.fd,
output->hdr_output_metadata_blob_id);
output->hdr_output_metadata_blob_id = blob_id;

@ -288,6 +288,7 @@ drm_property_info_populate(struct drm_backend *b,
unsigned int num_infos,
drmModeObjectProperties *props)
{
struct drm_device *device = b->drm;
drmModePropertyRes *prop;
unsigned i, j;
@ -314,7 +315,7 @@ drm_property_info_populate(struct drm_backend *b,
for (i = 0; i < props->count_props; i++) {
unsigned int k;
prop = drmModeGetProperty(b->drm.fd, props->props[i]);
prop = drmModeGetProperty(device->drm.fd, props->props[i]);
if (!prop)
continue;
@ -436,6 +437,8 @@ drm_plane_populate_formats(struct drm_plane *plane, const drmModePlane *kplane,
const drmModeObjectProperties *props,
const bool use_modifiers)
{
struct drm_backend *backend = plane->backend;
struct drm_device *device = backend->drm;
unsigned i, j;
drmModePropertyBlobRes *blob = NULL;
struct drm_format_modifier_blob *fmt_mod_blob;
@ -454,7 +457,7 @@ drm_plane_populate_formats(struct drm_plane *plane, const drmModePlane *kplane,
if (blob_id == 0)
goto fallback;
blob = drmModeGetPropertyBlob(plane->backend->drm.fd, blob_id);
blob = drmModeGetPropertyBlob(device->drm.fd, blob_id);
if (!blob)
goto fallback;
@ -515,12 +518,13 @@ drm_output_set_gamma(struct weston_output *output_base,
struct drm_output *output = to_drm_output(output_base);
struct drm_backend *backend =
to_drm_backend(output->base.compositor);
struct drm_device *device = backend->drm;
/* check */
if (output_base->gamma_size != size)
return;
rc = drmModeCrtcSetGamma(backend->drm.fd,
rc = drmModeCrtcSetGamma(device->drm.fd,
output->crtc->crtc_id,
size, r, g, b);
if (rc)
@ -539,6 +543,7 @@ drm_output_assign_state(struct drm_output_state *state,
{
struct drm_output *output = state->output;
struct drm_backend *b = to_drm_backend(output->base.compositor);
struct drm_device *device = b->drm;
struct drm_plane_state *plane_state;
struct drm_head *head;
@ -555,13 +560,13 @@ drm_output_assign_state(struct drm_output_state *state,
output->state_cur = state;
if (b->atomic_modeset && mode == DRM_STATE_APPLY_ASYNC) {
if (device->atomic_modeset && mode == DRM_STATE_APPLY_ASYNC) {
drm_debug(b, "\t[CRTC:%u] setting pending flip\n",
output->crtc->crtc_id);
output->atomic_complete_pending = true;
}
if (b->atomic_modeset &&
if (device->atomic_modeset &&
state->protection == WESTON_HDCP_DISABLE)
wl_list_for_each(head, &output->base.head_list, base.output_link)
weston_head_set_content_protection_status(&head->base,
@ -583,7 +588,7 @@ drm_output_assign_state(struct drm_output_state *state,
continue;
}
if (b->atomic_modeset)
if (device->atomic_modeset)
continue;
assert(plane->type != WDRM_PLANE_TYPE_OVERLAY);
@ -597,6 +602,7 @@ drm_output_set_cursor(struct drm_output_state *output_state)
{
struct drm_output *output = output_state->output;
struct drm_backend *b = to_drm_backend(output->base.compositor);
struct drm_device *device = b->drm;
struct drm_crtc *crtc = output->crtc;
struct drm_plane *plane = output->cursor_plane;
struct drm_plane_state *state;
@ -612,7 +618,7 @@ drm_output_set_cursor(struct drm_output_state *output_state)
if (!state->fb) {
pixman_region32_fini(&plane->base.damage);
pixman_region32_init(&plane->base.damage);
drmModeSetCursor(b->drm.fd, crtc->crtc_id, 0, 0, 0);
drmModeSetCursor(device->drm.fd, crtc->crtc_id, 0, 0, 0);
return;
}
@ -621,8 +627,8 @@ drm_output_set_cursor(struct drm_output_state *output_state)
handle = output->gbm_cursor_handle[output->current_cursor];
if (plane->state_cur->fb != state->fb) {
if (drmModeSetCursor(b->drm.fd, crtc->crtc_id, handle,
b->cursor_width, b->cursor_height)) {
if (drmModeSetCursor(device->drm.fd, crtc->crtc_id, handle,
device->cursor_width, device->cursor_height)) {
weston_log("failed to set cursor: %s\n",
strerror(errno));
goto err;
@ -632,7 +638,7 @@ drm_output_set_cursor(struct drm_output_state *output_state)
pixman_region32_fini(&plane->base.damage);
pixman_region32_init(&plane->base.damage);
if (drmModeMoveCursor(b->drm.fd, crtc->crtc_id,
if (drmModeMoveCursor(device->drm.fd, crtc->crtc_id,
state->dest_x, state->dest_y)) {
weston_log("failed to move cursor: %s\n", strerror(errno));
goto err;
@ -641,8 +647,8 @@ drm_output_set_cursor(struct drm_output_state *output_state)
return;
err:
b->cursors_are_broken = true;
drmModeSetCursor(b->drm.fd, crtc->crtc_id, 0, 0, 0);
device->cursors_are_broken = true;
drmModeSetCursor(device->drm.fd, crtc->crtc_id, 0, 0, 0);
}
static int
@ -650,6 +656,7 @@ drm_output_apply_state_legacy(struct drm_output_state *state)
{
struct drm_output *output = state->output;
struct drm_backend *backend = to_drm_backend(output->base.compositor);
struct drm_device *device = backend->drm;
struct drm_plane *scanout_plane = output->scanout_plane;
struct drm_crtc *crtc = output->crtc;
struct drm_property_info *dpms_prop;
@ -681,14 +688,14 @@ drm_output_apply_state_legacy(struct drm_output_state *state)
if (state->dpms != WESTON_DPMS_ON) {
if (output->cursor_plane) {
ret = drmModeSetCursor(backend->drm.fd, crtc->crtc_id,
ret = drmModeSetCursor(device->drm.fd, crtc->crtc_id,
0, 0, 0);
if (ret)
weston_log("drmModeSetCursor failed disable: %s\n",
strerror(errno));
}
ret = drmModeSetCrtc(backend->drm.fd, crtc->crtc_id, 0, 0, 0,
ret = drmModeSetCrtc(device->drm.fd, crtc->crtc_id, 0, 0, 0,
NULL, 0, NULL);
if (ret)
weston_log("drmModeSetCrtc failed disabling: %s\n",
@ -722,12 +729,12 @@ drm_output_apply_state_legacy(struct drm_output_state *state)
assert(scanout_state->in_fence_fd == -1);
mode = to_drm_mode(output->base.current_mode);
if (backend->state_invalid ||
if (device->state_invalid ||
!scanout_plane->state_cur->fb ||
scanout_plane->state_cur->fb->strides[0] !=
scanout_state->fb->strides[0]) {
ret = drmModeSetCrtc(backend->drm.fd, crtc->crtc_id,
ret = drmModeSetCrtc(device->drm.fd, crtc->crtc_id,
scanout_state->fb->fb_id,
0, 0,
connectors, n_conn,
@ -743,7 +750,7 @@ drm_output_apply_state_legacy(struct drm_output_state *state)
crtc->crtc_id, scanout_state->plane->plane_id,
pinfo ? pinfo->drm_format_name : "UNKNOWN");
if (drmModePageFlip(backend->drm.fd, crtc->crtc_id,
if (drmModePageFlip(device->drm.fd, crtc->crtc_id,
scanout_state->fb->fb_id,
DRM_MODE_PAGE_FLIP_EVENT, output) < 0) {
weston_log("queueing pageflip failed: %s\n", strerror(errno));
@ -764,7 +771,7 @@ drm_output_apply_state_legacy(struct drm_output_state *state)
if (dpms_prop->prop_id == 0)
continue;
ret = drmModeConnectorSetProperty(backend->drm.fd,
ret = drmModeConnectorSetProperty(device->drm.fd,
head->connector.connector_id,
dpms_prop->prop_id,
state->dpms);
@ -1059,6 +1066,7 @@ drm_pending_state_apply_atomic(struct drm_pending_state *pending_state,
enum drm_state_apply_mode mode)
{
struct drm_backend *b = pending_state->backend;
struct drm_device *device = b->drm;
struct drm_output_state *output_state, *tmp;
struct drm_plane *plane;
drmModeAtomicReq *req = drmModeAtomicAlloc();
@ -1080,7 +1088,7 @@ drm_pending_state_apply_atomic(struct drm_pending_state *pending_state,
break;
}
if (b->state_invalid) {
if (device->state_invalid) {
struct weston_head *head_base;
struct drm_head *head;
struct drm_crtc *crtc;
@ -1117,7 +1125,7 @@ drm_pending_state_apply_atomic(struct drm_pending_state *pending_state,
ret = -1;
}
wl_list_for_each(crtc, &b->crtc_list, link) {
wl_list_for_each(crtc, &device->crtc_list, link) {
struct drm_property_info *info;
drmModeObjectProperties *props;
uint64_t active;
@ -1130,7 +1138,7 @@ drm_pending_state_apply_atomic(struct drm_pending_state *pending_state,
* off, as the kernel will refuse to generate an event
* for an off->off state and fail the commit.
*/
props = drmModeObjectGetProperties(b->drm.fd,
props = drmModeObjectGetProperties(device->drm.fd,
crtc->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!props) {
@ -1153,7 +1161,7 @@ drm_pending_state_apply_atomic(struct drm_pending_state *pending_state,
/* Disable all the planes; planes which are being used will
* override this state in the output-state application. */
wl_list_for_each(plane, &b->plane_list, link) {
wl_list_for_each(plane, &device->plane_list, link) {
drm_debug(b, "\t\t[atomic] starting with plane %lu disabled\n",
(unsigned long) plane->plane_id);
plane_add_prop(req, plane, WDRM_PLANE_CRTC_ID, 0);
@ -1176,7 +1184,7 @@ drm_pending_state_apply_atomic(struct drm_pending_state *pending_state,
goto out;
}
ret = drmModeAtomicCommit(b->drm.fd, req, flags, b);
ret = drmModeAtomicCommit(device->drm.fd, req, flags, b);
drm_debug(b, "[atomic] drmModeAtomicCommit\n");
/* Test commits do not take ownership of the state; return
@ -1196,7 +1204,7 @@ drm_pending_state_apply_atomic(struct drm_pending_state *pending_state,
link)
drm_output_assign_state(output_state, mode);
b->state_invalid = false;
device->state_invalid = false;
assert(wl_list_empty(&pending_state->output_list));
@ -1228,8 +1236,9 @@ int
drm_pending_state_test(struct drm_pending_state *pending_state)
{
struct drm_backend *b = pending_state->backend;
struct drm_device *device = b->drm;
if (b->atomic_modeset)
if (device->atomic_modeset)
return drm_pending_state_apply_atomic(pending_state,
DRM_STATE_TEST_ONLY);
@ -1249,23 +1258,24 @@ int
drm_pending_state_apply(struct drm_pending_state *pending_state)
{
struct drm_backend *b = pending_state->backend;
struct drm_device *device = b->drm;
struct drm_output_state *output_state, *tmp;
struct drm_crtc *crtc;
if (b->atomic_modeset)
if (device->atomic_modeset)
return drm_pending_state_apply_atomic(pending_state,
DRM_STATE_APPLY_ASYNC);
if (b->state_invalid) {
if (device->state_invalid) {
/* If we need to reset all our state (e.g. because we've
* just started, or just been VT-switched in), explicitly
* disable all the CRTCs we aren't using. This also disables
* all connectors on these CRTCs, so we don't need to do that
* separately with the pre-atomic API. */
wl_list_for_each(crtc, &b->crtc_list, link) {
wl_list_for_each(crtc, &device->crtc_list, link) {
if (crtc->output)
continue;
drmModeSetCrtc(b->drm.fd, crtc->crtc_id, 0, 0, 0,
drmModeSetCrtc(device->drm.fd, crtc->crtc_id, 0, 0, 0,
NULL, 0, NULL);
}
}
@ -1288,7 +1298,7 @@ drm_pending_state_apply(struct drm_pending_state *pending_state)
weston_output_repaint_failed(&output->base);
drm_output_state_free(output->state_cur);
output->state_cur = drm_output_state_alloc(output, NULL);
b->state_invalid = true;
device->state_invalid = true;
if (!b->use_pixman) {
drm_output_fini_egl(output);
drm_output_init_egl(output, b);
@ -1296,7 +1306,7 @@ drm_pending_state_apply(struct drm_pending_state *pending_state)
}
}
b->state_invalid = false;
device->state_invalid = false;
assert(wl_list_empty(&pending_state->output_list));
@ -1315,24 +1325,25 @@ drm_pending_state_apply(struct drm_pending_state *pending_state)
int
drm_pending_state_apply_sync(struct drm_pending_state *pending_state)
{
struct drm_backend *b = pending_state->backend;
struct drm_backend *backend = pending_state->backend;
struct drm_device *device = backend->drm;
struct drm_output_state *output_state, *tmp;
struct drm_crtc *crtc;
if (b->atomic_modeset)
if (device->atomic_modeset)
return drm_pending_state_apply_atomic(pending_state,
DRM_STATE_APPLY_SYNC);
if (b->state_invalid) {
if (device->state_invalid) {
/* If we need to reset all our state (e.g. because we've
* just started, or just been VT-switched in), explicitly
* disable all the CRTCs we aren't using. This also disables
* all connectors on these CRTCs, so we don't need to do that
* separately with the pre-atomic API. */
wl_list_for_each(crtc, &b->crtc_list, link) {
wl_list_for_each(crtc, &device->crtc_list, link) {
if (crtc->output)
continue;
drmModeSetCrtc(b->drm.fd, crtc->crtc_id, 0, 0, 0,
drmModeSetCrtc(device->drm.fd, crtc->crtc_id, 0, 0, 0,
NULL, 0, NULL);
}
}
@ -1349,7 +1360,7 @@ drm_pending_state_apply_sync(struct drm_pending_state *pending_state)
}
}
b->state_invalid = false;
device->state_invalid = false;
assert(wl_list_empty(&pending_state->output_list));
@ -1375,13 +1386,14 @@ page_flip_handler(int fd, unsigned int frame,
{
struct drm_output *output = data;
struct drm_backend *b = to_drm_backend(output->base.compositor);
struct drm_device *device = b->drm;
uint32_t flags = WP_PRESENTATION_FEEDBACK_KIND_VSYNC |
WP_PRESENTATION_FEEDBACK_KIND_HW_COMPLETION |
WP_PRESENTATION_FEEDBACK_KIND_HW_CLOCK;
drm_output_update_msc(output, frame);
assert(!b->atomic_modeset);
assert(!device->atomic_modeset);
assert(output->page_flip_pending);
output->page_flip_pending = false;
@ -1393,6 +1405,7 @@ atomic_flip_handler(int fd, unsigned int frame, unsigned int sec,
unsigned int usec, unsigned int crtc_id, void *data)
{
struct drm_backend *b = data;
struct drm_device *device = b->drm;
struct drm_crtc *crtc;
struct drm_output *output;
uint32_t flags = WP_PRESENTATION_FEEDBACK_KIND_VSYNC |
@ -1413,7 +1426,7 @@ atomic_flip_handler(int fd, unsigned int frame, unsigned int sec,
drm_output_update_msc(output, frame);
drm_debug(b, "[atomic][CRTC:%u] flip processing started\n", crtc_id);
assert(b->atomic_modeset);
assert(device->atomic_modeset);
assert(output->atomic_complete_pending);
output->atomic_complete_pending = false;
@ -1424,12 +1437,12 @@ atomic_flip_handler(int fd, unsigned int frame, unsigned int sec,
int
on_drm_input(int fd, uint32_t mask, void *data)
{
struct drm_backend *b = data;
struct drm_device *device = data;
drmEventContext evctx;
memset(&evctx, 0, sizeof evctx);
evctx.version = 3;
if (b->atomic_modeset)
if (device->atomic_modeset)
evctx.page_flip_handler2 = atomic_flip_handler;
else
evctx.page_flip_handler = page_flip_handler;
@ -1441,12 +1454,13 @@ on_drm_input(int fd, uint32_t mask, void *data)
int
init_kms_caps(struct drm_backend *b)
{
struct drm_device *device = b->drm;
uint64_t cap;
int ret;
weston_log("using %s\n", b->drm.filename);
weston_log("using %s\n", device->drm.filename);
ret = drmGetCap(b->drm.fd, DRM_CAP_TIMESTAMP_MONOTONIC, &cap);
ret = drmGetCap(device->drm.fd, DRM_CAP_TIMESTAMP_MONOTONIC, &cap);
if (ret != 0 || cap != 1) {
weston_log("Error: kernel DRM KMS does not support DRM_CAP_TIMESTAMP_MONOTONIC.\n");
return -1;
@ -1457,43 +1471,43 @@ init_kms_caps(struct drm_backend *b)
return -1;
}
ret = drmGetCap(b->drm.fd, DRM_CAP_CURSOR_WIDTH, &cap);
ret = drmGetCap(device->drm.fd, DRM_CAP_CURSOR_WIDTH, &cap);
if (ret == 0)
b->cursor_width = cap;
device->cursor_width = cap;
else
b->cursor_width = 64;
device->cursor_width = 64;
ret = drmGetCap(b->drm.fd, DRM_CAP_CURSOR_HEIGHT, &cap);
ret = drmGetCap(device->drm.fd, DRM_CAP_CURSOR_HEIGHT, &cap);
if (ret == 0)
b->cursor_height = cap;
device->cursor_height = cap;
else
b->cursor_height = 64;
device->cursor_height = 64;
ret = drmSetClientCap(b->drm.fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
ret = drmSetClientCap(device->drm.fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
if (ret) {
weston_log("Error: drm card doesn't support universal planes!\n");
return -1;
}
if (!getenv("WESTON_DISABLE_ATOMIC")) {
ret = drmGetCap(b->drm.fd, DRM_CAP_CRTC_IN_VBLANK_EVENT, &cap);
ret = drmGetCap(device->drm.fd, DRM_CAP_CRTC_IN_VBLANK_EVENT, &cap);
if (ret != 0)
cap = 0;
ret = drmSetClientCap(b->drm.fd, DRM_CLIENT_CAP_ATOMIC, 1);
b->atomic_modeset = ((ret == 0) && (cap == 1));
ret = drmSetClientCap(device->drm.fd, DRM_CLIENT_CAP_ATOMIC, 1);
device->atomic_modeset = ((ret == 0) && (cap == 1));
}
weston_log("DRM: %s atomic modesetting\n",
b->atomic_modeset ? "supports" : "does not support");
device->atomic_modeset ? "supports" : "does not support");
if (!getenv("WESTON_DISABLE_GBM_MODIFIERS")) {
ret = drmGetCap(b->drm.fd, DRM_CAP_ADDFB2_MODIFIERS, &cap);
ret = drmGetCap(device->drm.fd, DRM_CAP_ADDFB2_MODIFIERS, &cap);
if (ret == 0)
b->fb_modifiers = cap;
device->fb_modifiers = cap;
}
weston_log("DRM: %s GBM modifiers\n",
b->fb_modifiers ? "supports" : "does not support");
device->fb_modifiers ? "supports" : "does not support");
drmSetClientCap(b->drm.fd, DRM_CLIENT_CAP_WRITEBACK_CONNECTORS, 1);
drmSetClientCap(device->drm.fd, DRM_CLIENT_CAP_WRITEBACK_CONNECTORS, 1);
/*
* KMS support for hardware planes cannot properly synchronize
@ -1503,13 +1517,13 @@ init_kms_caps(struct drm_backend *b)
* to a fraction. For cursors, it's not so bad, so they are
* enabled.
*/
if (!b->atomic_modeset || getenv("WESTON_FORCE_RENDERER"))
b->sprites_are_broken = true;
if (!device->atomic_modeset || getenv("WESTON_FORCE_RENDERER"))
device->sprites_are_broken = true;
ret = drmSetClientCap(b->drm.fd, DRM_CLIENT_CAP_ASPECT_RATIO, 1);
b->aspect_ratio_supported = (ret == 0);
ret = drmSetClientCap(device->drm.fd, DRM_CLIENT_CAP_ASPECT_RATIO, 1);
device->aspect_ratio_supported = (ret == 0);
weston_log("DRM: %s picture aspect ratio\n",
b->aspect_ratio_supported ? "supports" : "does not support");
device->aspect_ratio_supported ? "supports" : "does not support");
return 0;
}

@ -100,12 +100,13 @@ drm_subpixel_to_wayland(int drm_value)
int
drm_mode_ensure_blob(struct drm_backend *backend, struct drm_mode *mode)
{
struct drm_device *device = backend->drm;
int ret;
if (mode->blob_id)
return 0;
ret = drmModeCreatePropertyBlob(backend->drm.fd,
ret = drmModeCreatePropertyBlob(device->drm.fd,
&mode->mode_info,
sizeof(mode->mode_info),
&mode->blob_id);
@ -320,6 +321,8 @@ find_and_parse_output_edid(struct drm_head *head,
const char **serial_number,
uint32_t *eotf_mask)
{
struct drm_backend *backend = head->backend;
struct drm_device *device = backend->drm;
drmModePropertyBlobPtr edid_blob = NULL;
uint32_t blob_id;
int rc;
@ -331,7 +334,7 @@ find_and_parse_output_edid(struct drm_head *head,
if (!blob_id)
return;
edid_blob = drmModeGetPropertyBlob(head->backend->drm.fd, blob_id);
edid_blob = drmModeGetPropertyBlob(device->drm.fd, blob_id);
if (!edid_blob)
return;
@ -360,7 +363,7 @@ prune_eotf_modes_by_kms_support(struct drm_head *head, uint32_t *eotf_mask)
/* Without the KMS property, cannot do anything but SDR. */
info = &head->connector.props[WDRM_CONNECTOR_HDR_OUTPUT_METADATA];
if (!head->backend->atomic_modeset || info->prop_id == 0)
if (!head->backend->drm->atomic_modeset || info->prop_id == 0)
*eotf_mask = WESTON_EOTF_MODE_SDR;
}
@ -426,8 +429,10 @@ drm_output_add_mode(struct drm_output *output, const drmModeModeInfo *info)
static void
drm_output_destroy_mode(struct drm_backend *backend, struct drm_mode *mode)
{
struct drm_device *device = backend->drm;
if (mode->blob_id)
drmModeDestroyPropertyBlob(backend->drm.fd, mode->blob_id);
drmModeDestroyPropertyBlob(device->drm.fd, mode->blob_id);
wl_list_remove(&mode->base.link);
free(mode);
}
@ -488,15 +493,17 @@ drm_output_choose_mode(struct drm_output *output,
enum weston_mode_aspect_ratio src_aspect = WESTON_MODE_PIC_AR_NONE;
enum weston_mode_aspect_ratio target_aspect = WESTON_MODE_PIC_AR_NONE;
struct drm_backend *b;
struct drm_device *device;
b = to_drm_backend(output->base.compositor);
device = b->drm;
target_aspect = target_mode->aspect_ratio;
src_aspect = output->base.current_mode->aspect_ratio;
if (output->base.current_mode->width == target_mode->width &&
output->base.current_mode->height == target_mode->height &&
(output->base.current_mode->refresh == target_mode->refresh ||
target_mode->refresh == 0)) {
if (!b->aspect_ratio_supported || src_aspect == target_aspect)
if (!device->aspect_ratio_supported || src_aspect == target_aspect)
return to_drm_mode(output->base.current_mode);
}
@ -507,7 +514,7 @@ drm_output_choose_mode(struct drm_output *output,
mode->mode_info.vdisplay == target_mode->height) {
if (mode->base.refresh == target_mode->refresh ||
target_mode->refresh == 0) {
if (!b->aspect_ratio_supported ||
if (!device->aspect_ratio_supported ||
src_aspect == target_aspect)
return mode;
else if (!mode_fall_back)
@ -574,6 +581,7 @@ drm_output_choose_initial_mode(struct drm_backend *backend,
const char *modeline,
const drmModeModeInfo *current_mode)
{
struct drm_device *device = backend->drm;
struct drm_mode *preferred = NULL;
struct drm_mode *current = NULL;
struct drm_mode *configured = NULL;
@ -592,7 +600,7 @@ drm_output_choose_initial_mode(struct drm_backend *backend,
if (mode == WESTON_DRM_BACKEND_OUTPUT_PREFERRED && modeline) {
n = sscanf(modeline, "%dx%d@%d %u:%u", &width, &height,
&refresh, &aspect_width, &aspect_height);
if (backend->aspect_ratio_supported && n == 5) {
if (device->aspect_ratio_supported && n == 5) {
if (aspect_width == 4 && aspect_height == 3)
aspect_ratio = WESTON_MODE_PIC_AR_4_3;
else if (aspect_width == 16 && aspect_height == 9)
@ -623,7 +631,7 @@ drm_output_choose_initial_mode(struct drm_backend *backend,
if (width == drm_mode->base.width &&
height == drm_mode->base.height &&
(refresh == 0 || refresh == drm_mode->mode_info.vrefresh)) {
if (!backend->aspect_ratio_supported ||
if (!device->aspect_ratio_supported ||
aspect_ratio == drm_mode->base.aspect_ratio)
configured = drm_mode;
else

@ -73,6 +73,9 @@ drm_plane_state_alloc(struct drm_output_state *state_output,
void
drm_plane_state_free(struct drm_plane_state *state, bool force)
{
struct drm_backend *backend;
struct drm_device *device;
if (!state)
return;
@ -86,7 +89,10 @@ drm_plane_state_free(struct drm_plane_state *state, bool force)
* by the kernel, which means we can safely discard it.
*/
if (state->damage_blob_id != 0) {
drmModeDestroyPropertyBlob(state->plane->backend->drm.fd,
backend = state->plane->backend;
device = backend->drm;
drmModeDestroyPropertyBlob(device->drm.fd,
state->damage_blob_id);
state->damage_blob_id = 0;
}

@ -86,10 +86,11 @@ drm_output_try_view_on_plane(struct drm_plane *plane,
struct weston_compositor *ec = output->base.compositor;
struct weston_surface *surface = ev->surface;
struct drm_backend *b = to_drm_backend(ec);
struct drm_device *device = b->drm;
struct drm_plane_state *state = NULL;
assert(!b->sprites_are_broken);
assert(b->atomic_modeset);
assert(!device->sprites_are_broken);
assert(device->atomic_modeset);
assert(fb);
assert(mode == DRM_OUTPUT_PROPOSE_STATE_PLANES_ONLY ||
(mode == DRM_OUTPUT_PROPOSE_STATE_MIXED &&
@ -162,16 +163,17 @@ static void
cursor_bo_update(struct drm_plane_state *plane_state, struct weston_view *ev)
{
struct drm_backend *b = plane_state->plane->backend;
struct drm_device *device = b->drm;
struct gbm_bo *bo = plane_state->fb->bo;
struct weston_buffer *buffer = ev->surface->buffer_ref.buffer;
uint32_t buf[b->cursor_width * b->cursor_height];
uint32_t buf[device->cursor_width * device->cursor_height];
int32_t stride;
uint8_t *s;
int i;
assert(buffer && buffer->shm_buffer);
assert(buffer->width <= b->cursor_width);
assert(buffer->height <= b->cursor_height);
assert(buffer->width <= device->cursor_width);
assert(buffer->height <= device->cursor_height);
memset(buf, 0, sizeof buf);
stride = wl_shm_buffer_get_stride(buffer->shm_buffer);
@ -179,7 +181,7 @@ cursor_bo_update(struct drm_plane_state *plane_state, struct weston_view *ev)
wl_shm_buffer_begin_access(buffer->shm_buffer);
for (i = 0; i < buffer->height; i++)
memcpy(buf + i * b->cursor_width,
memcpy(buf + i * device->cursor_width,
s + i * stride,
buffer->width * 4);
wl_shm_buffer_end_access(buffer->shm_buffer);
@ -194,12 +196,13 @@ drm_output_prepare_cursor_view(struct drm_output_state *output_state,
{
struct drm_output *output = output_state->output;
struct drm_backend *b = to_drm_backend(output->base.compositor);
struct drm_device *device = b->drm;
struct drm_plane *plane = output->cursor_plane;
struct drm_plane_state *plane_state;
bool needs_update = false;
const char *p_name = drm_output_get_plane_type_name(plane);
assert(!b->cursors_are_broken);
assert(!device->cursors_are_broken);
assert(plane);
assert(plane->state_cur->complete);
assert(!plane->state_cur->output || plane->state_cur->output == output);
@ -220,8 +223,8 @@ drm_output_prepare_cursor_view(struct drm_output_state *output_state,
}
if (plane_state->src_x != 0 || plane_state->src_y != 0 ||
plane_state->src_w > (unsigned) b->cursor_width << 16 ||
plane_state->src_h > (unsigned) b->cursor_height << 16 ||
plane_state->src_w > (unsigned) device->cursor_width << 16 ||
plane_state->src_h > (unsigned) device->cursor_height << 16 ||
plane_state->src_w != plane_state->dest_w << 16 ||
plane_state->src_h != plane_state->dest_h << 16) {
drm_debug(b, "\t\t\t\t[%s] not assigning view %p to %s plane "
@ -260,10 +263,10 @@ drm_output_prepare_cursor_view(struct drm_output_state *output_state,
* a buffer which is always cursor_width x cursor_height, even if the
* surface we want to promote is actually smaller than this. Manually
* mangle the plane state to deal with this. */
plane_state->src_w = b->cursor_width << 16;
plane_state->src_h = b->cursor_height << 16;
plane_state->dest_w = b->cursor_width;
plane_state->dest_h = b->cursor_height;
plane_state->src_w = device->cursor_width << 16;
plane_state->src_h = device->cursor_height << 16;
plane_state->dest_w = device->cursor_width;
plane_state->dest_h = device->cursor_height;
drm_debug(b, "\t\t\t\t[%s] provisionally assigned view %p to cursor\n",
p_name, ev);
@ -328,7 +331,8 @@ dmabuf_feedback_maybe_update(struct drm_backend *b, struct weston_view *ev,
{
struct weston_dmabuf_feedback *dmabuf_feedback = ev->surface->dmabuf_feedback;
struct weston_dmabuf_feedback_tranche *scanout_tranche;
dev_t scanout_dev = b->drm.devnum;
struct drm_device *device = b->drm;
dev_t scanout_dev = device->drm.devnum;
uint32_t scanout_flags = ZWP_LINUX_DMABUF_FEEDBACK_V1_TRANCHE_FLAGS_SCANOUT;
uint32_t action_needed = ACTION_NEEDED_NONE;
struct timespec current_time, delta_time;
@ -428,6 +432,7 @@ drm_output_find_plane_for_view(struct drm_output_state *state,
{
struct drm_output *output = state->output;
struct drm_backend *b = to_drm_backend(output->base.compositor);
struct drm_device *device = b->drm;
struct drm_plane_state *ps = NULL;
struct drm_plane *plane;
@ -454,7 +459,7 @@ drm_output_find_plane_for_view(struct drm_output_state *state,
FAILURE_REASONS_FB_FORMAT_INCOMPATIBLE;
return NULL;
} else if (buffer->type == WESTON_BUFFER_SHM) {
if (!output->cursor_plane || b->cursors_are_broken) {
if (!output->cursor_plane || device->cursors_are_broken) {
pnode->try_view_on_plane_failure_reasons |=
FAILURE_REASONS_FB_FORMAT_INCOMPATIBLE;
return NULL;
@ -471,8 +476,8 @@ drm_output_find_plane_for_view(struct drm_output_state *state,
return NULL;
}
if (buffer->width > b->cursor_width ||
buffer->height > b->cursor_height) {
if (buffer->width > device->cursor_width ||
buffer->height > device->cursor_height) {
drm_debug(b, "\t\t\t\t[view] not assigning view %p to plane "
"(buffer (%dx%d) too large for cursor plane)\n",
ev, buffer->width, buffer->height);
@ -507,7 +512,7 @@ drm_output_find_plane_for_view(struct drm_output_state *state,
state);
/* assemble a list with possible candidates */
wl_list_for_each(plane, &b->plane_list, link) {
wl_list_for_each(plane, &device->plane_list, link) {
const char *p_name = drm_output_get_plane_type_name(plane);
uint64_t zpos;
@ -912,7 +917,8 @@ void
drm_assign_planes(struct weston_output *output_base)
{
struct drm_backend *b = to_drm_backend(output_base->compositor);
struct drm_pending_state *pending_state = b->repaint_data;
struct drm_device *device = b->drm;
struct drm_pending_state *pending_state = device->repaint_data;
struct drm_output *output = to_drm_output(output_base);
struct drm_output_state *state = NULL;
struct drm_plane_state *plane_state;
@ -923,7 +929,7 @@ drm_assign_planes(struct weston_output *output_base)
drm_debug(b, "\t[repaint] preparing state for output %s (%lu)\n",
output_base->name, (unsigned long) output_base->id);
if (!b->sprites_are_broken && !output->virtual && b->gbm) {
if (!device->sprites_are_broken && !output->virtual && b->gbm) {
drm_debug(b, "\t[repaint] trying planes-only build state\n");
state = drm_output_propose_state(output_base, pending_state, mode);
if (!state) {
@ -985,8 +991,8 @@ drm_assign_planes(struct weston_output *output_base)
buffer->type == WESTON_BUFFER_RENDERER_OPAQUE)
ev->surface->keep_buffer = true;
else if (buffer->type == WESTON_BUFFER_SHM &&
(ev->surface->width <= b->cursor_width &&
ev->surface->height <= b->cursor_height))
(ev->surface->width <= device->cursor_width &&
ev->surface->height <= device->cursor_height))
ev->surface->keep_buffer = true;
}
