libweston: Support zwp_surface_synchronization_v1.get_release

Implement the get_release request of the zwp_surface_synchronization_v1
interface.

This commit implements the zwp_buffer_release_v1 interface. It supports
the zwp_buffer_release_v1.fenced_release event for surfaces rendered by
the GL renderer, and the zwp_buffer_release_v1.immediate_release event
for other cases.

Note that the immediate_release event is safe to use for surface buffers
placed on planes by the DRM backend, since the backend releases them
only after the next page flip that doesn't use the buffers has finished.
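
As a point of reference (not part of this commit), the client-facing flow
looks roughly like the sketch below. The helper and handler names
(attach_with_explicit_release, handle_fenced_release,
handle_immediate_release) are illustrative; only the protocol calls
generated from linux-explicit-synchronization-unstable-v1 are real, and
they match the ones exercised by the tests added further down.

#include <unistd.h>
#include <wayland-client.h>
#include "linux-explicit-synchronization-unstable-v1-client-protocol.h"

static void
handle_fenced_release(void *data, struct zwp_linux_buffer_release_v1 *release,
                      int32_t fence_fd)
{
        /* The attached buffer may be reused once fence_fd signals. */
        close(fence_fd);
        zwp_linux_buffer_release_v1_destroy(release);
}

static void
handle_immediate_release(void *data,
                         struct zwp_linux_buffer_release_v1 *release)
{
        /* The attached buffer may be reused right away. */
        zwp_linux_buffer_release_v1_destroy(release);
}

static const struct zwp_linux_buffer_release_v1_listener release_listener = {
        handle_fenced_release,
        handle_immediate_release,
};

/* Request a per-commit release object before committing a buffer. */
static void
attach_with_explicit_release(struct wl_surface *surface,
                             struct zwp_linux_surface_synchronization_v1 *sync,
                             struct wl_buffer *buffer)
{
        struct zwp_linux_buffer_release_v1 *release =
                zwp_linux_surface_synchronization_v1_get_release(sync);

        zwp_linux_buffer_release_v1_add_listener(release, &release_listener,
                                                 NULL);
        wl_surface_attach(surface, buffer, 0, 0);
        wl_surface_commit(surface);
}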

Changes in v7:
  - Remove "partial" from commit title and description.
  - Fix inverted check when clearing used_in_output_repaint flag.

Changes in v5:
  - Use the new, generic explicit sync server error reporting function.
  - Introduce and use weston_buffer_release_move.
  - Introduce internally and use weston_buffer_release_destroy.

Changes in v4:
  - Support the zwp_buffer_release_v1.fenced_release event.
  - Support release fences in the GL renderer.
  - Assert that pending state buffer_release is always NULL after a
    commit.
  - Simplify weston_buffer_release_reference.
  - Move removal of destroy listener before resource destruction to
    avoid concerns about use-after-free in
    weston_buffer_release_reference.
  - Rename weston_buffer_release_reference.busy_count to ref_count.
  - Add documentation for weston_buffer_release and ..._reference.

Changes in v3:
  - Raise NO_BUFFER for get_release if no buffer has been committed,
    don't raise UNSUPPORTED_BUFFER for non-dmabuf buffers,
    so get_release works for all valid buffers.
  - Destroy the buffer_release object after sending an event.
  - Track lifetime of buffer_release objects per commit, independently
    of any buffers.
  - Use updated protocol interface names.
  - Use correct format specifier for resource ids.

Changes in v2:
  - Raise UNSUPPORTED_BUFFER at commit if client has requested a
    buffer_release, but the committed buffer is not a valid linux_dmabuf.
  - Remove tests that are not viable anymore due to our inability to
    create dmabuf buffers and fences in a unit-test environment.

Signed-off-by: Alexandros Frantzis <alexandros.frantzis@collabora.com>
Committed by Pekka Paalanen
commit 676296749a (parent acff29b3b3)
---
 libweston/compositor-drm.c                  |  10
 libweston/compositor.c                      |  89
 libweston/compositor.h                      |  30
 libweston/gl-renderer.c                     |  93
 libweston/linux-explicit-synchronization.c  |  56
 libweston/pixman-renderer.c                 |   6
 tests/linux-explicit-synchronization-test.c | 181
 7 files changed

--- a/libweston/compositor-drm.c
+++ b/libweston/compositor-drm.c
@@ -385,6 +385,7 @@ struct drm_fb {
int width, height;
int fd;
struct weston_buffer_reference buffer_ref;
struct weston_buffer_release_reference buffer_release_ref;
/* Used by gbm fbs */
struct gbm_bo *bo;
@@ -951,6 +952,7 @@ drm_fb_destroy(struct drm_fb *fb)
if (fb->fb_id != 0)
drmModeRmFB(fb->fd, fb->fb_id);
weston_buffer_reference(&fb->buffer_ref, NULL);
weston_buffer_release_reference(&fb->buffer_release_ref, NULL);
free(fb);
}
@@ -1338,11 +1340,14 @@ err_free:
}
static void
-drm_fb_set_buffer(struct drm_fb *fb, struct weston_buffer *buffer)
+drm_fb_set_buffer(struct drm_fb *fb, struct weston_buffer *buffer,
+struct weston_buffer_release *buffer_release)
{
assert(fb->buffer_ref.buffer == NULL);
assert(fb->type == BUFFER_CLIENT || fb->type == BUFFER_DMABUF);
weston_buffer_reference(&fb->buffer_ref, buffer);
weston_buffer_release_reference(&fb->buffer_release_ref,
buffer_release);
}
static void
@@ -1650,7 +1655,8 @@ drm_fb_get_from_view(struct drm_output_state *state, struct weston_view *ev)
drm_debug(b, "\t\t\t[view] view %p format: %s\n",
ev, fb->format->drm_format_name);
-drm_fb_set_buffer(fb, buffer);
+drm_fb_set_buffer(fb, buffer,
+ev->surface->buffer_release_ref.buffer_release);
return fb;
}

--- a/libweston/compositor.c
+++ b/libweston/compositor.c
@@ -61,6 +61,7 @@
#include "viewporter-server-protocol.h"
#include "presentation-time-server-protocol.h"
#include "linux-explicit-synchronization-unstable-v1-server-protocol.h"
#include "linux-explicit-synchronization.h"
#include "shared/fd-util.h"
#include "shared/helpers.h"
#include "shared/os-compatibility.h"
@@ -476,6 +477,7 @@ weston_surface_state_fini(struct weston_surface_state *state)
state->buffer = NULL;
fd_clear(&state->acquire_fence_fd);
weston_buffer_release_reference(&state->buffer_release_ref, NULL);
}
static void
@@ -2011,6 +2013,7 @@ weston_surface_destroy(struct weston_surface *surface)
weston_surface_state_fini(&surface->pending);
weston_buffer_reference(&surface->buffer_ref, NULL);
weston_buffer_release_reference(&surface->buffer_release_ref, NULL);
pixman_region32_fini(&surface->damage);
pixman_region32_fini(&surface->opaque);
@@ -2125,6 +2128,68 @@ weston_buffer_reference(struct weston_buffer_reference *ref,
ref->destroy_listener.notify = weston_buffer_reference_handle_destroy;
}
static void
weston_buffer_release_reference_handle_destroy(struct wl_listener *listener,
void *data)
{
struct weston_buffer_release_reference *ref =
container_of(listener, struct weston_buffer_release_reference,
destroy_listener);
assert((struct wl_resource *)data == ref->buffer_release->resource);
ref->buffer_release = NULL;
}
static void
weston_buffer_release_destroy(struct weston_buffer_release *buffer_release)
{
struct wl_resource *resource = buffer_release->resource;
int release_fence_fd = buffer_release->fence_fd;
if (release_fence_fd >= 0) {
zwp_linux_buffer_release_v1_send_fenced_release(
resource, release_fence_fd);
} else {
zwp_linux_buffer_release_v1_send_immediate_release(
resource);
}
wl_resource_destroy(resource);
}
WL_EXPORT void
weston_buffer_release_reference(struct weston_buffer_release_reference *ref,
struct weston_buffer_release *buffer_release)
{
if (buffer_release == ref->buffer_release)
return;
if (ref->buffer_release) {
ref->buffer_release->ref_count--;
wl_list_remove(&ref->destroy_listener.link);
if (ref->buffer_release->ref_count == 0)
weston_buffer_release_destroy(ref->buffer_release);
}
if (buffer_release) {
buffer_release->ref_count++;
wl_resource_add_destroy_listener(buffer_release->resource,
&ref->destroy_listener);
}
ref->buffer_release = buffer_release;
ref->destroy_listener.notify =
weston_buffer_release_reference_handle_destroy;
}
WL_EXPORT void
weston_buffer_release_move(struct weston_buffer_release_reference *dest,
struct weston_buffer_release_reference *src)
{
weston_buffer_release_reference(dest, src->buffer_release);
weston_buffer_release_reference(src, NULL);
}
static void
weston_surface_attach(struct weston_surface *surface,
struct weston_buffer *buffer)
@@ -2250,8 +2315,11 @@ compositor_accumulate_damage(struct weston_compositor *ec)
* reference now, and allow early buffer release. This enables
* clients to use single-buffering.
*/
-if (!ev->surface->keep_buffer)
+if (!ev->surface->keep_buffer) {
weston_buffer_reference(&ev->surface->buffer_ref, NULL);
weston_buffer_release_reference(
&ev->surface->buffer_release_ref, NULL);
}
}
}
@@ -3231,11 +3299,15 @@ weston_surface_commit_state(struct weston_surface *surface,
/* zwp_surface_synchronization_v1.set_acquire_fence */
fd_move(&surface->acquire_fence_fd,
&state->acquire_fence_fd);
/* zwp_surface_synchronization_v1.get_release */
weston_buffer_release_move(&surface->buffer_release_ref,
&state->buffer_release_ref);
weston_surface_attach(surface, state->buffer);
}
weston_surface_state_set_buffer(state, NULL);
assert(state->acquire_fence_fd == -1);
assert(state->buffer_release_ref.buffer_release == NULL);
weston_surface_build_buffer_matrix(surface,
&surface->surface_to_buffer_matrix);
@@ -3375,6 +3447,17 @@ surface_commit(struct wl_client *client, struct wl_resource *resource)
}
}
if (surface->pending.buffer_release_ref.buffer_release &&
!surface->pending.buffer) {
assert(surface->synchronization_resource);
wl_resource_post_error(surface->synchronization_resource,
ZWP_LINUX_SURFACE_SYNCHRONIZATION_V1_ERROR_NO_BUFFER,
"wl_surface@%"PRIu32" no buffer for synchronization",
wl_resource_get_id(resource));
return;
}
if (sub) {
weston_subsurface_commit(sub);
return;
@@ -3584,8 +3667,12 @@ weston_subsurface_commit_to_cache(struct weston_subsurface *sub)
/* zwp_surface_synchronization_v1.set_acquire_fence */
fd_move(&sub->cached.acquire_fence_fd,
&surface->pending.acquire_fence_fd);
/* zwp_surface_synchronization_v1.get_release */
weston_buffer_release_move(&sub->cached.buffer_release_ref,
&surface->pending.buffer_release_ref);
}
assert(surface->pending.acquire_fence_fd == -1);
assert(surface->pending.buffer_release_ref.buffer_release == NULL);
sub->cached.sx += surface->pending.sx;
sub->cached.sy += surface->pending.sy;

--- a/libweston/compositor.h
+++ b/libweston/compositor.h
@@ -1215,6 +1215,24 @@ struct weston_buffer_viewport {
int changed;
};
struct weston_buffer_release {
/* The associated zwp_linux_buffer_release_v1 resource. */
struct wl_resource *resource;
/* How many weston_buffer_release_reference objects point to this
* object. */
uint32_t ref_count;
/* The fence fd, if any, associated with this release. If the fence fd
* is -1 then this is considered an immediate release. */
int fence_fd;
};
struct weston_buffer_release_reference {
struct weston_buffer_release *buffer_release;
/* Listener for the destruction of the wl_resource associated with the
* referenced buffer_release object. */
struct wl_listener destroy_listener;
};
struct weston_region {
struct wl_resource *resource;
pixman_region32_t region;
@@ -1368,6 +1386,9 @@ struct weston_surface_state {
/* zwp_surface_synchronization_v1.set_acquire_fence */
int acquire_fence_fd;
/* zwp_surface_synchronization_v1.get_release */
struct weston_buffer_release_reference buffer_release_ref;
};
struct weston_surface_activation_data {
@@ -1496,6 +1517,7 @@ struct weston_surface {
/* zwp_surface_synchronization_v1 resource for this surface */
struct wl_resource *synchronization_resource;
int acquire_fence_fd;
struct weston_buffer_release_reference buffer_release_ref;
};
struct weston_subsurface {
@@ -1946,6 +1968,14 @@ void
weston_buffer_reference(struct weston_buffer_reference *ref,
struct weston_buffer *buffer);
void
weston_buffer_release_reference(struct weston_buffer_release_reference *ref,
struct weston_buffer_release *buf_release);
void
weston_buffer_release_move(struct weston_buffer_release_reference *dest,
struct weston_buffer_release_reference *src);
void
weston_compositor_get_time(struct timespec *time);
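
As a usage sketch for these declarations (illustrative only; struct
release_holder and its helpers are hypothetical, but the pattern mirrors
the call sites this commit adds in the renderers and the DRM backend):

#include "compositor.h"

/* Illustrative holder of a release reference; a renderer surface state
 * or a drm_fb plays this role in the actual patch. Assumes
 * buffer_release_ref starts out zero-initialized. */
struct release_holder {
        struct weston_buffer_release_reference buffer_release_ref;
};

static void
release_holder_take(struct release_holder *h, struct weston_surface *surface)
{
        /* Taking a reference bumps ref_count and installs a destroy
         * listener on the zwp_linux_buffer_release_v1 resource. */
        weston_buffer_release_reference(
                &h->buffer_release_ref,
                surface->buffer_release_ref.buffer_release);
}

static void
release_holder_drop(struct release_holder *h)
{
        /* Dropping the last reference sends fenced_release if a fence fd
         * was set, immediate_release otherwise, then destroys the
         * resource. */
        weston_buffer_release_reference(&h->buffer_release_ref, NULL);
}

weston_buffer_release_move() applies the same take/drop pair across two
references, and is what carries the reference from pending to current
(or cached subsurface) state on commit.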

--- a/libweston/gl-renderer.c
+++ b/libweston/gl-renderer.c
@@ -52,6 +52,7 @@
#include "linux-explicit-synchronization.h"
#include "pixel-formats.h"
#include "shared/fd-util.h"
#include "shared/helpers.h"
#include "shared/platform.h"
#include "shared/timespec-util.h"
@@ -175,6 +176,7 @@ struct gl_surface_state {
int num_images;
struct weston_buffer_reference buffer_ref;
struct weston_buffer_release_reference buffer_release_ref;
enum buffer_type buffer_type;
int pitch; /* in pixels */
int height; /* in pixels */
@@ -187,6 +189,10 @@ struct gl_surface_state {
struct weston_surface *surface;
/* Whether this surface was used in the current output repaint.
Used only in the context of a gl_renderer_repaint_output call. */
bool used_in_output_repaint;
struct wl_listener surface_destroy_listener;
struct wl_listener renderer_destroy_listener;
};
@@ -1022,12 +1028,14 @@ draw_view(struct weston_view *ev, struct weston_output *output,
glDisable(GL_BLEND);
repaint_region(ev, &repaint, &surface_opaque);
gs->used_in_output_repaint = true;
}
if (pixman_region32_not_empty(&surface_blend)) {
use_shader(gr, gs->shader);
glEnable(GL_BLEND);
repaint_region(ev, &repaint, &surface_blend);
gs->used_in_output_repaint = true;
}
pixman_region32_fini(&surface_blend);
@@ -1048,6 +1056,73 @@ repaint_views(struct weston_output *output, pixman_region32_t *damage)
draw_view(view, output, damage);
}
static int
gl_renderer_create_fence_fd(struct weston_output *output);
/* Updates the release fences of surfaces that were used in the current output
* repaint. Should only be used from gl_renderer_repaint_output, so that the
* information in gl_surface_state.used_in_output_repaint is accurate.
*/
static void
update_buffer_release_fences(struct weston_compositor *compositor,
struct weston_output *output)
{
struct weston_view *view;
wl_list_for_each_reverse(view, &compositor->view_list, link) {
struct gl_surface_state *gs;
struct weston_buffer_release *buffer_release;
int fence_fd;
if (view->plane != &compositor->primary_plane)
continue;
gs = get_surface_state(view->surface);
buffer_release = gs->buffer_release_ref.buffer_release;
if (!gs->used_in_output_repaint || !buffer_release)
continue;
fence_fd = gl_renderer_create_fence_fd(output);
/* If we have a buffer_release then it means we support fences,
* and we should be able to create the release fence. If we
* can't, something has gone horribly wrong, so disconnect the
* client.
*/
if (fence_fd == -1) {
linux_explicit_synchronization_send_server_error(
buffer_release->resource,
"Failed to create release fence");
fd_clear(&buffer_release->fence_fd);
continue;
}
/* At the moment it is safe to just replace the fence_fd,
* discarding the previous one:
*
* 1. If the previous fence fd represents a sync fence from
* a previous repaint cycle, that fence fd is now not
* sufficient to provide the release guarantee and should
* be replaced.
*
* 2. If the fence fd represents a sync fence from another
* output in the same repaint cycle, it's fine to replace
* it since we are rendering to all outputs using the same
* EGL context, so a fence issued for a later output rendering
* is guaranteed to signal after fences for previous output
* renderings.
*
* Note that the above is only valid if the buffer_release
* fences only originate from the GL renderer, which guarantees
* a total order of operations and fences. If we introduce
* fences from other sources (e.g., plane out-fences), we will
* need to merge fences instead.
*/
fd_update(&buffer_release->fence_fd, fence_fd);
}
}
static void
draw_output_border_texture(struct gl_output_state *go,
enum gl_renderer_border_side side,
@@ -1298,10 +1373,21 @@ gl_renderer_repaint_output(struct weston_output *output,
pixman_box32_t *rects;
pixman_region32_t buffer_damage, total_damage;
enum gl_border_status border_damage = BORDER_STATUS_CLEAN;
struct weston_view *view;
if (use_output(output) < 0)
return;
/* Clear the used_in_output_repaint flag, so that we can properly track
* which surfaces were used in this output repaint. */
wl_list_for_each_reverse(view, &compositor->view_list, link) {
if (view->plane == &compositor->primary_plane) {
struct gl_surface_state *gs =
get_surface_state(view->surface);
gs->used_in_output_repaint = false;
}
}
if (go->begin_render_sync != EGL_NO_SYNC_KHR)
gr->destroy_sync(gr->egl_display, go->begin_render_sync);
if (go->end_render_sync != EGL_NO_SYNC_KHR)
@@ -1413,6 +1499,8 @@ gl_renderer_repaint_output(struct weston_output *output,
TIMELINE_RENDER_POINT_TYPE_BEGIN);
timeline_submit_render_sync(gr, compositor, output, go->end_render_sync,
TIMELINE_RENDER_POINT_TYPE_END);
update_buffer_release_fences(compositor, output);
}
static int
@@ -1571,6 +1659,7 @@ done:
gs->needs_full_upload = false;
weston_buffer_reference(&gs->buffer_ref, NULL);
weston_buffer_release_reference(&gs->buffer_release_ref, NULL);
}
static void
@@ -2395,6 +2484,8 @@ gl_renderer_attach(struct weston_surface *es, struct weston_buffer *buffer)
int i;
weston_buffer_reference(&gs->buffer_ref, buffer);
weston_buffer_release_reference(&gs->buffer_release_ref,
es->buffer_release_ref.buffer_release);
if (!buffer) {
for (i = 0; i < gs->num_images; i++) {
@@ -2427,6 +2518,7 @@ gl_renderer_attach(struct weston_surface *es, struct weston_buffer *buffer)
gl_renderer_print_egl_error_state();
}
weston_buffer_reference(&gs->buffer_ref, NULL);
weston_buffer_release_reference(&gs->buffer_release_ref, NULL);
gs->buffer_type = BUFFER_TYPE_NULL;
gs->y_inverted = 1;
es->is_opaque = false;
@@ -2614,6 +2706,7 @@ surface_state_destroy(struct gl_surface_state *gs, struct gl_renderer *gr)
egl_image_unref(gs->images[i]);
weston_buffer_reference(&gs->buffer_ref, NULL);
weston_buffer_release_reference(&gs->buffer_release_ref, NULL);
pixman_region32_fini(&gs->texture_damage);
free(gs);
}
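
gl_renderer_create_fence_fd() is only forward-declared in the hunk above.
As a hedged sketch of how such a release fence fd could be produced (this
is not the commit's code; it assumes EGL_KHR_fence_sync and
EGL_ANDROID_native_fence_sync are available and that the extension entry
points were resolved with eglGetProcAddress() at renderer setup):

#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES2/gl2.h>

/* Assumed to be filled in via eglGetProcAddress() during setup. */
static PFNEGLCREATESYNCKHRPROC create_sync;
static PFNEGLDUPNATIVEFENCEFDANDROIDPROC dup_native_fence_fd;
static PFNEGLDESTROYSYNCKHRPROC destroy_sync;

static int
create_release_fence_fd(EGLDisplay egl_display)
{
        EGLSyncKHR sync;
        int fd;

        sync = create_sync(egl_display, EGL_SYNC_NATIVE_FENCE_ANDROID, NULL);
        if (sync == EGL_NO_SYNC_KHR)
                return -1;

        /* The native fence fd only becomes available once the sync
         * object has been flushed to the GPU. */
        glFlush();

        fd = dup_native_fence_fd(egl_display, sync);
        destroy_sync(egl_display, sync);

        return fd == EGL_NO_NATIVE_FENCE_FD_ANDROID ? -1 : fd;
}

update_buffer_release_fences() then hands the resulting fd to the
weston_buffer_release via fd_update(), replacing any previous fence for
the reasons given in the in-code comment above.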

--- a/libweston/linux-explicit-synchronization.c
+++ b/libweston/linux-explicit-synchronization.c
@@ -34,6 +34,16 @@
#include "linux-sync-file.h"
#include "shared/fd-util.h"
static void
destroy_linux_buffer_release(struct wl_resource *resource)
{
struct weston_buffer_release *buffer_release =
wl_resource_get_user_data(resource);
fd_clear(&buffer_release->fence_fd);
free(buffer_release);
}
static void
destroy_linux_surface_synchronization(struct wl_resource *resource)
{
@@ -97,7 +107,53 @@ linux_surface_synchronization_get_release(struct wl_client *client,
struct wl_resource *resource,
uint32_t id)
{
struct weston_surface *surface =
wl_resource_get_user_data(resource);
struct weston_buffer_release *buffer_release;
if (!surface) {
wl_resource_post_error(
resource,
ZWP_LINUX_SURFACE_SYNCHRONIZATION_V1_ERROR_NO_SURFACE,
"surface no longer exists");
return;
}
if (surface->pending.buffer_release_ref.buffer_release) {
wl_resource_post_error(
resource,
ZWP_LINUX_SURFACE_SYNCHRONIZATION_V1_ERROR_DUPLICATE_RELEASE,
"already has a buffer release");
return;
}
buffer_release = zalloc(sizeof *buffer_release);
if (buffer_release == NULL)
goto err_alloc;
buffer_release->fence_fd = -1;
buffer_release->resource =
wl_resource_create(client,
&zwp_linux_buffer_release_v1_interface,
wl_resource_get_version(resource), id);
if (!buffer_release->resource)
goto err_create;
wl_resource_set_implementation(buffer_release->resource, NULL,
buffer_release,
destroy_linux_buffer_release);
weston_buffer_release_reference(&surface->pending.buffer_release_ref,
buffer_release);
return;
err_create:
free(buffer_release);
err_alloc:
wl_client_post_no_memory(client);
}
const struct zwp_linux_surface_synchronization_v1_interface

--- a/libweston/pixman-renderer.c
+++ b/libweston/pixman-renderer.c
@@ -49,6 +49,7 @@ struct pixman_surface_state {
pixman_image_t *image;
struct weston_buffer_reference buffer_ref;
struct weston_buffer_release_reference buffer_release_ref;
struct wl_listener buffer_destroy_listener;
struct wl_listener surface_destroy_listener;
@@ -625,6 +626,8 @@ pixman_renderer_attach(struct weston_surface *es, struct weston_buffer *buffer)
pixman_format_code_t pixman_format;
weston_buffer_reference(&ps->buffer_ref, buffer);
weston_buffer_release_reference(&ps->buffer_release_ref,
es->buffer_release_ref.buffer_release);
if (ps->buffer_destroy_listener.notify) {
wl_list_remove(&ps->buffer_destroy_listener.link);
@@ -644,6 +647,7 @@ pixman_renderer_attach(struct weston_surface *es, struct weston_buffer *buffer)
if (! shm_buffer) {
weston_log("Pixman renderer supports only SHM buffers\n");
weston_buffer_reference(&ps->buffer_ref, NULL);
weston_buffer_release_reference(&ps->buffer_release_ref, NULL);
return;
}
@@ -664,6 +668,7 @@ pixman_renderer_attach(struct weston_surface *es, struct weston_buffer *buffer)
weston_log("Unsupported SHM buffer format 0x%x\n",
wl_shm_buffer_get_format(shm_buffer));
weston_buffer_reference(&ps->buffer_ref, NULL);
weston_buffer_release_reference(&ps->buffer_release_ref, NULL);
weston_buffer_send_server_error(buffer,
"disconnecting due to unhandled buffer type");
return;
@@ -702,6 +707,7 @@ pixman_renderer_surface_state_destroy(struct pixman_surface_state *ps)
ps->image = NULL;
}
weston_buffer_reference(&ps->buffer_ref, NULL);
weston_buffer_release_reference(&ps->buffer_release_ref, NULL);
free(ps);
}

--- a/tests/linux-explicit-synchronization-test.c
+++ b/tests/linux-explicit-synchronization-test.c
@@ -146,3 +146,184 @@ TEST(set_acquire_fence_on_destroyed_surface_raises_error)
zwp_linux_surface_synchronization_v1_destroy(surface_sync);
zwp_linux_explicit_synchronization_v1_destroy(sync);
}
TEST(second_buffer_release_in_commit_raises_error)
{
struct client *client = create_test_client();
struct zwp_linux_explicit_synchronization_v1 *sync =
get_linux_explicit_synchronization(client);
struct zwp_linux_surface_synchronization_v1 *surface_sync =
zwp_linux_explicit_synchronization_v1_get_synchronization(
sync, client->surface->wl_surface);
struct zwp_linux_buffer_release_v1 *buffer_release1;
struct zwp_linux_buffer_release_v1 *buffer_release2;
buffer_release1 =
zwp_linux_surface_synchronization_v1_get_release(surface_sync);
client_roundtrip(client);
/* Second buffer_release creation should fail */
buffer_release2 =
zwp_linux_surface_synchronization_v1_get_release(surface_sync);
expect_protocol_error(
client,
&zwp_linux_surface_synchronization_v1_interface,
ZWP_LINUX_SURFACE_SYNCHRONIZATION_V1_ERROR_DUPLICATE_RELEASE);
zwp_linux_buffer_release_v1_destroy(buffer_release2);
zwp_linux_buffer_release_v1_destroy(buffer_release1);
zwp_linux_surface_synchronization_v1_destroy(surface_sync);
zwp_linux_explicit_synchronization_v1_destroy(sync);
}
TEST(get_release_without_buffer_raises_commit_error)
{
struct client *client = create_test_client();
struct zwp_linux_explicit_synchronization_v1 *sync =
get_linux_explicit_synchronization(client);
struct zwp_linux_surface_synchronization_v1 *surface_sync =
zwp_linux_explicit_synchronization_v1_get_synchronization(
sync, client->surface->wl_surface);
struct wl_surface *surface = client->surface->wl_surface;
struct zwp_linux_buffer_release_v1 *buffer_release;
buffer_release =
zwp_linux_surface_synchronization_v1_get_release(surface_sync);
wl_surface_commit(surface);
expect_protocol_error(
client,
&zwp_linux_surface_synchronization_v1_interface,
ZWP_LINUX_SURFACE_SYNCHRONIZATION_V1_ERROR_NO_BUFFER);
zwp_linux_buffer_release_v1_destroy(buffer_release);
zwp_linux_surface_synchronization_v1_destroy(surface_sync);
zwp_linux_explicit_synchronization_v1_destroy(sync);
}
TEST(get_release_on_destroyed_surface_raises_error)
{
struct client *client = create_test_client();
struct zwp_linux_explicit_synchronization_v1 *sync =
get_linux_explicit_synchronization(client);
struct zwp_linux_surface_synchronization_v1 *surface_sync =
zwp_linux_explicit_synchronization_v1_get_synchronization(
sync, client->surface->wl_surface);
struct zwp_linux_buffer_release_v1 *buffer_release;
wl_surface_destroy(client->surface->wl_surface);
buffer_release =
zwp_linux_surface_synchronization_v1_get_release(surface_sync);
expect_protocol_error(
client,
&zwp_linux_surface_synchronization_v1_interface,
ZWP_LINUX_SURFACE_SYNCHRONIZATION_V1_ERROR_NO_SURFACE);
zwp_linux_buffer_release_v1_destroy(buffer_release);
zwp_linux_surface_synchronization_v1_destroy(surface_sync);
zwp_linux_explicit_synchronization_v1_destroy(sync);
}
TEST(get_release_after_commit_succeeds)
{
struct client *client = create_test_client();
struct zwp_linux_explicit_synchronization_v1 *sync =
get_linux_explicit_synchronization(client);
struct wl_surface *surface = client->surface->wl_surface;
struct zwp_linux_surface_synchronization_v1 *surface_sync =
zwp_linux_explicit_synchronization_v1_get_synchronization(
sync, surface);
struct buffer *buf1 = create_shm_buffer_a8r8g8b8(client, 100, 100);
struct zwp_linux_buffer_release_v1 *buffer_release1;
struct zwp_linux_buffer_release_v1 *buffer_release2;
buffer_release1 =
zwp_linux_surface_synchronization_v1_get_release(surface_sync);
client_roundtrip(client);
wl_surface_attach(surface, buf1->proxy, 0, 0);
wl_surface_commit(surface);
buffer_release2 =
zwp_linux_surface_synchronization_v1_get_release(surface_sync);
client_roundtrip(client);
buffer_destroy(buf1);
zwp_linux_buffer_release_v1_destroy(buffer_release2);
zwp_linux_buffer_release_v1_destroy(buffer_release1);
zwp_linux_surface_synchronization_v1_destroy(surface_sync);
zwp_linux_explicit_synchronization_v1_destroy(sync);
}
static void
buffer_release_fenced_handler(void *data,
struct zwp_linux_buffer_release_v1 *buffer_release,
int32_t fence)
{
assert(!"Fenced release not supported yet");
}
static void
buffer_release_immediate_handler(void *data,
struct zwp_linux_buffer_release_v1 *buffer_release)
{
int *released = data;
*released += 1;
}
struct zwp_linux_buffer_release_v1_listener buffer_release_listener = {
buffer_release_fenced_handler,
buffer_release_immediate_handler
};
TEST(get_release_events_are_emitted)
{
struct client *client = create_test_client();
struct zwp_linux_explicit_synchronization_v1 *sync =
get_linux_explicit_synchronization(client);
struct zwp_linux_surface_synchronization_v1 *surface_sync =
zwp_linux_explicit_synchronization_v1_get_synchronization(
sync, client->surface->wl_surface);
struct buffer *buf1 = create_shm_buffer_a8r8g8b8(client, 100, 100);
struct buffer *buf2 = create_shm_buffer_a8r8g8b8(client, 100, 100);
struct wl_surface *surface = client->surface->wl_surface;
struct zwp_linux_buffer_release_v1 *buffer_release1;
struct zwp_linux_buffer_release_v1 *buffer_release2;
int buf_released1 = 0;
int buf_released2 = 0;
int frame;
buffer_release1 =
zwp_linux_surface_synchronization_v1_get_release(surface_sync);
zwp_linux_buffer_release_v1_add_listener(buffer_release1,
&buffer_release_listener,
&buf_released1);
wl_surface_attach(surface, buf1->proxy, 0, 0);
frame_callback_set(surface, &frame);
wl_surface_commit(surface);
frame_callback_wait(client, &frame);
/* Check that exactly one buffer_release event was emitted. */
assert(buf_released1 == 1);
buffer_release2 =
zwp_linux_surface_synchronization_v1_get_release(surface_sync);
zwp_linux_buffer_release_v1_add_listener(buffer_release2,
&buffer_release_listener,
&buf_released2);
wl_surface_attach(surface, buf2->proxy, 0, 0);
frame_callback_set(surface, &frame);
wl_surface_commit(surface);
frame_callback_wait(client, &frame);
/* Check that we didn't get any new events on the inactive
* buffer_release. */
assert(buf_released1 == 1);
/* Check that exactly one buffer_release event was emitted. */
assert(buf_released2 == 1);
buffer_destroy(buf2);
buffer_destroy(buf1);
zwp_linux_buffer_release_v1_destroy(buffer_release2);
zwp_linux_buffer_release_v1_destroy(buffer_release1);
zwp_linux_surface_synchronization_v1_destroy(surface_sync);
zwp_linux_explicit_synchronization_v1_destroy(sync);
}
