virgl: add support for per-context fencing

Per-context fences are guaranteed to signal in creation order only
within the same context.  Two per-context fences created in different
contexts may signal in any order.

When a per-context fence is created, a fence cookie can be specified.
The cookie is passed back to the write_context_fence callback, and
replaces the fence_id used in ctx0 fencing.

write_context_fence is called for every fence unless the fence has
VIRGL_RENDERER_FENCE_FLAG_MERGEABLE set.  When that bit is set,
write_context_fence may be skipped if a later fence from the same
context signals as well.

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Acked-by: Gert Wollny <gert.wollny@collabora.com>
Reviewed-by: Louis-Francis Ratté-Boulianne <lfrb@collabora.com>
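
For illustration, a rough caller-side sketch of the wiring this change
expects: a hypothetical user (such as a VMM) defines
VIRGL_RENDERER_UNSTABLE_APIS and supplies the new write_context_fence
callback.  The printf bodies and the cookie handling are made up; only
the callback signature and VIRGL_RENDERER_CALLBACKS_VERSION come from
this commit.

#define VIRGL_RENDERER_UNSTABLE_APIS  /* opt in to the v3 callbacks */
#include <virglrenderer.h>

#include <inttypes.h>
#include <stdio.h>

static void write_fence(void *cookie, uint32_t fence_id)
{
   /* ctx0 fencing still identifies fences by fence_id */
   printf("ctx0 fence %u signaled\n", fence_id);
}

static void write_context_fence(void *cookie, uint32_t ctx_id,
                                uint64_t queue_id, void *fence_cookie)
{
   /* per-context fencing identifies fences by the opaque cookie */
   printf("ctx %u queue %" PRIu64 ": fence cookie %p signaled\n",
          ctx_id, queue_id, fence_cookie);
}

static struct virgl_renderer_callbacks cbs = {
   .version = VIRGL_RENDERER_CALLBACKS_VERSION, /* 3 with unstable APIs */
   .write_fence = write_fence,
   .write_context_fence = write_context_fence,
   /* GL callbacks (create_gl_context, etc.) omitted from this sketch */
};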
parent 3a2a537c69
commit d470a2df58

 src/virglrenderer.c  | 41
 src/virglrenderer.h  | 16
 src/vrend_renderer.c |  7

diff --git a/src/virglrenderer.c b/src/virglrenderer.c

@@ -165,10 +165,14 @@ void virgl_renderer_fill_caps(uint32_t set, uint32_t version,
    }
 }
 
-static void per_context_fence_retire(UNUSED struct virgl_context *ctx,
-                                     UNUSED uint64_t queue_id,
-                                     UNUSED void *fence_cookie)
+static void per_context_fence_retire(struct virgl_context *ctx,
+                                     uint64_t queue_id,
+                                     void *fence_cookie)
 {
+   state.cbs->write_context_fence(state.cookie,
+                                  ctx->ctx_id,
+                                  queue_id,
+                                  fence_cookie);
 }
 
 int virgl_renderer_context_create_with_flags(uint32_t ctx_id,
@@ -363,6 +367,37 @@ int virgl_renderer_create_fence(int client_fence_id, UNUSED uint32_t ctx_id)
    return EINVAL;
 }
 
+int virgl_renderer_context_create_fence(uint32_t ctx_id,
+                                        uint32_t flags,
+                                        uint64_t queue_id,
+                                        void *fence_cookie)
+{
+   struct virgl_context *ctx = virgl_context_lookup(ctx_id);
+   if (!ctx)
+      return -EINVAL;
+
+   assert(state.cbs->version >= 3 && state.cbs->write_context_fence);
+   return ctx->submit_fence(ctx, flags, queue_id, fence_cookie);
+}
+
+void virgl_renderer_context_poll(uint32_t ctx_id)
+{
+   struct virgl_context *ctx = virgl_context_lookup(ctx_id);
+   if (!ctx)
+      return;
+
+   ctx->retire_fences(ctx);
+}
+
+int virgl_renderer_context_get_poll_fd(uint32_t ctx_id)
+{
+   struct virgl_context *ctx = virgl_context_lookup(ctx_id);
+   if (!ctx)
+      return -1;
+
+   return ctx->get_fencing_fd(ctx);
+}
+
 void virgl_renderer_force_ctx_0(void)
 {
    TRACE_FUNC();
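
For illustration, a minimal sketch of how the entry points added above
might be driven.  The poll()-based wait and the MY_CTX_ID / MY_QUEUE_ID
values are assumptions; only virgl_renderer_context_create_fence,
virgl_renderer_context_poll, virgl_renderer_context_get_poll_fd and
VIRGL_RENDERER_FENCE_FLAG_MERGEABLE come from this commit.

#define VIRGL_RENDERER_UNSTABLE_APIS
#include <virglrenderer.h>

#include <poll.h>

#define MY_CTX_ID   1   /* hypothetical context created earlier */
#define MY_QUEUE_ID 0   /* hypothetical queue within that context */

/* Request a fence for the work submitted so far.  With MERGEABLE set,
 * write_context_fence may be skipped for this fence when a later fence
 * on the same context signals as well. */
static int queue_fence(void *fence_cookie)
{
   return virgl_renderer_context_create_fence(MY_CTX_ID,
                                              VIRGL_RENDERER_FENCE_FLAG_MERGEABLE,
                                              MY_QUEUE_ID, fence_cookie);
}

/* Wait for fence activity, then let virglrenderer retire signaled
 * fences, which invokes write_context_fence for each retired fence. */
static void wait_and_retire(void)
{
   struct pollfd pfd = {
      .fd = virgl_renderer_context_get_poll_fd(MY_CTX_ID),
      .events = POLLIN,
   };

   if (pfd.fd >= 0)
      poll(&pfd, 1, -1);

   virgl_renderer_context_poll(MY_CTX_ID);
}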

diff --git a/src/virglrenderer.h b/src/virglrenderer.h

@@ -45,7 +45,11 @@ struct virgl_renderer_gl_ctx_param {
    int minor_ver;
 };
 
+#ifdef VIRGL_RENDERER_UNSTABLE_APIS
+#define VIRGL_RENDERER_CALLBACKS_VERSION 3
+#else
 #define VIRGL_RENDERER_CALLBACKS_VERSION 2
+#endif
 
 struct virgl_renderer_callbacks {
    int version;
@@ -57,6 +61,10 @@ struct virgl_renderer_callbacks {
    int (*make_current)(void *cookie, int scanout_idx, virgl_renderer_gl_context ctx);
 
    int (*get_drm_fd)(void *cookie); /* v2, used with flags & VIRGL_RENDERER_USE_EGL */
+
+#ifdef VIRGL_RENDERER_UNSTABLE_APIS
+   void (*write_context_fence)(void *cookie, uint32_t ctx_id, uint64_t queue_id, void *fence_cookie);
+#endif
 };
 
 /* virtio-gpu compatible interface */
@@ -307,6 +315,14 @@ virgl_renderer_resource_export_blob(uint32_t res_id, uint32_t *fd_type, int *fd)
 VIRGL_EXPORT int
 virgl_renderer_export_fence(uint32_t client_fence_id, int *fd);
 
+#define VIRGL_RENDERER_FENCE_FLAG_MERGEABLE (1 << 0)
+VIRGL_EXPORT int virgl_renderer_context_create_fence(uint32_t ctx_id,
+                                                     uint32_t flags,
+                                                     uint64_t queue_id,
+                                                     void *fence_cookie);
+VIRGL_EXPORT void virgl_renderer_context_poll(uint32_t ctx_id); /* force fences */
+VIRGL_EXPORT int virgl_renderer_context_get_poll_fd(uint32_t ctx_id);
+
 #endif /* VIRGL_RENDERER_UNSTABLE_APIS */
 
 #endif

diff --git a/src/vrend_renderer.c b/src/vrend_renderer.c

@@ -9234,6 +9234,10 @@ static bool need_fence_retire_signal_locked(struct vrend_fence *fence)
    if (next->ctx != fence->ctx)
       return true;
 
+   /* not mergeable */
+   if (!(fence->flags & VIRGL_RENDERER_FENCE_FLAG_MERGEABLE))
+      return true;
+
    return false;
 }
 
@@ -11018,7 +11022,8 @@ int vrend_renderer_resource_unmap(struct pipe_resource *pres)
 int vrend_renderer_create_ctx0_fence(uint32_t fence_id)
 {
    void *fence_cookie = (void *)(uintptr_t)fence_id;
-   return vrend_renderer_create_fence(vrend_state.ctx0, 0, fence_cookie);
+   return vrend_renderer_create_fence(vrend_state.ctx0,
+                                      VIRGL_RENDERER_FENCE_FLAG_MERGEABLE, fence_cookie);
 }
 
 static bool find_ctx0_fence_locked(struct list_head *fence_list,
