vkr: defer sync retire and free until next retire_fences

When VKR_RENDERER_ASYNC_FENCE_CB is not set, we destroy the VkFence at
device destruction and move the queue syncs to the ctx so that they can
be retired and freed in the next retire_fences call.
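
Below is a minimal standalone sketch of the deferral pattern described
above; the demo_* names are hypothetical and not part of vkr, they only
model how a sync parked on the context gets retired and freed by a later
retire_fences call.

/* Hypothetical, simplified model of the deferred retire-and-free flow;
 * not the vkr_* code touched by this commit.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_sync {
   unsigned queue_id;
   void *fence_cookie;
   struct demo_sync *next;
};

struct demo_ctx {
   struct demo_sync *signaled_syncs; /* plays the role of ctx->signaled_syncs */
};

/* At "device destruction" the fence itself is gone, but the sync is parked
 * on the ctx instead of being freed immediately.
 */
static void
demo_defer_sync(struct demo_ctx *ctx, struct demo_sync *sync)
{
   sync->next = ctx->signaled_syncs;
   ctx->signaled_syncs = sync;
}

/* The next retire_fences call drains the parked syncs: it notifies the
 * client for each one and then frees it.
 */
static void
demo_retire_fences(struct demo_ctx *ctx)
{
   struct demo_sync *sync = ctx->signaled_syncs;
   while (sync) {
      struct demo_sync *next = sync->next;
      printf("retire fence: queue %u cookie %p\n", sync->queue_id,
             sync->fence_cookie);
      free(sync);
      sync = next;
   }
   ctx->signaled_syncs = NULL;
}

int
main(void)
{
   struct demo_ctx ctx = { .signaled_syncs = NULL };
   struct demo_sync *sync = calloc(1, sizeof(*sync));

   if (!sync)
      return 1;
   sync->queue_id = 1;

   demo_defer_sync(&ctx, sync);   /* device destruction: defer */
   demo_retire_fences(&ctx);      /* next retire_fences: retire and free */

   return 0;
}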

Signed-off-by: Yiwei Zhang <zzyiwei@chromium.org>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Ryan Neph <ryanneph@google.com>
parent c0b76a5322
commit b5fdf8a7dc
 src/venus/vkr_context.c | 15
 src/venus/vkr_context.h |  1
 src/venus/vkr_queue.c   | 10

src/venus/vkr_context.c

@@ -141,7 +141,7 @@ vkr_context_submit_fence(struct virgl_context *base,
 }
 
 static void
-vkr_context_retire_fences_locked(UNUSED struct virgl_context *base)
+vkr_context_retire_fences_locked(struct virgl_context *base)
 {
    struct vkr_context *ctx = (struct vkr_context *)base;
    struct vkr_queue_sync *sync, *sync_tmp;
@@ -149,6 +149,14 @@ vkr_context_retire_fences_locked(UNUSED struct virgl_context *base)
    assert(!(vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB));
 
+   /* retire syncs from destroyed devices */
+   LIST_FOR_EACH_ENTRY_SAFE (sync, sync_tmp, &ctx->signaled_syncs, head) {
+      /* queue_id might have already been reused but is opaque to the clients */
+      ctx->base.fence_retire(&ctx->base, sync->queue_id, sync->fence_cookie);
+      free(sync);
+   }
+   list_inithead(&ctx->signaled_syncs);
+
    /* flush first and once because the per-queue sync threads might write to
     * it any time
     */
@@ -514,6 +522,10 @@ vkr_context_destroy(struct virgl_context *base)
    util_hash_table_destroy(ctx->resource_table);
    util_hash_table_destroy_u64(ctx->object_table);
 
+   struct vkr_queue_sync *sync, *tmp;
+   LIST_FOR_EACH_ENTRY_SAFE (sync, tmp, &ctx->signaled_syncs, head)
+      free(sync);
+
    if (ctx->fence_eventfd >= 0)
       close(ctx->fence_eventfd);
@@ -621,6 +633,7 @@ vkr_context_create(size_t debug_len, const char *debug_name)
    }
 
    list_inithead(&ctx->busy_queues);
+   list_inithead(&ctx->signaled_syncs);
 
    return &ctx->base;

src/venus/vkr_context.h

@@ -59,6 +59,7 @@ struct vkr_context {
    int fence_eventfd;
    struct list_head busy_queues;
+   struct list_head signaled_syncs;
 
    struct vkr_instance *instance;
 };

src/venus/vkr_queue.c

@@ -128,10 +128,16 @@ vkr_queue_sync_retire(struct vkr_context *ctx,
                       struct vkr_device *dev,
                       struct vkr_queue_sync *sync)
 {
-   if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB)
+   if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB) {
       ctx->base.fence_retire(&ctx->base, sync->queue_id, sync->fence_cookie);
+      vkr_device_free_queue_sync(dev, sync);
+   } else {
+      vkDestroyFence(dev->base.handle.device, sync->fence, NULL);
+      sync->fence = VK_NULL_HANDLE;
 
-   vkr_device_free_queue_sync(dev, sync);
+      /* move to the ctx to be retired and freed at the next retire_fences */
+      list_addtail(&sync->head, &ctx->signaled_syncs);
+   }
 }
 
 static void
