vkr: retire syncs at vkr_queue_destroy when ASYNC_FENCE_CB is set

Refactored queue syncs retire on destroy into vkr_queue_retire_all_syncs
and rename vkr_queue_retire_syncs to vkr_queue_get_signaled_syncs.

Signed-off-by: Yiwei Zhang <zzyiwei@chromium.org>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Ryan Neph <ryanneph@google.com>
macos/master
Yiwei Zhang 3 years ago
parent e9b685e715
commit c0b76a5322
  1. 2
      src/venus/vkr_context.c
  2. 32
      src/venus/vkr_queue.c
  3. 6
      src/venus/vkr_queue.h

@@ -160,7 +160,7 @@ vkr_context_retire_fences_locked(UNUSED struct virgl_context *base)
struct list_head retired_syncs;
bool queue_empty;
vkr_queue_retire_syncs(queue, &retired_syncs, &queue_empty);
vkr_queue_get_signaled_syncs(queue, &retired_syncs, &queue_empty);
LIST_FOR_EACH_ENTRY_SAFE (sync, sync_tmp, &retired_syncs, head) {
ctx->base.fence_retire(&ctx->base, sync->queue_id, sync->fence_cookie);

@@ -78,9 +78,9 @@ vkr_device_free_queue_sync(struct vkr_device *dev, struct vkr_queue_sync *sync)
}
void
vkr_queue_retire_syncs(struct vkr_queue *queue,
struct list_head *retired_syncs,
bool *queue_empty)
vkr_queue_get_signaled_syncs(struct vkr_queue *queue,
struct list_head *retired_syncs,
bool *queue_empty)
{
struct vkr_device *dev = queue->device;
struct vkr_queue_sync *sync, *tmp;
@@ -123,8 +123,19 @@ vkr_queue_retire_syncs(struct vkr_queue *queue,
}
}
void
vkr_queue_destroy(struct vkr_context *ctx, struct vkr_queue *queue)
/* Retire a single queue sync object.
 *
 * When the renderer was created with VKR_RENDERER_ASYNC_FENCE_CB, the
 * context's fence_retire callback is invoked for the sync's queue id and
 * fence cookie before the sync is returned to the device's free pool via
 * vkr_device_free_queue_sync().  Without that flag, the sync is freed
 * without notifying the callback (retirement is presumably handled on the
 * synchronous path instead — confirm against vkr_context_retire_fences).
 */
static void
vkr_queue_sync_retire(struct vkr_context *ctx,
struct vkr_device *dev,
struct vkr_queue_sync *sync)
{
/* async fence callback mode: notify the owner before freeing */
if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB)
ctx->base.fence_retire(&ctx->base, sync->queue_id, sync->fence_cookie);
vkr_device_free_queue_sync(dev, sync);
}
static void
vkr_queue_retire_all_syncs(struct vkr_context *ctx, struct vkr_queue *queue)
{
struct vkr_queue_sync *sync, *tmp;
@@ -137,13 +148,20 @@ vkr_queue_destroy(struct vkr_context *ctx, struct vkr_queue *queue)
thrd_join(queue->thread, NULL);
LIST_FOR_EACH_ENTRY_SAFE (sync, tmp, &queue->signaled_syncs, head)
vkr_device_free_queue_sync(queue->device, sync);
vkr_queue_sync_retire(ctx, queue->device, sync);
} else {
assert(LIST_IS_EMPTY(&queue->signaled_syncs));
}
LIST_FOR_EACH_ENTRY_SAFE (sync, tmp, &queue->pending_syncs, head)
vkr_device_free_queue_sync(queue->device, sync);
vkr_queue_sync_retire(ctx, queue->device, sync);
}
void
vkr_queue_destroy(struct vkr_context *ctx, struct vkr_queue *queue)
{
/* vkDeviceWaitIdle has been called */
vkr_queue_retire_all_syncs(ctx, queue);
mtx_destroy(&queue->mutex);
cnd_destroy(&queue->cond);

@@ -89,9 +89,9 @@ void
vkr_device_free_queue_sync(struct vkr_device *dev, struct vkr_queue_sync *sync);
void
vkr_queue_retire_syncs(struct vkr_queue *queue,
struct list_head *retired_syncs,
bool *queue_empty);
vkr_queue_get_signaled_syncs(struct vkr_queue *queue,
struct list_head *retired_syncs,
bool *queue_empty);
struct vkr_queue *
vkr_queue_create(struct vkr_context *ctx,

Loading…
Cancel
Save