From 45d1e4fa7720f13af0c8eec5c093dc7553fed106 Mon Sep 17 00:00:00 2001
From: Chia-I Wu
Date: Thu, 11 Mar 2021 12:15:46 -0800
Subject: [PATCH] vrend: fix fence polling without sync thread

This was regressed by commit 3a2a537c ("vrend: hook up per-context
fencing internally") and was bisected by Gert.

When there is no sync thread, vrend_renderer_check_fences checks the
fences on vrend_state.fence_list for signaled ones and calls
ctx->fence_retire on them.  Because fences belonging to the same
context are ordered, as an optimization we ideally want to call
ctx->fence_retire only on the last signaled fence of each context.
(Note we are close to that but not quite, because we want to avoid
complex changes until virgl starts using per-context fencing, if
ever.)

The issue is in need_fence_retire_signal_locked, which has this check:

  if (fence->fences.next == &vrend_state.fence_list)
     return true;

It says: if the fence is the last one on vrend_state.fence_list, call
ctx->fence_retire, because the fence must be the last fence of some
context.  The check is incorrect because, when the sync thread is not
used, not all fences on the list have signaled.  It gives the wrong
answer whenever unsignaled fences are still on the list.

To fix the issue, construct a list of signaled fences first, and only
then call need_fence_retire_signal_locked against that list.

We could merge the paths with and without the sync thread further,
because the non-sync-thread path needs only this extra step of
constructing a list of signaled fences.  But let's save that for the
future and focus on fixing the performance regression here.

Signed-off-by: Chia-I Wu
Reviewed-by: Gert Wollny
---
 src/vrend_renderer.c | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/src/vrend_renderer.c b/src/vrend_renderer.c
index f53da1e..5bb3907 100644
--- a/src/vrend_renderer.c
+++ b/src/vrend_renderer.c
@@ -9255,12 +9255,13 @@ int vrend_renderer_create_fence(struct vrend_context *ctx,
 
 static void vrend_renderer_check_queries(void);
 
-static bool need_fence_retire_signal_locked(struct vrend_fence *fence)
+static bool need_fence_retire_signal_locked(struct vrend_fence *fence,
+                                            const struct list_head *signaled_list)
 {
    struct vrend_fence *next;
 
    /* last fence */
-   if (fence->fences.next == &vrend_state.fence_list)
+   if (fence->fences.next == signaled_list)
       return true;
 
    /* next fence belongs to a different context */
@@ -9294,7 +9295,7 @@ void vrend_renderer_check_fences(void)
             continue;
          }
 
-         if (need_fence_retire_signal_locked(fence)) {
+         if (need_fence_retire_signal_locked(fence, &vrend_state.fence_list)) {
             list_del(&fence->fences);
             list_addtail(&fence->fences, &retired_fences);
          } else {
@@ -9307,17 +9308,18 @@ void vrend_renderer_check_fences(void)
 
       LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
          if (do_wait(fence, /* can_block */ false)) {
-            if (need_fence_retire_signal_locked(fence)) {
-               list_del(&fence->fences);
-               list_addtail(&fence->fences, &retired_fences);
-            } else {
-               free_fence_locked(fence);
-            }
+            list_del(&fence->fences);
+            list_addtail(&fence->fences, &retired_fences);
          } else {
             /* don't bother checking any subsequent ones */
            break;
          }
       }
+
+      LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &retired_fences, fences) {
+         if (!need_fence_retire_signal_locked(fence, &retired_fences))
+            free_fence_locked(fence);
+      }
    }
 
    if (LIST_IS_EMPTY(&retired_fences))
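
For readers who want to see the failure mode in isolation, here is a
minimal standalone sketch.  This is hypothetical illustration code, not
virgl's: the toy circular list and the struct stand in for util/list.h
and struct vrend_fence, and need_retire_signal stands in for
need_fence_retire_signal_locked.  It shows why comparing
fence->fences.next against the full list head skips the retire callback
when an unsignaled fence of the same context is still queued, and why
comparing against a signaled-only list gives the right answer.

/* fence_retire_sketch.c -- hypothetical illustration, not virgl code.
 * A toy circular doubly-linked list models vrend_state.fence_list;
 * a bare struct fence with only its links used serves as the head. */
#include <stdbool.h>
#include <stdio.h>

struct fence {
   struct fence *prev, *next;   /* circular list links */
   int ctx_id;                  /* owning context */
   bool signaled;
};

static void list_init(struct fence *head)
{
   head->prev = head->next = head;
}

static void list_append(struct fence *head, struct fence *f)
{
   f->prev = head->prev;
   f->next = head;
   head->prev->next = f;
   head->prev = f;
}

static void list_del(struct fence *f)
{
   f->prev->next = f->next;
   f->next->prev = f->prev;
   f->prev = f->next = f;
}

/* Stand-in for need_fence_retire_signal_locked: retire a fence only if
 * it is the last fence of its context on 'head'.  This is correct only
 * when every fence on 'head' has signaled -- the precondition the patch
 * restores by building a signaled-only list first. */
static bool need_retire_signal(const struct fence *f, const struct fence *head)
{
   if (f->next == head)                    /* last fence on the list */
      return true;
   return f->next->ctx_id != f->ctx_id;    /* next fence: other context */
}

int main(void)
{
   /* Context 1 submitted two fences; only the first has signaled. */
   struct fence a = { .ctx_id = 1, .signaled = true };
   struct fence b = { .ctx_id = 1, .signaled = false };
   struct fence all, signaled;

   list_init(&all);
   list_init(&signaled);
   list_append(&all, &a);
   list_append(&all, &b);

   /* Buggy variant: checked against the full list, 'a' is neither the
    * last fence nor followed by another context, so its retire would be
    * skipped even though it signaled.  Prints 0. */
   printf("against full list: %d\n", need_retire_signal(&a, &all));

   /* Fixed variant: move the signaled fence to its own list first, then
    * run the check against that list.  Prints 1. */
   list_del(&a);
   list_append(&signaled, &a);
   printf("against signaled list: %d\n", need_retire_signal(&a, &signaled));

   return 0;
}

Built with any C99 compiler, the sketch prints 0 for the buggy check and
1 once the signaled fence sits on its own list, mirroring what the new
LIST_FOR_EACH_ENTRY_SAFE pass over retired_fences achieves in the patch.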