vkr: split out struct definitions to their own headers

No functional change.

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Yiwei Zhang <zzyiwei@chromium.org>
Reviewed-by: Ryan Neph <ryanneph@google.com>
parent 965a920893
commit 4f619e630f
 src/meson.build                 |  11 +
 src/venus/vkr_buffer.h          |  19 +
 src/venus/vkr_command_buffer.h  |  25 +
 src/venus/vkr_common.h          |  29 +
 src/venus/vkr_context.h         |  66 +
 src/venus/vkr_descriptor_set.h  |  33 +
 src/venus/vkr_device.h          |  87 +
 src/venus/vkr_device_memory.h   |  23 +
 src/venus/vkr_image.h           |  27 +
 src/venus/vkr_pipeline.h        |  27 +
 src/venus/vkr_query_pool.h      |  15 +
 src/venus/vkr_queue.h           |  69 +
 src/venus/vkr_render_pass.h     |  19 +
 src/venus/vkr_renderer.c        | 293 +-
 14 files changed, 461 insertions(+), 282 deletions(-)

--- a/src/meson.build
+++ b/src/meson.build
@@ -77,9 +77,20 @@ vrend_winsys_glx_sources = [
venus_sources = [
'venus_hw.h',
'venus/venus-protocol/vn_protocol_renderer.h',
'venus/vkr_buffer.h',
'venus/vkr_command_buffer.h',
'venus/vkr_common.h',
'venus/vkr_context.h',
'venus/vkr_cs.c',
'venus/vkr_cs.h',
'venus/vkr_descriptor_set.h',
'venus/vkr_device.h',
'venus/vkr_device_memory.h',
'venus/vkr_image.h',
'venus/vkr_pipeline.h',
'venus/vkr_query_pool.h',
'venus/vkr_queue.h',
'venus/vkr_render_pass.h',
'venus/vkr_renderer.c',
'venus/vkr_renderer.h',
'venus/vkr_ring.c',

--- /dev/null
+++ b/src/venus/vkr_buffer.h
@@ -0,0 +1,19 @@
/*
* Copyright 2020 Google LLC
* SPDX-License-Identifier: MIT
*/
#ifndef VKR_BUFFER_H
#define VKR_BUFFER_H
#include "vkr_common.h"
struct vkr_buffer {
struct vkr_object base;
};
struct vkr_buffer_view {
struct vkr_object base;
};
#endif /* VKR_BUFFER_H */

--- /dev/null
+++ b/src/venus/vkr_command_buffer.h
@@ -0,0 +1,25 @@
/*
* Copyright 2020 Google LLC
* SPDX-License-Identifier: MIT
*/
#ifndef VKR_COMMAND_BUFFER_H
#define VKR_COMMAND_BUFFER_H
#include "vkr_common.h"
struct vkr_command_pool {
struct vkr_object base;
struct list_head command_buffers;
};
struct vkr_command_buffer {
struct vkr_object base;
struct vkr_device *device;
struct list_head head;
};
#endif /* VKR_COMMAND_BUFFER_H */
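
The pool tracks its command buffers through the intrusive list_head above, so tearing down the pool can release every outstanding buffer. A minimal sketch of that pattern, assuming mesa-style list.h helpers (list_add, list_del, LIST_FOR_EACH_ENTRY_SAFE); it is illustrative only, not part of this change:

/* Sketch only: link a freshly created command buffer into its pool,
 * and unlink everything when the pool goes away. */
static void
vkr_sketch_track_command_buffer(struct vkr_command_pool *pool,
                                struct vkr_command_buffer *cmd)
{
   list_add(&cmd->head, &pool->command_buffers);
}

static void
vkr_sketch_release_command_buffers(struct vkr_command_pool *pool)
{
   struct vkr_command_buffer *cmd, *tmp;
   LIST_FOR_EACH_ENTRY_SAFE(cmd, tmp, &pool->command_buffers, head)
      list_del(&cmd->head);
}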

--- a/src/venus/vkr_common.h
+++ b/src/venus/vkr_common.h
@@ -33,6 +33,35 @@
#include "vkr_renderer.h"
struct vkr_context;
struct vkr_instance;
struct vkr_physical_device;
struct vkr_device;
struct vkr_queue;
struct vkr_fence;
struct vkr_semaphore;
struct vkr_event;
struct vkr_device_memory;
struct vkr_buffer;
struct vkr_buffer_view;
struct vkr_image;
struct vkr_image_view;
struct vkr_sampler;
struct vkr_sampler_ycbcr_conversion;
struct vkr_descriptor_set_layout;
struct vkr_descriptor_pool;
struct vkr_descriptor_set;
struct vkr_descriptor_update_template;
struct vkr_render_pass;
struct vkr_framebuffer;
struct vkr_query_pool;
struct vkr_shader_module;
struct vkr_pipeline_layout;
struct vkr_pipeline_cache;
struct vkr_pipeline;
struct vkr_command_pool;
struct vkr_command_buffer;
typedef uint64_t vkr_object_id;
/* base class for all objects */

--- /dev/null
+++ b/src/venus/vkr_context.h
@@ -0,0 +1,66 @@
/*
* Copyright 2020 Google LLC
* SPDX-License-Identifier: MIT
*/
#ifndef VKR_CONTEXT_H
#define VKR_CONTEXT_H
#include "vkr_common.h"
#include "venus-protocol/vn_protocol_renderer_defines.h"
#include "virgl_context.h"
#include "vkr_cs.h"
struct virgl_resource;
/*
* When a virgl_resource is attached in vkr_context_attach_resource, a
* vkr_resource_attachment is created. A vkr_resource_attachment is valid
* until the resource it tracks is detached.
*
* To support transfers to resources not backed by coherent dma-bufs, we
* associate a vkr_resource_attachment with a (list of) vkr_device_memory.
* This way, we can find a vkr_device_memory from a vkr_resource_attachment
* and do transfers using VkDeviceMemory.
*/
struct vkr_resource_attachment {
struct virgl_resource *resource;
struct list_head memories;
};
enum vkr_context_validate_level {
/* no validation */
VKR_CONTEXT_VALIDATE_NONE,
/* force enabling a subset of the validation layer */
VKR_CONTEXT_VALIDATE_ON,
/* force enabling the validation layer */
VKR_CONTEXT_VALIDATE_FULL,
};
struct vkr_context {
struct virgl_context base;
char *debug_name;
enum vkr_context_validate_level validate_level;
bool validate_fatal;
mtx_t mutex;
struct list_head rings;
struct util_hash_table_u64 *object_table;
struct util_hash_table *resource_table;
struct list_head newly_exported_memories;
struct vkr_cs_encoder encoder;
struct vkr_cs_decoder decoder;
struct vn_dispatch_context dispatch;
int fence_eventfd;
struct list_head busy_queues;
struct vkr_instance *instance;
};
#endif /* VKR_CONTEXT_H */
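
Per the comment above, a transfer targeting a resource without a coherent dma-buf would look up the vkr_resource_attachment and walk its memories list to reach a usable VkDeviceMemory. A hypothetical sketch (do_transfer_with_memory is an invented placeholder; the iteration macro is assumed from the list.h in use):

/* Sketch only: iterate the device memories associated with an
 * attachment and hand each to a hypothetical transfer helper. */
static void
vkr_sketch_transfer_attachment(struct vkr_resource_attachment *att)
{
   struct vkr_device_memory *mem;
   LIST_FOR_EACH_ENTRY(mem, &att->memories, head)
      do_transfer_with_memory(att->resource, mem);
}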

--- /dev/null
+++ b/src/venus/vkr_descriptor_set.h
@@ -0,0 +1,33 @@
/*
* Copyright 2020 Google LLC
* SPDX-License-Identifier: MIT
*/
#ifndef VKR_DESCRIPTOR_SET_H
#define VKR_DESCRIPTOR_SET_H
#include "vkr_common.h"
struct vkr_descriptor_set_layout {
struct vkr_object base;
};
struct vkr_descriptor_pool {
struct vkr_object base;
struct list_head descriptor_sets;
};
struct vkr_descriptor_set {
struct vkr_object base;
struct vkr_device *device;
struct list_head head;
};
struct vkr_descriptor_update_template {
struct vkr_object base;
};
#endif /* VKR_DESCRIPTOR_SET_H */

--- /dev/null
+++ b/src/venus/vkr_device.h
@@ -0,0 +1,87 @@
/*
* Copyright 2020 Google LLC
* SPDX-License-Identifier: MIT
*/
#ifndef VKR_DEVICE_H
#define VKR_DEVICE_H
#include "vkr_common.h"
struct vkr_physical_device;
struct vkr_instance {
struct vkr_object base;
uint32_t api_version;
PFN_vkCreateDebugUtilsMessengerEXT create_debug_utils_messenger;
PFN_vkDestroyDebugUtilsMessengerEXT destroy_debug_utils_messenger;
PFN_vkGetMemoryFdKHR get_memory_fd;
PFN_vkGetFenceFdKHR get_fence_fd;
VkDebugUtilsMessengerEXT validation_messenger;
uint32_t physical_device_count;
VkPhysicalDevice *physical_device_handles;
struct vkr_physical_device **physical_devices;
};
struct vkr_physical_device {
struct vkr_object base;
VkPhysicalDeviceProperties properties;
uint32_t api_version;
VkExtensionProperties *extensions;
uint32_t extension_count;
bool KHR_external_memory_fd;
bool EXT_external_memory_dma_buf;
bool KHR_external_fence_fd;
VkPhysicalDeviceMemoryProperties memory_properties;
struct list_head devices;
};
struct vkr_device {
struct vkr_object base;
struct vkr_physical_device *physical_device;
/* Vulkan 1.2 */
PFN_vkGetSemaphoreCounterValue GetSemaphoreCounterValue;
PFN_vkWaitSemaphores WaitSemaphores;
PFN_vkSignalSemaphore SignalSemaphore;
PFN_vkGetDeviceMemoryOpaqueCaptureAddress GetDeviceMemoryOpaqueCaptureAddress;
PFN_vkGetBufferOpaqueCaptureAddress GetBufferOpaqueCaptureAddress;
PFN_vkGetBufferDeviceAddress GetBufferDeviceAddress;
PFN_vkResetQueryPool ResetQueryPool;
PFN_vkCreateRenderPass2 CreateRenderPass2;
PFN_vkCmdBeginRenderPass2 CmdBeginRenderPass2;
PFN_vkCmdNextSubpass2 CmdNextSubpass2;
PFN_vkCmdEndRenderPass2 CmdEndRenderPass2;
PFN_vkCmdDrawIndirectCount CmdDrawIndirectCount;
PFN_vkCmdDrawIndexedIndirectCount CmdDrawIndexedIndirectCount;
PFN_vkCmdBindTransformFeedbackBuffersEXT cmd_bind_transform_feedback_buffers;
PFN_vkCmdBeginTransformFeedbackEXT cmd_begin_transform_feedback;
PFN_vkCmdEndTransformFeedbackEXT cmd_end_transform_feedback;
PFN_vkCmdBeginQueryIndexedEXT cmd_begin_query_indexed;
PFN_vkCmdEndQueryIndexedEXT cmd_end_query_indexed;
PFN_vkCmdDrawIndirectByteCountEXT cmd_draw_indirect_byte_count;
PFN_vkGetImageDrmFormatModifierPropertiesEXT get_image_drm_format_modifier_properties;
PFN_vkGetMemoryFdPropertiesKHR get_memory_fd_properties;
struct list_head queues;
mtx_t free_sync_mutex;
struct list_head free_syncs;
struct list_head objects;
};
#endif /* VKR_DEVICE_H */
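
The Vulkan 1.2 members above form a small per-device dispatch table, resolved once at device creation. A minimal sketch using the standard vkGetDeviceProcAddr (illustrative only, not the code this commit moves):

/* Sketch only: resolve a few of the core-1.2 entry points above. */
static void
vkr_sketch_init_proc_table(struct vkr_device *dev, VkDevice handle)
{
   dev->GetSemaphoreCounterValue = (PFN_vkGetSemaphoreCounterValue)
      vkGetDeviceProcAddr(handle, "vkGetSemaphoreCounterValue");
   dev->WaitSemaphores = (PFN_vkWaitSemaphores)
      vkGetDeviceProcAddr(handle, "vkWaitSemaphores");
   dev->ResetQueryPool = (PFN_vkResetQueryPool)
      vkGetDeviceProcAddr(handle, "vkResetQueryPool");
}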

--- /dev/null
+++ b/src/venus/vkr_device_memory.h
@@ -0,0 +1,23 @@
/*
* Copyright 2020 Google LLC
* SPDX-License-Identifier: MIT
*/
#ifndef VKR_DEVICE_MEMORY_H
#define VKR_DEVICE_MEMORY_H
#include "vkr_common.h"
struct vkr_device_memory {
struct vkr_object base;
VkDevice device;
uint32_t property_flags;
uint32_t valid_fd_types;
bool exported;
uint32_t exported_res_id;
struct list_head head;
};
#endif /* VKR_DEVICE_MEMORY_H */

--- /dev/null
+++ b/src/venus/vkr_image.h
@@ -0,0 +1,27 @@
/*
* Copyright 2020 Google LLC
* SPDX-License-Identifier: MIT
*/
#ifndef VKR_IMAGE_H
#define VKR_IMAGE_H
#include "vkr_common.h"
struct vkr_image {
struct vkr_object base;
};
struct vkr_image_view {
struct vkr_object base;
};
struct vkr_sampler {
struct vkr_object base;
};
struct vkr_sampler_ycbcr_conversion {
struct vkr_object base;
};
#endif /* VKR_IMAGE_H */

--- /dev/null
+++ b/src/venus/vkr_pipeline.h
@@ -0,0 +1,27 @@
/*
* Copyright 2020 Google LLC
* SPDX-License-Identifier: MIT
*/
#ifndef VKR_PIPELINE_H
#define VKR_PIPELINE_H
#include "vkr_common.h"
struct vkr_shader_module {
struct vkr_object base;
};
struct vkr_pipeline_layout {
struct vkr_object base;
};
struct vkr_pipeline_cache {
struct vkr_object base;
};
struct vkr_pipeline {
struct vkr_object base;
};
#endif /* VKR_PIPELINE_H */

--- /dev/null
+++ b/src/venus/vkr_query_pool.h
@@ -0,0 +1,15 @@
/*
* Copyright 2020 Google LLC
* SPDX-License-Identifier: MIT
*/
#ifndef VKR_QUERY_POOL_H
#define VKR_QUERY_POOL_H
#include "vkr_common.h"
struct vkr_query_pool {
struct vkr_object base;
};
#endif /* VKR_QUERY_POOL_H */

--- /dev/null
+++ b/src/venus/vkr_queue.h
@@ -0,0 +1,69 @@
/*
* Copyright 2020 Google LLC
* SPDX-License-Identifier: MIT
*/
#ifndef VKR_QUEUE_H
#define VKR_QUEUE_H
#include "vkr_common.h"
struct vkr_queue_sync {
VkFence fence;
uint32_t flags;
void *fence_cookie;
struct list_head head;
};
struct vkr_queue {
struct vkr_object base;
struct vkr_context *context;
struct vkr_device *device;
uint32_t family;
uint32_t index;
/* Submitted fences are added to pending_syncs first. How submitted fences
* are retired depends on VKR_RENDERER_THREAD_SYNC and
* VKR_RENDERER_ASYNC_FENCE_CB.
*
* When VKR_RENDERER_THREAD_SYNC is not set, the main thread calls
* vkGetFenceStatus and retires signaled fences in pending_syncs in order.
*
* When VKR_RENDERER_THREAD_SYNC is set but VKR_RENDERER_ASYNC_FENCE_CB is
* not set, the sync thread calls vkWaitForFences and moves signaled fences
* from pending_syncs to signaled_syncs in order. The main thread simply
* retires all fences in signaled_syncs.
*
* When VKR_RENDERER_THREAD_SYNC and VKR_RENDERER_ASYNC_FENCE_CB are both
* set, the sync thread calls vkWaitForFences and retires signaled fences
* in pending_syncs in order.
*/
int eventfd;
thrd_t thread;
mtx_t mutex;
cnd_t cond;
bool join;
struct list_head pending_syncs;
struct list_head signaled_syncs;
struct list_head head;
struct list_head busy_head;
};
struct vkr_fence {
struct vkr_object base;
};
struct vkr_semaphore {
struct vkr_object base;
};
struct vkr_event {
struct vkr_object base;
};
#endif /* VKR_QUEUE_H */
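
The first retirement mode described in the comment above (VKR_RENDERER_THREAD_SYNC unset) reduces to polling pending_syncs in submission order and stopping at the first fence that has not signaled. A sketch under those assumptions; retire_sync is a hypothetical callback and the iteration macro is assumed from the list.h in use:

/* Sketch only: main-thread polling path.  vkGetFenceStatus returns
 * VK_SUCCESS once a fence has signaled. */
static void
vkr_sketch_poll_syncs(struct vkr_queue *queue, VkDevice dev_handle)
{
   struct vkr_queue_sync *sync, *tmp;
   LIST_FOR_EACH_ENTRY_SAFE(sync, tmp, &queue->pending_syncs, head) {
      if (vkGetFenceStatus(dev_handle, sync->fence) != VK_SUCCESS)
         break; /* retire in order: stop at the first busy fence */
      list_del(&sync->head);
      retire_sync(queue, sync);
   }
}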

--- /dev/null
+++ b/src/venus/vkr_render_pass.h
@@ -0,0 +1,19 @@
/*
* Copyright 2020 Google LLC
* SPDX-License-Identifier: MIT
*/
#ifndef VKR_RENDER_PASS_H
#define VKR_RENDER_PASS_H
#include "vkr_common.h"
struct vkr_render_pass {
struct vkr_object base;
};
struct vkr_framebuffer {
struct vkr_object base;
};
#endif /* VKR_RENDER_PASS_H */

--- a/src/venus/vkr_renderer.c
+++ b/src/venus/vkr_renderer.c
@@ -18,7 +18,18 @@
#include "vrend_debug.h"
#include "vrend_iov.h"
#include "vkr_buffer.h"
#include "vkr_command_buffer.h"
#include "vkr_context.h"
#include "vkr_cs.h"
#include "vkr_descriptor_set.h"
#include "vkr_device.h"
#include "vkr_device_memory.h"
#include "vkr_image.h"
#include "vkr_pipeline.h"
#include "vkr_query_pool.h"
#include "vkr_queue.h"
#include "vkr_render_pass.h"
#include "vkr_ring.h"
/*
@@ -186,288 +197,6 @@
object_array_fini(&arr); \
} while (0)
struct vkr_physical_device;
struct vkr_instance {
struct vkr_object base;
uint32_t api_version;
PFN_vkCreateDebugUtilsMessengerEXT create_debug_utils_messenger;
PFN_vkDestroyDebugUtilsMessengerEXT destroy_debug_utils_messenger;
PFN_vkGetMemoryFdKHR get_memory_fd;
PFN_vkGetFenceFdKHR get_fence_fd;
VkDebugUtilsMessengerEXT validation_messenger;
uint32_t physical_device_count;
VkPhysicalDevice *physical_device_handles;
struct vkr_physical_device **physical_devices;
};
struct vkr_physical_device {
struct vkr_object base;
VkPhysicalDeviceProperties properties;
uint32_t api_version;
VkExtensionProperties *extensions;
uint32_t extension_count;
bool KHR_external_memory_fd;
bool EXT_external_memory_dma_buf;
bool KHR_external_fence_fd;
VkPhysicalDeviceMemoryProperties memory_properties;
struct list_head devices;
};
struct vkr_queue_sync {
VkFence fence;
uint32_t flags;
void *fence_cookie;
struct list_head head;
};
struct vkr_device {
struct vkr_object base;
struct vkr_physical_device *physical_device;
/* Vulkan 1.2 */
PFN_vkGetSemaphoreCounterValue GetSemaphoreCounterValue;
PFN_vkWaitSemaphores WaitSemaphores;
PFN_vkSignalSemaphore SignalSemaphore;
PFN_vkGetDeviceMemoryOpaqueCaptureAddress GetDeviceMemoryOpaqueCaptureAddress;
PFN_vkGetBufferOpaqueCaptureAddress GetBufferOpaqueCaptureAddress;
PFN_vkGetBufferDeviceAddress GetBufferDeviceAddress;
PFN_vkResetQueryPool ResetQueryPool;
PFN_vkCreateRenderPass2 CreateRenderPass2;
PFN_vkCmdBeginRenderPass2 CmdBeginRenderPass2;
PFN_vkCmdNextSubpass2 CmdNextSubpass2;
PFN_vkCmdEndRenderPass2 CmdEndRenderPass2;
PFN_vkCmdDrawIndirectCount CmdDrawIndirectCount;
PFN_vkCmdDrawIndexedIndirectCount CmdDrawIndexedIndirectCount;
PFN_vkCmdBindTransformFeedbackBuffersEXT cmd_bind_transform_feedback_buffers;
PFN_vkCmdBeginTransformFeedbackEXT cmd_begin_transform_feedback;
PFN_vkCmdEndTransformFeedbackEXT cmd_end_transform_feedback;
PFN_vkCmdBeginQueryIndexedEXT cmd_begin_query_indexed;
PFN_vkCmdEndQueryIndexedEXT cmd_end_query_indexed;
PFN_vkCmdDrawIndirectByteCountEXT cmd_draw_indirect_byte_count;
PFN_vkGetImageDrmFormatModifierPropertiesEXT get_image_drm_format_modifier_properties;
PFN_vkGetMemoryFdPropertiesKHR get_memory_fd_properties;
struct list_head queues;
mtx_t free_sync_mutex;
struct list_head free_syncs;
struct list_head objects;
};
struct vkr_queue {
struct vkr_object base;
struct vkr_context *context;
struct vkr_device *device;
uint32_t family;
uint32_t index;
/* Submitted fences are added to pending_syncs first. How submitted fences
* are retired depends on VKR_RENDERER_THREAD_SYNC and
* VKR_RENDERER_ASYNC_FENCE_CB.
*
* When VKR_RENDERER_THREAD_SYNC is not set, the main thread calls
* vkGetFenceStatus and retires signaled fences in pending_syncs in order.
*
* When VKR_RENDERER_THREAD_SYNC is set but VKR_RENDERER_ASYNC_FENCE_CB is
* not set, the sync thread calls vkWaitForFences and moves signaled fences
* from pending_syncs to signaled_syncs in order. The main thread simply
* retires all fences in signaled_syncs.
*
* When VKR_RENDERER_THREAD_SYNC and VKR_RENDERER_ASYNC_FENCE_CB are both
* set, the sync thread calls vkWaitForFences and retires signaled fences
* in pending_syncs in order.
*/
int eventfd;
thrd_t thread;
mtx_t mutex;
cnd_t cond;
bool join;
struct list_head pending_syncs;
struct list_head signaled_syncs;
struct list_head head;
struct list_head busy_head;
};
struct vkr_device_memory {
struct vkr_object base;
VkDevice device;
uint32_t property_flags;
uint32_t valid_fd_types;
bool exported;
uint32_t exported_res_id;
struct list_head head;
};
struct vkr_fence {
struct vkr_object base;
};
struct vkr_semaphore {
struct vkr_object base;
};
struct vkr_buffer {
struct vkr_object base;
};
struct vkr_buffer_view {
struct vkr_object base;
};
struct vkr_image {
struct vkr_object base;
};
struct vkr_image_view {
struct vkr_object base;
};
struct vkr_sampler {
struct vkr_object base;
};
struct vkr_sampler_ycbcr_conversion {
struct vkr_object base;
};
struct vkr_descriptor_set_layout {
struct vkr_object base;
};
struct vkr_descriptor_pool {
struct vkr_object base;
struct list_head descriptor_sets;
};
struct vkr_descriptor_set {
struct vkr_object base;
struct vkr_device *device;
struct list_head head;
};
struct vkr_descriptor_update_template {
struct vkr_object base;
};
struct vkr_render_pass {
struct vkr_object base;
};
struct vkr_framebuffer {
struct vkr_object base;
};
struct vkr_event {
struct vkr_object base;
};
struct vkr_query_pool {
struct vkr_object base;
};
struct vkr_shader_module {
struct vkr_object base;
};
struct vkr_pipeline_layout {
struct vkr_object base;
};
struct vkr_pipeline_cache {
struct vkr_object base;
};
struct vkr_pipeline {
struct vkr_object base;
};
struct vkr_command_pool {
struct vkr_object base;
struct list_head command_buffers;
};
struct vkr_command_buffer {
struct vkr_object base;
struct vkr_device *device;
struct list_head head;
};
/*
* When a virgl_resource is attached in vkr_context_attach_resource, a
* vkr_resource_attachment is created. A vkr_resource_attachment is valid
* until the resource it tracks is detached.
*
* To support transfers to resources not backed by coherent dma-bufs, we
* associate a vkr_resource_attachment with a (list of) vkr_device_memory.
* This way, we can find a vkr_device_memory from a vkr_resource_attachment
* and do transfers using VkDeviceMemory.
*/
struct vkr_resource_attachment {
struct virgl_resource *resource;
struct list_head memories;
};
enum vkr_context_validate_level {
/* no validation */
VKR_CONTEXT_VALIDATE_NONE,
/* force enabling a subset of the validation layer */
VKR_CONTEXT_VALIDATE_ON,
/* force enabling the validation layer */
VKR_CONTEXT_VALIDATE_FULL,
};
struct vkr_context {
struct virgl_context base;
char *debug_name;
enum vkr_context_validate_level validate_level;
bool validate_fatal;
mtx_t mutex;
struct list_head rings;
struct util_hash_table_u64 *object_table;
struct util_hash_table *resource_table;
struct list_head newly_exported_memories;
struct vkr_cs_encoder encoder;
struct vkr_cs_decoder decoder;
struct vn_dispatch_context dispatch;
int fence_eventfd;
struct list_head busy_queues;
struct vkr_instance *instance;
};
enum vkr_debug_flags {
VKR_DEBUG_VALIDATE = 1 << 0,
};
