diff --git a/src/meson.build b/src/meson.build
index 6b87bcd..e413a23 100644
--- a/src/meson.build
+++ b/src/meson.build
@@ -79,6 +79,7 @@ venus_sources = [
    'venus/venus-protocol/vn_protocol_renderer.h',
    'venus/vkr_buffer.h',
    'venus/vkr_command_buffer.h',
+   'venus/vkr_common.c',
    'venus/vkr_common.h',
    'venus/vkr_context.h',
    'venus/vkr_cs.c',
diff --git a/src/venus/vkr_common.c b/src/venus/vkr_common.c
new file mode 100644
index 0000000..a0af970
--- /dev/null
+++ b/src/venus/vkr_common.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2021 Google LLC
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "vkr_common.h"
+
+#include "vkr_cs.h"
+
+void
+object_array_fini(struct object_array *arr)
+{
+   if (!arr->objects_stolen) {
+      for (uint32_t i = 0; i < arr->count; i++)
+         free(arr->objects[i]);
+   }
+
+   free(arr->objects);
+   free(arr->handle_storage);
+}
+
+bool
+object_array_init(struct object_array *arr,
+                  uint32_t count,
+                  VkObjectType obj_type,
+                  size_t obj_size,
+                  size_t handle_size,
+                  const void *handles)
+{
+   arr->count = count;
+
+   arr->objects = malloc(sizeof(*arr->objects) * count);
+   if (!arr->objects)
+      return false;
+
+   arr->handle_storage = malloc(handle_size * count);
+   if (!arr->handle_storage) {
+      free(arr->objects);
+      return false;
+   }
+
+   arr->objects_stolen = false;
+   for (uint32_t i = 0; i < count; i++) {
+      struct vkr_object *obj = calloc(1, obj_size);
+      if (!obj) {
+         arr->count = i;
+         object_array_fini(arr);
+         return false;
+      }
+
+      obj->type = obj_type;
+      obj->id = vkr_cs_handle_load_id((const void **)((char *)handles + handle_size * i),
+                                      obj->type);
+
+      arr->objects[i] = obj;
+   }
+
+   return arr;
+}
diff --git a/src/venus/vkr_common.h b/src/venus/vkr_common.h
index 3866362..df46d5d 100644
--- a/src/venus/vkr_common.h
+++ b/src/venus/vkr_common.h
@@ -33,6 +33,171 @@
 
 #include "vkr_renderer.h"
 
+/*
+ * TODO what extensions do we need from the host driver?
+ *
+ * We don't check vkGetPhysicalDeviceExternalBufferProperties, etc. yet. Even
+ * if we did, silently adding external memory info to vkCreateBuffer or
+ * vkCreateImage could change the results of vkGetBufferMemoryRequirements or
+ * vkGetImageMemoryRequirements and confuse the guest.
+ */
+#define FORCE_ENABLE_DMABUF
+
+#define VKR_DEBUG(category) (unlikely(vkr_debug_flags & VKR_DEBUG_##category))
+
+/*
+ * TODO Most of the functions are generated. Some of them are then
+ * hand-edited. Find a better/cleaner way to reduce manual works.
+ */
+#define CREATE_OBJECT(obj, vkr_type, vk_obj, vk_cmd, vk_arg) \
+   struct vkr_device *dev_obj = (struct vkr_device *)args->device; \
+   if (!dev_obj || dev_obj->base.type != VK_OBJECT_TYPE_DEVICE) { \
+      vkr_cs_decoder_set_fatal(&ctx->decoder); \
+      return; \
+   } \
+   \
+   struct vkr_##vkr_type *obj = calloc(1, sizeof(*obj)); \
+   if (!obj) { \
+      args->ret = VK_ERROR_OUT_OF_HOST_MEMORY; \
+      return; \
+   } \
+   obj->base.type = VK_OBJECT_TYPE_##vk_obj; \
+   obj->base.id = vkr_cs_handle_load_id((const void **)args->vk_arg, obj->base.type); \
+   \
+   vn_replace_##vk_cmd##_args_handle(args); \
+   args->ret = \
+      vk_cmd(args->device, args->pCreateInfo, NULL, &obj->base.handle.vkr_type); \
+   if (args->ret != VK_SUCCESS) { \
+      free(obj); \
+      return; \
+   } \
+   \
+   list_add(&obj->base.track_head, &dev_obj->objects)
+
+#define DESTROY_OBJECT(obj, vkr_type, vk_obj, vk_cmd, vk_arg) \
+   struct vkr_##vkr_type *obj = (struct vkr_##vkr_type *)(uintptr_t)args->vk_arg; \
+   if (!obj || obj->base.type != VK_OBJECT_TYPE_##vk_obj) { \
+      if (obj) \
+         vkr_cs_decoder_set_fatal(&ctx->decoder); \
+      return; \
+   } \
+   \
+   vn_replace_##vk_cmd##_args_handle(args); \
+   vk_cmd(args->device, args->vk_arg, NULL); \
+   \
+   list_del(&obj->base.track_head)
+
+#define ALLOCATE_OBJECT_ARRAY(obj, vkr_type, vk_type, vk_obj, vk_cmd, arg_count, \
+                              arg_pool, vkr_pool_type, vk_pool_type) \
+   do { \
+      struct vkr_device *dev = (struct vkr_device *)args->device; \
+      if (!dev || dev->base.type != VK_OBJECT_TYPE_DEVICE) { \
+         vkr_cs_decoder_set_fatal(&ctx->decoder); \
+         return; \
+      } \
+      \
+      struct vkr_##vkr_pool_type *pool = \
+         (struct vkr_##vkr_pool_type *)(uintptr_t)args->pAllocateInfo->arg_pool; \
+      if (!pool || pool->base.type != VK_OBJECT_TYPE_##vk_pool_type) { \
+         vkr_cs_decoder_set_fatal(&ctx->decoder); \
+         return; \
+      } \
+      \
+      struct object_array arr; \
+      if (!object_array_init(&arr, args->pAllocateInfo->arg_count, \
+                             VK_OBJECT_TYPE_##vk_type, sizeof(struct vkr_##vkr_type), \
+                             sizeof(Vk##vk_obj), args->p##vk_obj##s)) { \
+         args->ret = VK_ERROR_OUT_OF_HOST_MEMORY; \
+         return; \
+      } \
+      \
+      vn_replace_##vk_cmd##_args_handle(args); \
+      args->ret = vk_cmd(args->device, args->pAllocateInfo, arr.handle_storage); \
+      if (args->ret != VK_SUCCESS) { \
+         object_array_fini(&arr); \
+         return; \
+      } \
+      \
+      for (uint32_t i = 0; i < arr.count; i++) { \
+         struct vkr_##vkr_type *obj = arr.objects[i]; \
+         \
+         obj->base.handle.vkr_type = ((Vk##vk_obj *)arr.handle_storage)[i]; \
+         obj->device = dev; \
+         \
+         /* pool objects are tracked by the pool other than the device */ \
+         list_add(&obj->head, &pool->vkr_type##s); \
+         \
+         util_hash_table_set_u64(ctx->object_table, obj->base.id, obj); \
+      } \
+      \
+      arr.objects_stolen = true; \
+      object_array_fini(&arr); \
+   } while (0)
+
+#define FREE_OBJECT_ARRAY(obj, vkr_type, vk_type, vk_cmd, arg_obj, arg_count, arg_pool) \
+   do { \
+      struct list_head free_list; \
+      \
+      list_inithead(&free_list); \
+      for (uint32_t i = 0; i < args->arg_count; i++) { \
+         struct vkr_##vkr_type *obj = (struct vkr_##vkr_type *)args->arg_obj[i]; \
+         if (!obj) \
+            continue; \
+         if (obj->base.type != VK_OBJECT_TYPE_##vk_type) { \
+            vkr_cs_decoder_set_fatal(&ctx->decoder); \
+            return; \
+         } \
+         \
+         list_del(&obj->head); \
+         list_addtail(&obj->head, &free_list); \
+      } \
+      \
+      vn_replace_##vk_cmd##_args_handle(args); \
+      vk_cmd(args->device, args->arg_pool, args->arg_count, args->arg_obj); \
+      \
+      struct vkr_##vkr_type *obj, *tmp; \
+      LIST_FOR_EACH_ENTRY_SAFE (obj, tmp, &free_list, head) \
+         util_hash_table_remove_u64(ctx->object_table, obj->base.id); \
+   } while (0)
+
+#define CREATE_PIPELINE_ARRAY(vk_cmd) \
+   do { \
+      struct vkr_device *dev = (struct vkr_device *)args->device; \
+      if (!dev || dev->base.type != VK_OBJECT_TYPE_DEVICE) { \
+         vkr_cs_decoder_set_fatal(&ctx->decoder); \
+         return; \
+      } \
+      \
+      struct object_array arr; \
+      if (!object_array_init(&arr, args->createInfoCount, VK_OBJECT_TYPE_PIPELINE, \
+                             sizeof(struct vkr_pipeline), sizeof(VkPipeline), \
+                             args->pPipelines)) { \
+         args->ret = VK_ERROR_OUT_OF_HOST_MEMORY; \
+         return; \
+      } \
+      \
+      vn_replace_##vk_cmd##_args_handle(args); \
+      args->ret = vk_cmd(args->device, args->pipelineCache, args->createInfoCount, \
+                         args->pCreateInfos, NULL, arr.handle_storage); \
+      if (args->ret != VK_SUCCESS) { \
+         object_array_fini(&arr); \
+         return; \
+      } \
+      \
+      for (uint32_t i = 0; i < arr.count; i++) { \
+         struct vkr_pipeline *pipeline = arr.objects[i]; \
+         \
+         pipeline->base.handle.pipeline = ((VkPipeline *)arr.handle_storage)[i]; \
+         \
+         list_add(&pipeline->base.track_head, &dev->objects); \
+         \
+         util_hash_table_set_u64(ctx->object_table, pipeline->base.id, pipeline); \
+      } \
+      \
+      arr.objects_stolen = true; \
+      object_array_fini(&arr); \
+   } while (0)
+
 struct vkr_context;
 struct vkr_instance;
 struct vkr_physical_device;
@@ -64,6 +229,10 @@ struct vkr_command_buffer;
 
 typedef uint64_t vkr_object_id;
 
+enum vkr_debug_flags {
+   VKR_DEBUG_VALIDATE = 1 << 0,
+};
+
 /* base class for all objects */
 struct vkr_object {
    VkObjectType type;
@@ -105,4 +274,41 @@ struct vkr_object {
    struct list_head track_head;
 };
 
+struct object_array {
+   uint32_t count;
+   void **objects;
+   void *handle_storage;
+
+   /* true if the ownership of the objects has been transferred (to
+    * vkr_context::object_table)
+    */
+   bool objects_stolen;
+};
+
+extern uint32_t vkr_renderer_flags;
+extern uint32_t vkr_debug_flags;
+
+bool
+object_array_init(struct object_array *arr,
+                  uint32_t count,
+                  VkObjectType obj_type,
+                  size_t obj_size,
+                  size_t handle_size,
+                  const void *handles);
+
+void
+object_array_fini(struct object_array *arr);
+
+static inline void *
+vkr_find_pnext(const void *chain, VkStructureType type)
+{
+   VkBaseOutStructure *pnext = (VkBaseOutStructure *)chain;
+   while (pnext) {
+      if (pnext->sType == type)
+         return pnext;
+      pnext = pnext->pNext;
+   }
+   return NULL;
+}
+
 #endif /* VKR_COMMON_H */
diff --git a/src/venus/vkr_renderer.c b/src/venus/vkr_renderer.c
index f097417..219a285 100644
--- a/src/venus/vkr_renderer.c
+++ b/src/venus/vkr_renderer.c
@@ -32,256 +32,13 @@
 #include "vkr_render_pass.h"
 #include "vkr_ring.h"
 
-/*
- * TODO what extensions do we need from the host driver?
- *
- * We don't check vkGetPhysicalDeviceExternalBufferProperties, etc. yet. Even
- * if we did, silently adding external memory info to vkCreateBuffer or
- * vkCreateImage could change the results of vkGetBufferMemoryRequirements or
- * vkGetImageMemoryRequirements and confuse the guest.
- */
-#define FORCE_ENABLE_DMABUF
-
-#define VKR_DEBUG(category) (unlikely(vkr_debug_flags & VKR_DEBUG_##category))
-
-/*
- * TODO Most of the functions are generated. Some of them are then
- * hand-edited. Find a better/cleaner way to reduce manual works.
- */
-#define CREATE_OBJECT(obj, vkr_type, vk_obj, vk_cmd, vk_arg) \
-   struct vkr_device *dev_obj = (struct vkr_device *)args->device; \
-   if (!dev_obj || dev_obj->base.type != VK_OBJECT_TYPE_DEVICE) { \
-      vkr_cs_decoder_set_fatal(&ctx->decoder); \
-      return; \
-   } \
-   \
-   struct vkr_##vkr_type *obj = calloc(1, sizeof(*obj)); \
-   if (!obj) { \
-      args->ret = VK_ERROR_OUT_OF_HOST_MEMORY; \
-      return; \
-   } \
-   obj->base.type = VK_OBJECT_TYPE_##vk_obj; \
-   obj->base.id = vkr_cs_handle_load_id((const void **)args->vk_arg, obj->base.type); \
-   \
-   vn_replace_##vk_cmd##_args_handle(args); \
-   args->ret = \
-      vk_cmd(args->device, args->pCreateInfo, NULL, &obj->base.handle.vkr_type); \
-   if (args->ret != VK_SUCCESS) { \
-      free(obj); \
-      return; \
-   } \
-   \
-   list_add(&obj->base.track_head, &dev_obj->objects)
-
-#define DESTROY_OBJECT(obj, vkr_type, vk_obj, vk_cmd, vk_arg) \
-   struct vkr_##vkr_type *obj = (struct vkr_##vkr_type *)(uintptr_t)args->vk_arg; \
-   if (!obj || obj->base.type != VK_OBJECT_TYPE_##vk_obj) { \
-      if (obj) \
-         vkr_cs_decoder_set_fatal(&ctx->decoder); \
-      return; \
-   } \
-   \
-   vn_replace_##vk_cmd##_args_handle(args); \
-   vk_cmd(args->device, args->vk_arg, NULL); \
-   \
-   list_del(&obj->base.track_head)
-
-#define ALLOCATE_OBJECT_ARRAY(obj, vkr_type, vk_type, vk_obj, vk_cmd, arg_count, \
-                              arg_pool, vkr_pool_type, vk_pool_type) \
-   do { \
-      struct vkr_device *dev = (struct vkr_device *)args->device; \
-      if (!dev || dev->base.type != VK_OBJECT_TYPE_DEVICE) { \
-         vkr_cs_decoder_set_fatal(&ctx->decoder); \
-         return; \
-      } \
-      \
-      struct vkr_##vkr_pool_type *pool = \
-         (struct vkr_##vkr_pool_type *)(uintptr_t)args->pAllocateInfo->arg_pool; \
-      if (!pool || pool->base.type != VK_OBJECT_TYPE_##vk_pool_type) { \
-         vkr_cs_decoder_set_fatal(&ctx->decoder); \
-         return; \
-      } \
-      \
-      struct object_array arr; \
-      if (!object_array_init(&arr, args->pAllocateInfo->arg_count, \
-                             VK_OBJECT_TYPE_##vk_type, sizeof(struct vkr_##vkr_type), \
-                             sizeof(Vk##vk_obj), args->p##vk_obj##s)) { \
-         args->ret = VK_ERROR_OUT_OF_HOST_MEMORY; \
-         return; \
-      } \
-      \
-      vn_replace_##vk_cmd##_args_handle(args); \
-      args->ret = vk_cmd(args->device, args->pAllocateInfo, arr.handle_storage); \
-      if (args->ret != VK_SUCCESS) { \
-         object_array_fini(&arr); \
-         return; \
-      } \
-      \
-      for (uint32_t i = 0; i < arr.count; i++) { \
-         struct vkr_##vkr_type *obj = arr.objects[i]; \
-         \
-         obj->base.handle.vkr_type = ((Vk##vk_obj *)arr.handle_storage)[i]; \
-         obj->device = dev; \
-         \
-         /* pool objects are tracked by the pool other than the device */ \
-         list_add(&obj->head, &pool->vkr_type##s); \
-         \
-         util_hash_table_set_u64(ctx->object_table, obj->base.id, obj); \
-      } \
-      \
-      arr.objects_stolen = true; \
-      object_array_fini(&arr); \
-   } while (0)
-
-#define FREE_OBJECT_ARRAY(obj, vkr_type, vk_type, vk_cmd, arg_obj, arg_count, arg_pool) \
-   do { \
-      struct list_head free_list; \
-      \
-      list_inithead(&free_list); \
-      for (uint32_t i = 0; i < args->arg_count; i++) { \
-         struct vkr_##vkr_type *obj = (struct vkr_##vkr_type *)args->arg_obj[i]; \
-         if (!obj) \
-            continue; \
-         if (obj->base.type != VK_OBJECT_TYPE_##vk_type) { \
-            vkr_cs_decoder_set_fatal(&ctx->decoder); \
-            return; \
-         } \
-         \
-         list_del(&obj->head); \
-         list_addtail(&obj->head, &free_list); \
-      } \
-      \
-      vn_replace_##vk_cmd##_args_handle(args); \
-      vk_cmd(args->device, args->arg_pool, args->arg_count, args->arg_obj); \
-      \
-      struct vkr_##vkr_type *obj, *tmp; \
-      LIST_FOR_EACH_ENTRY_SAFE (obj, tmp, &free_list, head) \
-         util_hash_table_remove_u64(ctx->object_table, obj->base.id); \
-   } while (0)
-
-#define CREATE_PIPELINE_ARRAY(vk_cmd) \
-   do { \
-      struct vkr_device *dev = (struct vkr_device *)args->device; \
-      if (!dev || dev->base.type != VK_OBJECT_TYPE_DEVICE) { \
-         vkr_cs_decoder_set_fatal(&ctx->decoder); \
-         return; \
-      } \
-      \
-      struct object_array arr; \
-      if (!object_array_init(&arr, args->createInfoCount, VK_OBJECT_TYPE_PIPELINE, \
-                             sizeof(struct vkr_pipeline), sizeof(VkPipeline), \
-                             args->pPipelines)) { \
-         args->ret = VK_ERROR_OUT_OF_HOST_MEMORY; \
-         return; \
-      } \
-      \
-      vn_replace_##vk_cmd##_args_handle(args); \
-      args->ret = vk_cmd(args->device, args->pipelineCache, args->createInfoCount, \
-                         args->pCreateInfos, NULL, arr.handle_storage); \
-      if (args->ret != VK_SUCCESS) { \
-         object_array_fini(&arr); \
-         return; \
-      } \
-      \
-      for (uint32_t i = 0; i < arr.count; i++) { \
-         struct vkr_pipeline *pipeline = arr.objects[i]; \
-         \
-         pipeline->base.handle.pipeline = ((VkPipeline *)arr.handle_storage)[i]; \
-         \
-         list_add(&pipeline->base.track_head, &dev->objects); \
-         \
-         util_hash_table_set_u64(ctx->object_table, pipeline->base.id, pipeline); \
-      } \
-      \
-      arr.objects_stolen = true; \
-      object_array_fini(&arr); \
-   } while (0)
-
-enum vkr_debug_flags {
-   VKR_DEBUG_VALIDATE = 1 << 0,
-};
-
 static const struct debug_named_value vkr_debug_options[] = {
    { "validate", VKR_DEBUG_VALIDATE, "Force enabling the validation layer" },
    DEBUG_NAMED_VALUE_END
 };
 
-static uint32_t vkr_renderer_flags;
-static uint32_t vkr_debug_flags;
-
-struct object_array {
-   uint32_t count;
-   void **objects;
-   void *handle_storage;
-
-   /* true if the ownership of the objects has been transferred (to
-    * vkr_context::object_table)
-    */
-   bool objects_stolen;
-};
-
-static void
-object_array_fini(struct object_array *arr)
-{
-   if (!arr->objects_stolen) {
-      for (uint32_t i = 0; i < arr->count; i++)
-         free(arr->objects[i]);
-   }
-
-   free(arr->objects);
-   free(arr->handle_storage);
-}
-
-static bool
-object_array_init(struct object_array *arr,
-                  uint32_t count,
-                  VkObjectType obj_type,
-                  size_t obj_size,
-                  size_t handle_size,
-                  const void *handles)
-{
-   arr->count = count;
-
-   arr->objects = malloc(sizeof(*arr->objects) * count);
-   if (!arr->objects)
-      return false;
-
-   arr->handle_storage = malloc(handle_size * count);
-   if (!arr->handle_storage) {
-      free(arr->objects);
-      return false;
-   }
-
-   arr->objects_stolen = false;
-   for (uint32_t i = 0; i < count; i++) {
-      struct vkr_object *obj = calloc(1, obj_size);
-      if (!obj) {
-         arr->count = i;
-         object_array_fini(arr);
-         return false;
-      }
-
-      obj->type = obj_type;
-      obj->id = vkr_cs_handle_load_id((const void **)((char *)handles + handle_size * i),
-                                      obj->type);
-
-      arr->objects[i] = obj;
-   }
-
-   return arr;
-}
-
-static void *
-vkr_find_pnext(const void *chain, VkStructureType type)
-{
-   VkBaseOutStructure *pnext = (VkBaseOutStructure *)chain;
-   while (pnext) {
-      if (pnext->sType == type)
-         return pnext;
-      pnext = pnext->pNext;
-   }
-   return NULL;
-}
+uint32_t vkr_renderer_flags;
+uint32_t vkr_debug_flags;
 
 static void
 vkr_dispatch_vkSetReplyCommandStreamMESA(
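
The CREATE_OBJECT and DESTROY_OBJECT macros moved into vkr_common.h deliberately leave `ctx` and `args` unbound: they are meant to be expanded inside the dispatch handlers in vkr_renderer.c, where both names are in scope. A minimal sketch of such a call site follows. It is illustrative only and not part of the patch; it assumes the generated venus-protocol types (struct vn_dispatch_context, struct vn_command_vkCreateSampler, struct vn_command_vkDestroySampler) and that dispatch->data points at the struct vkr_context, as the other dispatch functions in vkr_renderer.c do.

   /* Illustrative sketch, not part of the patch. */
   static void
   vkr_dispatch_vkCreateSampler(struct vn_dispatch_context *dispatch,
                                struct vn_command_vkCreateSampler *args)
   {
      struct vkr_context *ctx = dispatch->data;

      /* validates args->device, allocates a struct vkr_sampler, loads its
       * object id from args->pSampler, calls vkCreateSampler, and adds the
       * object to the device's tracking list; returns early on failure */
      CREATE_OBJECT(sampler, sampler, SAMPLER, vkCreateSampler, pSampler);

      util_hash_table_set_u64(ctx->object_table, sampler->base.id, sampler);
   }

   static void
   vkr_dispatch_vkDestroySampler(struct vn_dispatch_context *dispatch,
                                 struct vn_command_vkDestroySampler *args)
   {
      struct vkr_context *ctx = dispatch->data;

      /* validates args->sampler, calls vkDestroySampler, and unlinks the
       * object from the device's tracking list */
      DESTROY_OBJECT(sampler, sampler, SAMPLER, vkDestroySampler, sampler);

      /* drop the object from the context's object table */
      util_hash_table_remove_u64(ctx->object_table, sampler->base.id);
   }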