vkr: move queue creation into device creation and handle errors

vkr_queue_create returns the created queue or NULL for simplicity and we
map NULL to VK_ERROR_OUT_OF_HOST_MEMORY.

Signed-off-by: Yiwei Zhang <zzyiwei@chromium.org>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Ryan Neph <ryanneph@google.com>
macos/master
Yiwei Zhang 3 years ago
parent 6ae45fe585
commit 4c646dc68f
  1. src/venus/vkr_device.c    (48 lines changed)
  2. src/venus/vkr_queue.c     (84 lines changed)
  3. src/venus/vkr_queue.h     (8 lines changed)

@@ -14,6 +14,44 @@
#include "vkr_physical_device.h"
#include "vkr_queue.h"
/* Retrieve every queue requested at device creation time and wrap each one
 * in a vkr_queue.  On allocation failure all queues created so far are torn
 * down and VK_ERROR_OUT_OF_HOST_MEMORY is returned; on success dev->queues
 * holds one entry per (family, index) pair requested.
 */
static VkResult
vkr_device_create_queues(struct vkr_context *ctx,
                         struct vkr_device *dev,
                         uint32_t create_info_count,
                         const VkDeviceQueueCreateInfo *create_infos)
{
   list_inithead(&dev->queues);

   for (uint32_t info_idx = 0; info_idx < create_info_count; info_idx++) {
      const VkDeviceQueueCreateInfo *create_info = &create_infos[info_idx];

      for (uint32_t queue_idx = 0; queue_idx < create_info->queueCount; queue_idx++) {
         const VkDeviceQueueInfo2 info = {
            .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
            .pNext = NULL,
            .flags = create_info->flags,
            .queueFamilyIndex = create_info->queueFamilyIndex,
            .queueIndex = queue_idx,
         };
         VkQueue handle = VK_NULL_HANDLE;
         vkGetDeviceQueue2(dev->base.handle.device, &info, &handle);

         struct vkr_queue *queue = vkr_queue_create(
            ctx, dev, info.flags, info.queueFamilyIndex, info.queueIndex, handle);
         if (!queue) {
            /* unwind the queues created by earlier iterations */
            struct vkr_queue *entry, *tmp;
            LIST_FOR_EACH_ENTRY_SAFE (entry, tmp, &dev->queues, base.track_head)
               vkr_queue_destroy(ctx, entry);

            return VK_ERROR_OUT_OF_HOST_MEMORY;
         }

         /* queues are not tracked as device objects */
         list_add(&queue->base.track_head, &dev->queues);
      }
   }

   return VK_SUCCESS;
}
static void
vkr_device_init_entry_points(struct vkr_device *dev, uint32_t api_version)
{
@@ -168,9 +206,15 @@ vkr_dispatch_vkCreateDevice(struct vn_dispatch_context *dispatch,
dev->physical_device = physical_dev;
vkr_device_init_entry_points(dev, physical_dev->api_version);
args->ret = vkr_device_create_queues(ctx, dev, args->pCreateInfo->queueCreateInfoCount,
args->pCreateInfo->pQueueCreateInfos);
if (args->ret != VK_SUCCESS) {
vkDestroyDevice(dev->base.handle.device, NULL);
free(dev);
return;
}
list_inithead(&dev->queues);
vkr_device_init_entry_points(dev, physical_dev->api_version);
mtx_init(&dev->free_sync_mutex, mtx_plain);
list_inithead(&dev->free_syncs);

@@ -148,7 +148,10 @@ vkr_queue_destroy(struct vkr_context *ctx, struct vkr_queue *queue)
list_del(&queue->busy_head);
list_del(&queue->base.track_head);
if (queue->base.id)
util_hash_table_remove_u64(ctx->object_table, queue->base.id);
else
free(queue);
}
static int
@@ -199,29 +202,23 @@ vkr_queue_thread(void *arg)
return 0;
}
static struct vkr_queue *
struct vkr_queue *
vkr_queue_create(struct vkr_context *ctx,
struct vkr_device *dev,
vkr_object_id id,
VkQueue handle,
VkDeviceQueueCreateFlags flags,
uint32_t family,
uint32_t index)
uint32_t index,
VkQueue handle)
{
struct vkr_queue *queue;
int ret;
LIST_FOR_EACH_ENTRY (queue, &dev->queues, base.track_head) {
if (queue->flags == flags && queue->family == family && queue->index == index)
return queue;
}
queue = calloc(1, sizeof(*queue));
if (!queue)
return NULL;
queue->base.type = VK_OBJECT_TYPE_QUEUE;
queue->base.id = id;
/* queue->base.id is not assigned until vkr_queue_assign_object_id */
queue->base.handle.queue = handle;
queue->context = ctx;
@@ -257,13 +254,41 @@ vkr_queue_create(struct vkr_context *ctx,
}
list_inithead(&queue->busy_head);
list_inithead(&queue->base.track_head);
/* queues are not tracked as device objects */
list_addtail(&queue->base.track_head, &dev->queues);
return queue;
}
/* Bind the decoder-supplied object id to a queue and register it in the
 * context's object table.  If the queue already carries an id, a differing
 * id from the guest is a fatal decoding error; a matching id is a no-op.
 */
static void
vkr_queue_assign_object_id(struct vkr_context *ctx,
                           struct vkr_queue *queue,
                           vkr_object_id id)
{
   const vkr_object_id current = queue->base.id;
   if (current) {
      if (current != id)
         vkr_cs_decoder_set_fatal(&ctx->decoder);
      return;
   }

   queue->base.id = id;
   util_hash_table_set_u64(ctx->object_table, id, queue);
}
/* Find the queue created with the given (flags, family, index) triple on
 * this device, or NULL if no such queue exists in dev->queues.
 */
static struct vkr_queue *
vkr_device_lookup_queue(struct vkr_device *dev,
                        VkDeviceQueueCreateFlags flags,
                        uint32_t family,
                        uint32_t index)
{
   struct vkr_queue *iter;
   LIST_FOR_EACH_ENTRY (iter, &dev->queues, base.track_head) {
      if (iter->flags != flags || iter->family != family || iter->index != index)
         continue;
      return iter;
   }

   return NULL;
}
static void
@@ -278,18 +303,16 @@ vkr_dispatch_vkGetDeviceQueue(struct vn_dispatch_context *dispatch,
return;
}
struct vkr_queue *queue = vkr_device_lookup_queue(
dev, 0 /* flags */, args->queueFamilyIndex, args->queueIndex);
if (!queue) {
vkr_cs_decoder_set_fatal(&ctx->decoder);
return;
}
const vkr_object_id id =
vkr_cs_handle_load_id((const void **)args->pQueue, VK_OBJECT_TYPE_QUEUE);
VkQueue handle;
vn_replace_vkGetDeviceQueue_args_handle(args);
vkGetDeviceQueue(args->device, args->queueFamilyIndex, args->queueIndex, &handle);
struct vkr_queue *queue = vkr_queue_create(ctx, dev, id, handle, 0 /* flags */,
args->queueFamilyIndex, args->queueIndex);
/* TODO create queues with device and deal with failures there */
if (!queue)
vrend_printf("failed to create queue\n");
vkr_queue_assign_object_id(ctx, queue, id);
}
static void
@@ -304,16 +327,17 @@ vkr_dispatch_vkGetDeviceQueue2(struct vn_dispatch_context *dispatch,
return;
}
struct vkr_queue *queue = vkr_device_lookup_queue(dev, args->pQueueInfo->flags,
args->pQueueInfo->queueFamilyIndex,
args->pQueueInfo->queueIndex);
if (!queue) {
vkr_cs_decoder_set_fatal(&ctx->decoder);
return;
}
const vkr_object_id id =
vkr_cs_handle_load_id((const void **)args->pQueue, VK_OBJECT_TYPE_QUEUE);
VkQueue handle;
vn_replace_vkGetDeviceQueue2_args_handle(args);
vkGetDeviceQueue2(args->device, args->pQueueInfo, &handle);
/* TODO deal with errors */
vkr_queue_create(ctx, dev, id, handle, args->pQueueInfo->flags,
args->pQueueInfo->queueFamilyIndex, args->pQueueInfo->queueIndex);
vkr_queue_assign_object_id(ctx, queue, id);
}
static void

@@ -91,6 +91,14 @@ vkr_queue_retire_syncs(struct vkr_queue *queue,
struct list_head *retired_syncs,
bool *queue_empty);
/**
 * Create a vkr_queue wrapping the driver queue \p handle, owned by \p dev.
 *
 * Returns the new queue, or NULL on allocation failure; the caller maps
 * NULL to VK_ERROR_OUT_OF_HOST_MEMORY (see vkr_device_create_queues).
 * NOTE(review): the object id is assigned later, when the guest first
 * looks the queue up — presumably via vkGetDeviceQueue(2); confirm.
 */
struct vkr_queue *
vkr_queue_create(struct vkr_context *ctx,
                 struct vkr_device *dev,
                 VkDeviceQueueCreateFlags flags,
                 uint32_t family,
                 uint32_t index,
                 VkQueue handle);
void
vkr_queue_destroy(struct vkr_context *ctx, struct vkr_queue *queue);

Loading…
Cancel
Save