vkr: drop transfer_3d support

Venus does not do transfers at all.

Signed-off-by: Yiwei Zhang <zzyiwei@chromium.org>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Branch: macos/master
Author: Yiwei Zhang, 3 years ago
parent dcbe205df5
commit 2668dae374
Changed files:
   src/venus/vkr_context.c       (103 lines changed)
   src/venus/vkr_context.h       (7 lines changed)
   src/venus/vkr_device_memory.c (3 lines changed)
   src/venus/vkr_device_memory.h (2 lines changed)

--- a/src/venus/vkr_context.c
+++ b/src/venus/vkr_context.c
@@ -13,7 +13,6 @@
 #include "pipe/p_state.h"
 #include "util/anon_file.h"
 #include "venus-protocol/vn_protocol_renderer_dispatches.h"
-#include "virgl_protocol.h" /* for transfer_mode */
 #define XXH_INLINE_ALL
 #include "util/xxhash.h"
@@ -408,97 +407,23 @@ vkr_context_get_blob_done(struct virgl_context *base,
    struct vkr_context *ctx = (struct vkr_context *)base;
    struct vkr_device_memory *mem = blob->renderer_data;
 
-   /* when blob_id is 0, there is no associated mem */
-   if (mem) {
+   if (mem)
       mem->exported = true;
-      mem->exported_res_id = res_id;
-      list_add(&mem->exported_head, &ctx->newly_exported_memories);
-   }
 
    /* XXX locked in vkr_context_get_blob */
    mtx_unlock(&ctx->mutex);
 }
 
-static int
-vkr_context_transfer_3d_locked(struct virgl_context *base,
-                               struct virgl_resource *res,
-                               const struct vrend_transfer_info *info,
-                               int transfer_mode)
-{
-   struct vkr_context *ctx = (struct vkr_context *)base;
-   struct vkr_resource_attachment *att;
-   const struct iovec *iov;
-   int iov_count;
-
-   if (info->level || info->stride || info->layer_stride)
-      return -EINVAL;
-
-   if (info->iovec) {
-      iov = info->iovec;
-      iov_count = info->iovec_cnt;
-   } else {
-      iov = res->iov;
-      iov_count = res->iov_count;
-   }
-
-   if (!iov || !iov_count)
-      return 0;
-
-   att = vkr_context_get_resource(ctx, res->res_id);
-   if (!att)
-      return -EINVAL;
-
-   assert(att->resource == res);
-
-   /* TODO transfer via dmabuf (and find a solution to coherency issues) */
-   if (LIST_IS_EMPTY(&att->memories)) {
-      vkr_log("unable to transfer without VkDeviceMemory (TODO)");
-      return -EINVAL;
-   }
-
-   struct vkr_device_memory *mem =
-      LIST_ENTRY(struct vkr_device_memory, att->memories.next, exported_head);
-   const VkMappedMemoryRange range = {
-      .sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
-      .memory = mem->base.handle.device_memory,
-      .offset = info->box->x,
-      .size = info->box->width,
-   };
-
-   void *ptr;
-   VkDevice dev_handle = mem->device->base.handle.device;
-   VkResult result =
-      vkMapMemory(dev_handle, range.memory, range.offset, range.size, 0, &ptr);
-   if (result != VK_SUCCESS)
-      return -EINVAL;
-
-   if (transfer_mode == VIRGL_TRANSFER_TO_HOST) {
-      vrend_read_from_iovec(iov, iov_count, range.offset, ptr, range.size);
-      vkFlushMappedMemoryRanges(dev_handle, 1, &range);
-   } else {
-      vkInvalidateMappedMemoryRanges(dev_handle, 1, &range);
-      vrend_write_to_iovec(iov, iov_count, range.offset, ptr, range.size);
-   }
-
-   vkUnmapMemory(dev_handle, range.memory);
-
-   return 0;
-}
-
 static int
 vkr_context_transfer_3d(struct virgl_context *base,
                         struct virgl_resource *res,
-                        const struct vrend_transfer_info *info,
-                        int transfer_mode)
+                        UNUSED const struct vrend_transfer_info *info,
+                        UNUSED int transfer_mode)
 {
    struct vkr_context *ctx = (struct vkr_context *)base;
-   int ret;
 
-   mtx_lock(&ctx->mutex);
-   ret = vkr_context_transfer_3d_locked(base, res, info, transfer_mode);
-   mtx_unlock(&ctx->mutex);
-
-   return ret;
+   vkr_log("no transfer support for ctx %d and res %d", ctx->base.ctx_id, res->res_id);
+   return -1;
 }
 
 static void
@@ -528,17 +453,6 @@ vkr_context_attach_resource_locked(struct virgl_context *base, struct virgl_reso
    }
 
    att->resource = res;
-   list_inithead(&att->memories);
-
-   /* associate a memory with the resource, if any */
-   struct vkr_device_memory *mem;
-   LIST_FOR_EACH_ENTRY (mem, &ctx->newly_exported_memories, exported_head) {
-      if (mem->exported_res_id == res->res_id) {
-         list_del(&mem->exported_head);
-         list_addtail(&mem->exported_head, &att->memories);
-         break;
-      }
-   }
 
    if (mmap_ptr) {
       att->shm_iov.iov_base = mmap_ptr;
@@ -680,11 +594,6 @@ void
 vkr_context_free_resource(struct hash_entry *entry)
 {
    struct vkr_resource_attachment *att = entry->data;
-   struct vkr_device_memory *mem, *tmp;
-
-   LIST_FOR_EACH_ENTRY_SAFE (mem, tmp, &att->memories, exported_head)
-      list_delinit(&mem->exported_head);
-
    free(att);
 }
@@ -730,8 +639,6 @@ vkr_context_create(size_t debug_len, const char *debug_name)
    if (!ctx->object_table || !ctx->resource_table)
       goto fail;
 
-   list_inithead(&ctx->newly_exported_memories);
-
    vkr_cs_decoder_init(&ctx->decoder, ctx->object_table);
    vkr_cs_encoder_init(&ctx->encoder, &ctx->decoder.fatal_error);
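Note: the deleted vkr_context_transfer_3d_locked above followed the usual Vulkan recipe for copying through possibly non-coherent mapped memory: flush after host writes so the device observes them, invalidate before host reads so stale caches are discarded. A minimal standalone sketch of the upload direction under that recipe (the helper name and parameters here are illustrative, not part of this tree):

   #include <string.h>
   #include <vulkan/vulkan.h>

   /* Illustrative: copy `size` bytes from `src` into a mapped, possibly
    * non-coherent VkDeviceMemory at `offset`. */
   static VkResult
   upload_through_mapping(VkDevice dev, VkDeviceMemory mem,
                          VkDeviceSize offset, VkDeviceSize size, const void *src)
   {
      const VkMappedMemoryRange range = {
         .sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
         .memory = mem,
         .offset = offset,
         .size = size,
      };
      void *ptr;
      VkResult result = vkMapMemory(dev, mem, offset, size, 0, &ptr);
      if (result != VK_SUCCESS)
         return result;

      memcpy(ptr, src, size);
      /* required for visibility on non-HOST_COHERENT memory; harmless otherwise */
      result = vkFlushMappedMemoryRanges(dev, 1, &range);
      vkUnmapMemory(dev, mem);
      return result;
   }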

--- a/src/venus/vkr_context.h
+++ b/src/venus/vkr_context.h
@@ -20,15 +20,9 @@ struct virgl_resource;
  * When a virgl_resource is attached in vkr_context_attach_resource, a
  * vkr_resource_attachment is created. A vkr_resource_attachment is valid
  * until the resource it tracks is detached.
- *
- * To support transfers to resources not backed by coherent dma-bufs, we
- * associate a vkr_resource_attachment with a (list of) vkr_device_memory.
- * This way, we can find a vkr_device_memory from a vkr_resource_attachment
- * and do transfers using VkDeviceMemory.
  */
 struct vkr_resource_attachment {
    struct virgl_resource *resource;
-   struct list_head memories;
 
    /* if VIRGL_RESOURCE_FD_SHM, this is the mapping of the shm and iov below
     * points to this
@@ -60,7 +54,6 @@ struct vkr_context {
    struct list_head rings;
    struct hash_table *object_table;
    struct hash_table *resource_table;
-   struct list_head newly_exported_memories;
    struct vkr_cs_encoder encoder;
    struct vkr_cs_decoder decoder;

--- a/src/venus/vkr_device_memory.c
+++ b/src/venus/vkr_device_memory.c
@@ -226,7 +226,6 @@ vkr_dispatch_vkAllocateMemory(struct vn_dispatch_context *dispatch,
    mem->property_flags = property_flags;
    mem->valid_fd_types = valid_fd_types;
    mem->gbm_bo = gbm_bo;
-   list_inithead(&mem->exported_head);
 }
@@ -333,8 +332,6 @@ vkr_context_init_device_memory_dispatch(struct vkr_context *ctx)
 void
 vkr_device_memory_release(struct vkr_device_memory *mem)
 {
-   list_del(&mem->exported_head);
-
    if (mem->gbm_bo)
      gbm_bo_destroy(mem->gbm_bo);
 }

--- a/src/venus/vkr_device_memory.h
+++ b/src/venus/vkr_device_memory.h
@@ -21,8 +21,6 @@ struct vkr_device_memory {
    struct gbm_bo *gbm_bo;
 
    bool exported;
-   uint32_t exported_res_id;
-   struct list_head exported_head;
 };
 
 VKR_DEFINE_OBJECT_CAST(device_memory, VK_OBJECT_TYPE_DEVICE_MEMORY, VkDeviceMemory)
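Note: with the export bookkeeping gone, vkr_device_memory keeps only the `exported` flag from this group; vkr_context_get_blob_done still sets it but no longer records a resource id or links the memory to an attachment. Condensed from the hunk above (earlier fields elided):

   struct vkr_device_memory {
      /* ... base object, device, property_flags, valid_fd_types ... */
      struct gbm_bo *gbm_bo;

      bool exported;
   };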
