vkr: add iov support to vkr_ring_buffer

vkr_ring_read_buffer now supports reading from a ring buffer that is on
top of iov.  It is overly complex though.

For further optimization and simplification, we should consider requiring
a logically contiguous virgl_resource.  Possible options are requiring a
physically contiguous guest memory (this can have other use cases) or
requiring a host VkDeviceMemory (already doable, but meh).

We also use the chance to replace size_t with uint32_t in
vkr_ring_read_buffer.  No functional difference.

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Ryan Neph <ryanneph@google.com>
Reviewed-by: Yiwei Zhang <zzyiwei@chromium.org>
macos/master
Chia-I Wu 3 years ago
parent 94a9cbe4ef
commit 6691ebec9f
  1. 69
      src/venus/vkr_ring.c
  2. 9
      src/venus/vkr_ring.h

@ -61,14 +61,18 @@ vkr_ring_init_buffer(struct vkr_ring *ring, const struct vkr_ring_layout *layout
{ {
struct vkr_ring_buffer *buf = &ring->buffer; struct vkr_ring_buffer *buf = &ring->buffer;
const struct iovec *base_iov =
seek_resource(layout->resource, 0, layout->buffer.begin, &buf->base_iov_index,
&buf->base_iov_offset);
buf->size = vkr_region_size(&layout->buffer); buf->size = vkr_region_size(&layout->buffer);
assert(buf->size && util_is_power_of_two(buf->size)); assert(buf->size && util_is_power_of_two(buf->size));
buf->mask = buf->size - 1; buf->mask = buf->size - 1;
buf->cur = 0; buf->cur = 0;
buf->cur_iov = base_iov;
/* TODO iov support */ buf->cur_iov_index = buf->base_iov_index;
buf->data = get_resource_pointer(layout->resource, 0, layout->buffer.begin); buf->cur_iov_offset = buf->base_iov_offset;
} }
static bool static bool
@ -111,22 +115,67 @@ vkr_ring_store_status(struct vkr_ring *ring, uint32_t status)
atomic_store_explicit(ring->control.status, status, memory_order_seq_cst); atomic_store_explicit(ring->control.status, status, memory_order_seq_cst);
} }
/* TODO consider requiring virgl_resource to be logically contiguous */
static void static void
vkr_ring_read_buffer(struct vkr_ring *ring, void *data, size_t size) vkr_ring_read_buffer(struct vkr_ring *ring, void *data, uint32_t size)
{ {
struct vkr_ring_buffer *buf = &ring->buffer; struct vkr_ring_buffer *buf = &ring->buffer;
const struct virgl_resource *res = ring->resource;
const size_t offset = buf->cur & buf->mask;
assert(size <= buf->size); assert(size <= buf->size);
if (offset + size <= buf->size) { const uint32_t buf_offset = buf->cur & buf->mask;
memcpy(data, buf->data + offset, size); const uint32_t buf_avail = buf->size - buf_offset;
const bool wrap = size >= buf_avail;
uint32_t read_size;
uint32_t wrap_size;
if (!wrap) {
read_size = size;
wrap_size = 0;
} else { } else {
const size_t s = buf->size - offset; read_size = buf_avail;
memcpy(data, buf->data + offset, s); /* When size == buf_avail, wrap is true but wrap_size is 0. We want to
memcpy((uint8_t *)data + s, buf->data, size - s); * wrap because it seems slightly faster on the next call. Besides,
* seek_resource does not support seeking to end-of-resource which could
* happen if we don't wrap and the buffer region end coincides with the
* resource end.
*/
wrap_size = size - buf_avail;
} }
/* do the reads */
if (read_size <= buf->cur_iov->iov_len - buf->cur_iov_offset) {
const void *src = (const uint8_t *)buf->cur_iov->iov_base + buf->cur_iov_offset;
memcpy(data, src, read_size);
/* fast path */
if (!wrap) {
assert(!wrap_size);
buf->cur += read_size;
buf->cur_iov_offset += read_size;
return;
}
} else {
vrend_read_from_iovec(buf->cur_iov, res->iov_count - buf->cur_iov_index,
buf->cur_iov_offset, data, read_size);
}
if (wrap_size) {
vrend_read_from_iovec(res->iov + buf->base_iov_index,
res->iov_count - buf->base_iov_index, buf->base_iov_offset,
(char *)data + read_size, wrap_size);
}
/* advance cur */
buf->cur += size; buf->cur += size;
if (!wrap) {
buf->cur_iov = seek_resource(res, buf->cur_iov_index, buf->cur_iov_offset + size,
&buf->cur_iov_index, &buf->cur_iov_offset);
} else {
buf->cur_iov =
seek_resource(res, buf->base_iov_index, buf->base_iov_offset + wrap_size,
&buf->cur_iov_index, &buf->cur_iov_offset);
}
} }
struct vkr_ring * struct vkr_ring *

@ -42,6 +42,10 @@ struct vkr_ring_control {
/* the buffer region of a ring */ /* the buffer region of a ring */
struct vkr_ring_buffer { struct vkr_ring_buffer {
/* the base of the region in the resource */
int base_iov_index;
size_t base_iov_offset;
uint32_t size; uint32_t size;
uint32_t mask; uint32_t mask;
@ -50,7 +54,10 @@ struct vkr_ring_buffer {
*/ */
uint32_t cur; uint32_t cur;
const uint8_t *data; /* The current iov and iov offset in the resource. */
const struct iovec *cur_iov;
int cur_iov_index;
size_t cur_iov_offset;
}; };
/* the extra region of a ring */ /* the extra region of a ring */

Loading…
Cancel
Save