vrend: fix copy/inline transfer with gbm allocation

As in vrend_renderer_transfer_internal, virgl_gbm_transfer is a
special path that is generally preferred for GBM-allocated resources.
However, a copy transfer can be a synchronized transfer, and we don't
want to use GBM in that case (the resulting policy is sketched below).

v2: add a comment on glFinish
v3: use GBM only when VIRGL_TEXTURE_NEED_SWIZZLE is set

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: David Riley <davidriley@chromium.org>
Tested-by: David Riley <davidriley@chromium.org>
Acked-by: Gurchetan Singh <gurchetansingh@chromium.org>
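
To make the resulting policy easier to follow, here is a minimal
standalone sketch of the decision this change puts into
vrend_renderer_copy_transfer3d. The struct and function names below are
hypothetical simplifications for illustration, not virglrenderer API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified view of the state the patch consults. */
struct copy_transfer {
   bool has_gbm_bo;     /* resource was allocated through minigbm */
   bool synchronized;   /* the guest requested a synchronized transfer */
   bool needs_swizzle;  /* no GL internalformat, or VIRGL_TEXTURE_NEED_SWIZZLE */
};

/* Mirrors the v3 logic: prefer the GBM path, but for a synchronized
 * transfer fall back to the iov upload unless GBM is the only path that
 * can handle the format; in that case the host must glFinish() first,
 * because the CPU writes straight into the mapped BO. */
static bool use_gbm_path(const struct copy_transfer *xfer, bool *need_finish)
{
   *need_finish = false;
   if (!xfer->has_gbm_bo)
      return false;
   if (!xfer->synchronized)
      return true;
   if (xfer->needs_swizzle) {
      *need_finish = true;  /* wait for pending GPU use of the BO */
      return true;
   }
   return false;            /* let the GL driver do a pipelined copy */
}

int main(void)
{
   struct copy_transfer xfer = { .has_gbm_bo = true, .synchronized = true,
                                 .needs_swizzle = false };
   bool need_finish;
   printf("use GBM: %s\n", use_gbm_path(&xfer, &need_finish) ? "yes" : "no");
   return 0;
}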
src/virgl_gbm.c      |  4
src/virgl_gbm.h      |  2
src/vrend_renderer.c | 44

--- a/src/virgl_gbm.c
+++ b/src/virgl_gbm.c
@@ -189,7 +189,7 @@ static void virgl_gbm_transfer_internal(uint32_t planar_bytes_per_pixel,
                                         uint32_t guest_plane_stride,
                                         uint32_t guest_resource_offset,
                                         uint32_t host_plane_stride, uint8_t *host_address,
-                                        struct iovec *iovecs, uint32_t num_iovecs,
+                                        const struct iovec *iovecs, uint32_t num_iovecs,
                                         uint32_t direction)
 {
    bool next_iovec, next_line;
@@ -307,7 +307,7 @@ int virgl_gbm_convert_format(uint32_t *virgl_format, uint32_t *gbm_format)
 }
 
 #ifdef ENABLE_MINIGBM_ALLOCATION
-int virgl_gbm_transfer(struct gbm_bo *bo, uint32_t direction, struct iovec *iovecs,
+int virgl_gbm_transfer(struct gbm_bo *bo, uint32_t direction, const struct iovec *iovecs,
                        uint32_t num_iovecs, const struct vrend_transfer_info *info)
 {
    void *map_data;

--- a/src/virgl_gbm.h
+++ b/src/virgl_gbm.h
@@ -51,7 +51,7 @@ void virgl_gbm_fini(struct virgl_gbm *gbm);
 int virgl_gbm_convert_format(uint32_t *virgl_format, uint32_t *gbm_format);
-int virgl_gbm_transfer(struct gbm_bo *bo, uint32_t direction, struct iovec *iovecs,
+int virgl_gbm_transfer(struct gbm_bo *bo, uint32_t direction, const struct iovec *iovecs,
                        uint32_t num_iovecs, const struct vrend_transfer_info *info);
 uint32_t virgl_gbm_convert_flags(uint32_t virgl_bind_flags);

--- a/src/vrend_renderer.c
+++ b/src/vrend_renderer.c
@@ -7656,8 +7656,10 @@ static int vrend_renderer_transfer_internal(struct vrend_context *ctx,
 #ifdef ENABLE_MINIGBM_ALLOCATION
    if (res->gbm_bo && (transfer_mode == VIRGL_TRANSFER_TO_HOST ||
-                       !has_bit(res->storage_bits, VREND_STORAGE_EGL_IMAGE)))
+                       !has_bit(res->storage_bits, VREND_STORAGE_EGL_IMAGE))) {
+      assert(!info->synchronized);
       return virgl_gbm_transfer(res->gbm_bo, transfer_mode, iov, num_iovs, info);
+   }
 #endif
 
    if (!check_transfer_bounds(res, info)) {
@@ -7736,6 +7738,17 @@ int vrend_transfer_inline_write(struct vrend_context *ctx,
       return EINVAL;
    }
 
+#ifdef ENABLE_MINIGBM_ALLOCATION
+   if (res->gbm_bo) {
+      assert(!info->synchronized);
+      return virgl_gbm_transfer(res->gbm_bo,
+                                VIRGL_TRANSFER_TO_HOST,
+                                info->iovec,
+                                info->iovec_cnt,
+                                info);
+   }
+#endif
+
    return vrend_renderer_transfer_write_iov(ctx, res, info->iovec, info->iovec_cnt, info);
 }
@@ -7775,6 +7788,35 @@ int vrend_renderer_copy_transfer3d(struct vrend_context *ctx,
       return EINVAL;
    }
 
+#ifdef ENABLE_MINIGBM_ALLOCATION
+   if (dst_res->gbm_bo) {
+      bool use_gbm = true;
+
+      /* The guest uses copy transfers against busy resources to avoid
+       * waiting. The host driver is usually smart enough to avoid blocking
+       * by putting the data in a staging buffer and doing a pipelined copy.
+       *
+       * However, we cannot do that with GBM. Use GBM only when we have to
+       * (until vrend_renderer_transfer_write_iov swizzles).
+       */
+      if (info->synchronized) {
+         if (tex_conv_table[dst_res->base.format].internalformat == 0 ||
+             tex_conv_table[dst_res->base.format].flags & VIRGL_TEXTURE_NEED_SWIZZLE)
+            glFinish();
+         else
+            use_gbm = false;
+      }
+
+      if (use_gbm) {
+         return virgl_gbm_transfer(dst_res->gbm_bo,
+                                   VIRGL_TRANSFER_TO_HOST,
+                                   src_res->iov,
+                                   src_res->num_iovs,
+                                   info);
+      }
+   }
+#endif
+
    return vrend_renderer_transfer_write_iov(ctx, dst_res, src_res->iov,
                                             src_res->num_iovs, info);
 }
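
For context on why the synchronized case has to glFinish before taking
the GBM path, here is a rough sketch of what a GBM write boils down to.
This is not the actual virgl_gbm_transfer implementation; it assumes a
single-plane, linearly mapped BO whose layout matches the guest data,
and it ignores differing strides, swizzling, and the read-back direction.

#include <stdint.h>
#include <string.h>
#include <sys/uio.h>
#include <gbm.h>

/* Copy the guest iovecs straight into the mapped BO. Because the CPU
 * writes land directly in the allocation, any GPU work still using the
 * BO must be finished before a synchronized transfer can proceed. */
static int write_iovs_to_bo(struct gbm_bo *bo, const struct iovec *iovs,
                            unsigned num_iovs)
{
   uint32_t stride;
   void *map_data = NULL;
   uint8_t *dst = gbm_bo_map(bo, 0, 0, gbm_bo_get_width(bo),
                             gbm_bo_get_height(bo), GBM_BO_TRANSFER_WRITE,
                             &stride, &map_data);
   if (!dst)
      return -1;

   for (unsigned i = 0; i < num_iovs; i++) {
      memcpy(dst, iovs[i].iov_base, iovs[i].iov_len);
      dst += iovs[i].iov_len;
   }

   gbm_bo_unmap(bo, map_data);
   return 0;
}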
