vrend: Improve iov bound checking for transfers

This commit improves bound checking by more accurately calculating
the expected transfer size. This improvement is achieved in two ways:

1. Using the image level (layer_)stride when the (layer_)stride is 0,
   which matches the interpretation we use for performing the actual
   read/write.

2. Calculating the transfer size by using the exact end offset in
   the image, instead of using multiples of whole image rows.

The increased accuracy improves safety, and it also allows us to support
some transfers with explicit strides that were previously rejected (see
the added test and the worked example below).
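
For illustration, here is a rough, stand-alone sketch of the two size
calculations, using values that mirror the added test: a 4x3 RGBA resource
(row stride 16 bytes, 48-byte backing iov), a 2x2 box at (2, 1), an explicit
stride of 16, and a transfer offset of 24. It assumes an uncompressed
4-bytes-per-pixel format (so block counts equal pixel counts) and is not the
renderer code itself:

   #include <stdint.h>
   #include <stdio.h>

   int main(void)
   {
      uint32_t blocksize = 4;          /* bytes per pixel (assumed RGBA)         */
      uint32_t stride = 16;            /* explicit transfer stride: one full row */
      uint32_t layer_stride = 3 * 16;  /* image layer stride at level 0          */
      uint32_t w = 2, h = 2, d = 1;    /* transfer box                           */
      uint32_t offset = 6 * 4;         /* 24 bytes into the iov                  */
      uint32_t iov_size = 4 * 3 * 4;   /* 48 bytes of backing storage            */

      /* Previous check: whole rows for every row of the box, times depth. */
      uint64_t old_size = (uint64_t)h * stride * d;                       /* 32 */

      /* New check: exact end offset of the transferred data in the image. */
      uint64_t new_size = (uint64_t)(d - 1) * layer_stride +
                          (uint64_t)(h - 1) * stride +
                          (uint64_t)w * blocksize;                        /* 24 */

      printf("old: offset %u + size %llu = %llu vs iov %u -> %s\n",
             offset, (unsigned long long)old_size,
             (unsigned long long)(offset + old_size), iov_size,
             offset + old_size <= iov_size ? "accepted" : "rejected");
      printf("new: offset %u + size %llu = %llu vs iov %u -> %s\n",
             offset, (unsigned long long)new_size,
             (unsigned long long)(offset + new_size), iov_size,
             offset + new_size <= iov_size ? "accepted" : "rejected");
      return 0;
   }

With these values the old check requires 24 + 32 = 56 bytes from a 48-byte
iov and rejects the transfer, while the new check requires exactly
24 + 24 = 48 bytes and accepts it.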

Signed-off-by: Alexandros Frantzis <alexandros.frantzis@collabora.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
commit 0bb764b6cf (parent 2af3a83136)
Author: Alexandros Frantzis, committed by Gurchetan Singh

 src/vrend_renderer.c        | 97
 tests/test_virgl_transfer.c | 34
 tests/testvirgl_encode.c    | 13
 tests/testvirgl_encode.h    |  7

--- a/src/vrend_renderer.c
+++ b/src/vrend_renderer.c
@@ -6416,42 +6416,95 @@ static bool check_transfer_bounds(struct vrend_resource *res,
    return true;
 }
 
+/* Calculate the size of the memory needed to hold all the data of a
+ * transfer for particular stride values.
+ */
+static uint64_t vrend_transfer_size(struct vrend_resource *vres,
+                                    const struct vrend_transfer_info *info,
+                                    uint32_t stride, uint32_t layer_stride)
+{
+   struct pipe_resource *pres = &vres->base;
+   struct pipe_box *box = info->box;
+   uint64_t size;
+   /* For purposes of size calculation, assume that invalid dimension values
+    * correspond to 1.
+    */
+   int w = box->width > 0 ? box->width : 1;
+   int h = box->height > 0 ? box->height : 1;
+   int d = box->depth > 0 ? box->depth : 1;
+   int nblocksx = util_format_get_nblocksx(pres->format, w);
+   int nblocksy = util_format_get_nblocksy(pres->format, h);
+
+   /* Calculate the box size, not including the last layer. The last layer
+    * is the only one which may be incomplete, and is the only layer for
+    * non 3d/2d-array formats.
+    */
+   size = (d - 1) * layer_stride;
+   /* Calculate the size of the last (or only) layer, not including the last
+    * block row. The last block row is the only one which may be incomplete and
+    * is the only block row for non 2d/1d-array formats.
+    */
+   size += (nblocksy - 1) * stride;
+   /* Calculate the size of the last (or only) block row. */
+   size += nblocksx * util_format_get_blocksize(pres->format);
+
+   return size;
+}
+
 static bool check_iov_bounds(struct vrend_resource *res,
                              const struct vrend_transfer_info *info,
                              struct iovec *iov, int num_iovs)
 {
-   GLuint send_size;
+   GLuint transfer_size;
    GLuint iovsize = vrend_get_iovec_size(iov, num_iovs);
    GLuint valid_stride, valid_layer_stride;
 
-   /* validate the send size */
-   valid_stride = util_format_get_stride(res->base.format, info->box->width);
-   if (info->stride) {
-      /* only validate passed in stride for boxes with height */
-      if (info->box->height > 1) {
-         if (info->stride < valid_stride)
-            return false;
-         valid_stride = info->stride;
-      }
+   /* If the transfer specifies a stride, verify that it's at least as large as
+    * the minimum required for the transfer. If no stride is specified use the
+    * image stride for the specified level. For backward compatibility, we only
+    * use any guest-specified transfer stride for boxes with height.
+    */
+   if (info->stride && info->box->height > 1) {
+      GLuint min_stride = util_format_get_stride(res->base.format, info->box->width);
+      if (info->stride < min_stride)
+         return false;
+      valid_stride = info->stride;
+   } else {
+      valid_stride = util_format_get_stride(res->base.format,
+                                            u_minify(res->base.width0, info->level));
    }
 
-   valid_layer_stride = util_format_get_2d_size(res->base.format, valid_stride,
-                                                info->box->height);
-   if (info->layer_stride) {
-      /* only validate passed in layer_stride for boxes with depth */
-      if (info->box->depth > 1) {
-         if (info->layer_stride < valid_layer_stride)
-            return false;
-         valid_layer_stride = info->layer_stride;
-      }
+   /* If the transfer specifies a layer_stride, verify that it's at least as
+    * large as the minimum required for the transfer. If no layer_stride is
+    * specified use the image layer_stride for the specified level. For
+    * backward compatibility, we only use any guest-specified transfer
+    * layer_stride for boxes with depth.
+    */
+   if (info->layer_stride && info->box->depth > 1) {
+      GLuint min_layer_stride = util_format_get_2d_size(res->base.format,
+                                                        valid_stride,
+                                                        info->box->height);
+      if (info->layer_stride < min_layer_stride)
+         return false;
+      valid_layer_stride = info->layer_stride;
+   } else {
+      valid_layer_stride =
+         util_format_get_2d_size(res->base.format, valid_stride,
+                                 u_minify(res->base.height0, info->level));
    }
 
-   send_size = valid_layer_stride * info->box->depth;
+   /* Calculate the size required for the transferred data, based on the
+    * calculated or provided strides, and ensure that the iov, starting at the
+    * specified offset, is able to hold at least that size.
+    */
+   transfer_size = vrend_transfer_size(res, info,
+                                       valid_stride,
+                                       valid_layer_stride);
 
    if (iovsize < info->offset)
       return false;
-   if (iovsize < send_size)
+   if (iovsize < transfer_size)
       return false;
-   if (iovsize < info->offset + send_size)
+   if (iovsize < info->offset + transfer_size)
       return false;
 
    return true;
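
As a side note on point 1 of the commit message: when the guest passes a zero
stride (or a box with a height/depth of 1), the else branches above fall back
to the stride implied by the requested mip level's own dimensions, which
matches how the actual read/write interprets a zero stride. The following
stand-alone sketch only illustrates what those fallback values look like for
a hypothetical 8x8 base level with an assumed uncompressed 4-bytes-per-pixel
format; it is not the util_format/u_minify implementation:

   #include <stdint.h>
   #include <stdio.h>

   /* Simplified stand-in for u_minify(): mip level size, clamped to 1. */
   static uint32_t minify(uint32_t base, uint32_t level)
   {
      uint32_t v = base >> level;
      return v ? v : 1;
   }

   int main(void)
   {
      uint32_t blocksize = 4;            /* assume 4 bytes per pixel */
      uint32_t width0 = 8, height0 = 8;  /* hypothetical base level  */

      for (uint32_t level = 0; level < 4; level++) {
         uint32_t stride = minify(width0, level) * blocksize;
         uint32_t layer_stride = minify(height0, level) * stride;
         printf("level %u: fallback stride %u, layer stride %u\n",
                level, stride, layer_stride);
      }
      return 0;
   }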

--- a/tests/test_virgl_transfer.c
+++ b/tests/test_virgl_transfer.c
@@ -932,6 +932,35 @@ START_TEST(virgl_test_copy_transfer_to_staging_with_iov_succeeds)
 }
 END_TEST
 
+START_TEST(virgl_test_transfer_near_res_bounds_with_stride_succeeds)
+{
+   struct virgl_context ctx = {0};
+   struct virgl_resource res = {0};
+   int res_width = 4;
+   int res_height = 3;
+   int res_stride = res_width * 4;
+   struct pipe_box box = {.x = 2, .y = 1, .z = 0, .width = 2, .height = 2, .depth = 1};
+   int ret;
+
+   ret = testvirgl_init_ctx_cmdbuf(&ctx);
+   ck_assert_int_eq(ret, 0);
+
+   ret = testvirgl_create_backed_simple_2d_res(&res, 1, res_width, res_height);
+   ck_assert_int_eq(ret, 0);
+   virgl_renderer_ctx_attach_resource(ctx.ctx_id, res.handle);
+
+   virgl_encoder_transfer_with_stride(&ctx, &res, 0, 0, &box, 6 * 4, VIRGL_TRANSFER_TO_HOST,
+                                      res_stride, 0);
+   ret = virgl_renderer_submit_cmd(ctx.cbuf->buf, ctx.ctx_id, ctx.cbuf->cdw);
+   ck_assert_int_eq(ret, 0);
+
+   virgl_renderer_ctx_detach_resource(ctx.ctx_id, res.handle);
+   testvirgl_destroy_backed_res(&res);
+   testvirgl_fini_ctx_cmdbuf(&ctx);
+}
+END_TEST
+
 static Suite *virgl_init_suite(void)
 {
    Suite *s;
@@ -986,6 +1015,11 @@ static Suite *virgl_init_suite(void)
 
    suite_add_tcase(s, tc_core);
 
+   tc_core = tcase_create("transfer_command_bounds");
+   tcase_add_test(tc_core, virgl_test_transfer_near_res_bounds_with_stride_succeeds);
+
+   suite_add_tcase(s, tc_core);
+
    return s;
 }

--- a/tests/testvirgl_encode.c
+++ b/tests/testvirgl_encode.c
@@ -568,11 +568,22 @@ int virgl_encoder_transfer(struct virgl_context *ctx,
                            unsigned level, unsigned usage,
                            const struct pipe_box *box,
                            unsigned offset, unsigned direction)
+{
+   return virgl_encoder_transfer_with_stride(ctx, res, level, usage, box,
+                                             offset, direction, 0, 0);
+}
+
+int virgl_encoder_transfer_with_stride(struct virgl_context *ctx,
+                                       struct virgl_resource *res,
+                                       unsigned level, unsigned usage,
+                                       const struct pipe_box *box,
+                                       unsigned offset, unsigned direction,
+                                       unsigned stride, unsigned layer_stride)
 {
    uint32_t command;
    command = VIRGL_CMD0(VIRGL_CCMD_TRANSFER3D, 0, VIRGL_TRANSFER3D_SIZE);
    virgl_encoder_write_dword(ctx->cbuf, command);
-   virgl_encoder_transfer3d_common(ctx, res, level, usage, box, 0, 0);
+   virgl_encoder_transfer3d_common(ctx, res, level, usage, box, stride, layer_stride);
    virgl_encoder_write_dword(ctx->cbuf, offset);
    virgl_encoder_write_dword(ctx->cbuf, direction);
    return 0;

--- a/tests/testvirgl_encode.h
+++ b/tests/testvirgl_encode.h
@@ -149,6 +149,13 @@ int virgl_encoder_transfer(struct virgl_context *ctx,
                            const struct pipe_box *box,
                            unsigned offset, unsigned direction);
 
+int virgl_encoder_transfer_with_stride(struct virgl_context *ctx,
+                                       struct virgl_resource *res,
+                                       unsigned level, unsigned usage,
+                                       const struct pipe_box *box,
+                                       unsigned offset, unsigned direction,
+                                       unsigned stride, unsigned layer_stride);
+
 int virgl_encoder_copy_transfer(struct virgl_context *ctx,
                                 struct virgl_resource *res,
                                 unsigned level, unsigned usage,
