vrend: reject bad strides in check_iov_bounds

Transfer strides are either set internally or come from
virgl_renderer_transfer_{write,read}_iov.  As far as I can tell,
Mesa always passes sane values.  We also already expect sane values
in places like vrend_renderer_transfer_write_iov,
vrend_transfer_send_readpixels, and vrend_transfer_send_getteximage.
Let's reject bad strides.

This fixes, for example, transfers to a 1D array texture (where
box->height is 1) with a non-default stride.
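
For illustration, the stride check after this change reads (condensed
from the first hunk below):

    /* before: validation was skipped whenever box->height <= 1 */
    if (info->stride && info->box->height > 1) { ... }

    /* after: any guest-supplied stride is validated */
    if (info->stride) {
       GLuint min_stride = util_format_get_stride(res->base.format,
                                                  info->box->width);
       if (info->stride < min_stride)
          return false;
    }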

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Alexandros Frantzis <alexandros.frantzis@collabora.com>
commit ab3640c085
parent fb749a1f6a
 src/vrend_renderer.c        | 11
 tests/test_virgl_transfer.c | 54

--- a/src/vrend_renderer.c
+++ b/src/vrend_renderer.c
@@ -6481,10 +6481,9 @@ static bool check_iov_bounds(struct vrend_resource *res,
 
    /* If the transfer specifies a stride, verify that it's at least as large as
     * the minimum required for the transfer. If no stride is specified use the
-    * image stride for the specified level. For backward compatibility, we only
-    * use any guest-specified transfer stride for boxes with height.
+    * image stride for the specified level.
     */
-   if (info->stride && info->box->height > 1) {
+   if (info->stride) {
       GLuint min_stride = util_format_get_stride(res->base.format, info->box->width);
       if (info->stride < min_stride)
          return false;
@@ -6496,11 +6495,9 @@ static bool check_iov_bounds(struct vrend_resource *res,
 
    /* If the transfer specifies a layer_stride, verify that it's at least as
    * large as the minimum required for the transfer. If no layer_stride is
-    * specified use the image layer_stride for the specified level. For
-    * backward compatibility, we only use any guest-specified transfer
-    * layer_stride for boxes with depth.
+    * specified use the image layer_stride for the specified level.
     */
-   if (info->layer_stride && info->box->depth > 1) {
+   if (info->layer_stride) {
       GLuint min_layer_stride = util_format_get_2d_size(res->base.format,
                                                         valid_stride,
                                                         info->box->height);

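As a worked example of what these checks enforce, take the tests'
50-texel-wide, 4-byte-per-texel resources: a row needs at least
50 * 4 = 200 bytes, so the stride of 10 used by the removed
virgl_test_transfer_2d_bad_stride test below is rejected, and a box
of height H needs a layer_stride of at least 200 * H bytes. A minimal
standalone sketch of the same logic follows; stride_ok() is a
hypothetical stand-in for the real util_format_get_stride()-based
check, assuming a linear 4-byte format:

    #include <assert.h>

    /* Hypothetical helper mirroring the patched check: a stride of 0
     * means "use the image stride for the level"; any other value
     * must cover at least one row. */
    static int stride_ok(unsigned stride, unsigned width)
    {
       return stride == 0 || stride >= width * 4;
    }

    int main(void)
    {
       assert(!stride_ok(10, 50));  /* 10 < 200: rejected (EINVAL) */
       assert(stride_ok(200, 50));  /* exactly one row: accepted */
       assert(stride_ok(0, 50));    /* default: image stride is used */
       return 0;
    }
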
--- a/tests/test_virgl_transfer.c
+++ b/tests/test_virgl_transfer.c
@@ -374,7 +374,7 @@ START_TEST(virgl_test_transfer_1d_bad_iov_offset)
 }
 END_TEST
 
-START_TEST(virgl_test_transfer_1d_strides_are_ignored)
+START_TEST(virgl_test_transfer_1d_bad_strides)
 {
    struct virgl_renderer_resource_create_args res;
    unsigned char data[50*4];
@@ -395,10 +395,10 @@ START_TEST(virgl_test_transfer_1d_strides_are_ignored)
 
    ret = virgl_renderer_transfer_write_iov(res.handle, 1, 0, bad_stride, 0,
                                            &box, 0, &iov, niovs);
-   ck_assert_int_eq(ret, 0);
+   ck_assert_int_eq(ret, EINVAL);
 
    ret = virgl_renderer_transfer_write_iov(res.handle, 1, 0, 0, bad_stride,
                                            &box, 0, &iov, niovs);
-   ck_assert_int_eq(ret, 0);
+   ck_assert_int_eq(ret, EINVAL);
 
    virgl_renderer_ctx_detach_resource(1, res.handle);
@@ -406,7 +406,7 @@ START_TEST(virgl_test_transfer_1d_strides_are_ignored)
 }
 END_TEST
 
-START_TEST(virgl_test_transfer_2d_layer_stride_is_ignored)
+START_TEST(virgl_test_transfer_2d_bad_strides)
 {
    struct virgl_renderer_resource_create_args res;
    unsigned char data[50*4];
@@ -423,9 +423,12 @@ START_TEST(virgl_test_transfer_2d_layer_stride_is_ignored)
 
    virgl_renderer_ctx_attach_resource(1, res.handle);
 
+   ret = virgl_renderer_transfer_write_iov(res.handle, 1, 0, bad_stride, 0,
+                                           &box, 0, &iov, niovs);
+   ck_assert_int_eq(ret, EINVAL);
+
    ret = virgl_renderer_transfer_write_iov(res.handle, 1, 0, 0, bad_stride,
                                            &box, 0, &iov, niovs);
-   ck_assert_int_eq(ret, 0);
+   ck_assert_int_eq(ret, EINVAL);
 
    virgl_renderer_ctx_detach_resource(1, res.handle);
@@ -433,7 +436,7 @@ START_TEST(virgl_test_transfer_2d_layer_stride_is_ignored)
 }
 END_TEST
 
-START_TEST(virgl_test_transfer_buffer_strides_are_ignored)
+START_TEST(virgl_test_transfer_buffer_bad_strides)
 {
    struct virgl_renderer_resource_create_args res;
    unsigned char data[50*4];
@@ -452,10 +455,10 @@ START_TEST(virgl_test_transfer_buffer_strides_are_ignored)
 
    ret = virgl_renderer_transfer_write_iov(res.handle, 1, 0, bad_stride, 0,
                                            &box, 0, &iov, niovs);
-   ck_assert_int_eq(ret, 0);
+   ck_assert_int_eq(ret, EINVAL);
 
-   ret = virgl_renderer_transfer_write_iov(res.handle, 1, 0, bad_stride, 0,
+   ret = virgl_renderer_transfer_write_iov(res.handle, 1, 0, 0, bad_stride,
                                            &box, 0, &iov, niovs);
-   ck_assert_int_eq(ret, 0);
+   ck_assert_int_eq(ret, EINVAL);
 
    virgl_renderer_ctx_detach_resource(1, res.handle);
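
The buffer hunk above also shows why the old guards were misleading:
a buffer transfer box always has height and depth of 1, so the old
"box->height > 1" and "box->depth > 1" conditions skipped stride
validation entirely for buffers (hence the old test name,
virgl_test_transfer_buffer_strides_are_ignored). Caller-side, the
change looks like this hedged sketch (resource setup elided; the
handle/ctx values mirror the tests):

    /* This call previously returned 0; with the patch it returns
     * EINVAL, because strides are now validated even for boxes with
     * height == 1 and depth == 1. */
    ret = virgl_renderer_transfer_write_iov(res.handle, 1 /* ctx */,
                                            0 /* level */, bad_stride,
                                            0 /* layer_stride */, &box,
                                            0 /* offset */, &iov, niovs);
    ck_assert_int_eq(ret, EINVAL);
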
@@ -519,32 +522,6 @@ START_TEST(virgl_test_transfer_2d_bad_level)
 }
 END_TEST
 
-/* test stride less than box size */
-START_TEST(virgl_test_transfer_2d_bad_stride)
-{
-   struct virgl_renderer_resource_create_args res;
-   unsigned char data[50*4*2];
-   struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
-   int niovs = 1;
-   int ret;
-   struct virgl_box box = { .w = 50, .h = 2, .d = 1 };
-
-   testvirgl_init_simple_2d_resource(&res, 1);
-
-   ret = virgl_renderer_resource_create(&res, NULL, 0);
-   ck_assert_int_eq(ret, 0);
-
-   virgl_renderer_ctx_attach_resource(1, res.handle);
-
-   ret = virgl_renderer_transfer_write_iov(res.handle, 1, 0, 10, 0, &box, 0, &iov, niovs);
-   ck_assert_int_eq(ret, EINVAL);
-
-   virgl_renderer_ctx_detach_resource(1, res.handle);
-
-   virgl_renderer_resource_unref(1);
-}
-END_TEST
-
 /* for each texture type construct a valid and invalid transfer,
    invalid using a box outside the bounds of the transfer */
 
 #define LARGE_FLAG_WIDTH (1 << 0)
@@ -985,12 +962,11 @@ static Suite *virgl_init_suite(void)
    tcase_add_test(tc_core, virgl_test_transfer_1d);
    tcase_add_test(tc_core, virgl_test_transfer_1d_bad_iov);
    tcase_add_test(tc_core, virgl_test_transfer_1d_bad_iov_offset);
-   tcase_add_test(tc_core, virgl_test_transfer_1d_strides_are_ignored);
-   tcase_add_test(tc_core, virgl_test_transfer_2d_layer_stride_is_ignored);
-   tcase_add_test(tc_core, virgl_test_transfer_buffer_strides_are_ignored);
+   tcase_add_test(tc_core, virgl_test_transfer_1d_bad_strides);
+   tcase_add_test(tc_core, virgl_test_transfer_2d_bad_strides);
+   tcase_add_test(tc_core, virgl_test_transfer_buffer_bad_strides);
    tcase_add_test(tc_core, virgl_test_transfer_2d_array_bad_layer_stride);
    tcase_add_test(tc_core, virgl_test_transfer_2d_bad_level);
-   tcase_add_test(tc_core, virgl_test_transfer_2d_bad_stride);
    tcase_add_loop_test(tc_core, virgl_test_transfer_res_read_valid, 0, PIPE_MAX_TEXTURE_TYPES);
    tcase_add_loop_test(tc_core, virgl_test_transfer_res_write_valid, 0, PIPE_MAX_TEXTURE_TYPES);
