renderer: Add support for TGSI_FILE_HW_ATOMIC

Add support for TGSI's HW atomic counters, implemented here with
atomic_uint.

v2: - Fix calculation of atomic count in cmd
v3: - Add feature-checks (Dave Airlie)
v4: - Pass max-values for all stages and combined (Erik)

Reviewed-by: Gurchetan Singh <gurchetansingh@chromium.org>
Signed-off-by: Dave Airlie <airlied@redhat.com>
macos/master
Tomeu Vizoso 7 years ago committed by Dave Airlie
parent 13ca94b7f4
commit cd8c1b66d2
  1. 4
      src/virgl_hw.h
  2. 9
      src/virgl_protocol.h
  3. 29
      src/vrend_decode.c
  4. 105
      src/vrend_renderer.c
  5. 4
      src/vrend_renderer.h

@ -352,6 +352,10 @@ struct virgl_caps_v2 {
uint32_t max_texture_3d_size;
uint32_t max_texture_cube_size;
uint32_t max_combined_shader_buffers;
uint32_t max_atomic_counters[6];
uint32_t max_atomic_counter_buffers[6];
uint32_t max_combined_atomic_counters;
uint32_t max_combined_atomic_counter_buffers;
};
union virgl_caps {

@ -91,6 +91,7 @@ enum virgl_context_cmd {
VIRGL_CCMD_LAUNCH_GRID,
VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH,
VIRGL_CCMD_TEXTURE_BARRIER,
VIRGL_CCMD_SET_ATOMIC_BUFFERS,
};
/*
@ -544,4 +545,12 @@ enum virgl_context_cmd {
#define VIRGL_TEXTURE_BARRIER_SIZE 1
#define VIRGL_TEXTURE_BARRIER_FLAGS 1
/* hw atomics */
#define VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE 3
/* Fully parenthesize the expansion: the original expanded to
 * `(3 * (x)) + 1`, which binds incorrectly inside a larger expression
 * such as `2 * VIRGL_SET_ATOMIC_BUFFER_SIZE(n)`. */
#define VIRGL_SET_ATOMIC_BUFFER_SIZE(x) ((VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE * (x)) + 1)
#define VIRGL_SET_ATOMIC_BUFFER_START_SLOT 1
#define VIRGL_SET_ATOMIC_BUFFER_OFFSET(x) ((x) * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 2)
#define VIRGL_SET_ATOMIC_BUFFER_LENGTH(x) ((x) * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 3)
#define VIRGL_SET_ATOMIC_BUFFER_RES_HANDLE(x) ((x) * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 4)
#endif

@ -1133,6 +1133,32 @@ static int vrend_decode_set_shader_buffers(struct vrend_decode_ctx *ctx, uint16_
return 0;
}
/* Decode a SET_ATOMIC_BUFFERS command: one start-slot dword followed by
 * (offset, length, resource-handle) triples, one per atomic buffer binding.
 * Returns 0 on success (or on an empty command), EINVAL on malformed input.
 */
static int vrend_decode_set_atomic_buffers(struct vrend_decode_ctx *ctx, uint16_t length)
{
   uint32_t num_abo;
   uint32_t start_slot;

   if (length < 2)
      return EINVAL;

   num_abo = (length - 1) / VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE;
   start_slot = get_buf_entry(ctx, VIRGL_SET_ATOMIC_BUFFER_START_SLOT);
   if (num_abo < 1)
      return 0;

   /* Validate each term separately: a guest-controlled start_slot near
    * UINT32_MAX would otherwise wrap `start_slot + num_abo` and bypass
    * the bound, leading to out-of-bounds writes into sub->abo[]. */
   if (start_slot > PIPE_MAX_HW_ATOMIC_BUFFERS ||
       num_abo > PIPE_MAX_HW_ATOMIC_BUFFERS - start_slot)
      return EINVAL;

   for (uint32_t i = 0; i < num_abo; i++) {
      /* Use the protocol accessor macros rather than magic offsets. */
      uint32_t offset = get_buf_entry(ctx, VIRGL_SET_ATOMIC_BUFFER_OFFSET(i));
      uint32_t buf_len = get_buf_entry(ctx, VIRGL_SET_ATOMIC_BUFFER_LENGTH(i));
      uint32_t handle = get_buf_entry(ctx, VIRGL_SET_ATOMIC_BUFFER_RES_HANDLE(i));
      vrend_set_single_abo(ctx->grctx, start_slot + i, offset, buf_len, handle);
   }

   return 0;
}
static int vrend_decode_set_shader_images(struct vrend_decode_ctx *ctx, uint16_t length)
{
int num_images;
@ -1437,6 +1463,9 @@ int vrend_decode_block(uint32_t ctx_id, uint32_t *block, int ndw)
case VIRGL_CCMD_SET_SHADER_IMAGES:
ret = vrend_decode_set_shader_images(gdctx, len);
break;
case VIRGL_CCMD_SET_ATOMIC_BUFFERS:
ret = vrend_decode_set_atomic_buffers(gdctx, len);
break;
case VIRGL_CCMD_MEMORY_BARRIER:
ret = vrend_decode_memory_barrier(gdctx, len);
break;

@ -91,6 +91,7 @@ enum features_id
{
feat_arb_or_gles_ext_texture_buffer,
feat_arb_robustness,
feat_atomic_counters,
feat_base_instance,
feat_barrier,
feat_bit_encoding,
@ -157,6 +158,7 @@ static const struct {
} feature_list[] = {
[feat_arb_or_gles_ext_texture_buffer] = { 31, UNAVAIL, { "GL_ARB_texture_buffer_object", "GL_EXT_texture_buffer", NULL } },
[feat_arb_robustness] = { UNAVAIL, UNAVAIL, { "GL_ARB_robustness" } },
[feat_atomic_counters] = { 42, 31, { "GL_ARB_shader_atomic_counters" } },
[feat_base_instance] = { 42, UNAVAIL, { "GL_ARB_base_instance", "GL_EXT_base_instance" } },
[feat_barrier] = { 42, 31, {} },
[feat_bit_encoding] = { 33, UNAVAIL, { "GL_ARB_shader_bit_encoding" } },
@ -390,6 +392,12 @@ struct vrend_ssbo {
unsigned buffer_offset;
};
/* One bound hw atomic counter buffer: a byte sub-range of a buffer resource.
 * A slot is considered unused when its bit is clear in sub->abo_used_mask. */
struct vrend_abo {
struct vrend_resource *res; /* backing buffer resource (0 when slot is unbound) */
unsigned buffer_size; /* size in bytes of the bound range */
unsigned buffer_offset; /* byte offset of the range within res */
};
struct vrend_vertex_element {
struct pipe_vertex_element base;
GLenum type;
@ -535,6 +543,9 @@ struct vrend_sub_context {
struct vrend_ssbo ssbo[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS];
uint32_t ssbo_used_mask[PIPE_SHADER_TYPES];
struct vrend_abo abo[PIPE_MAX_HW_ATOMIC_BUFFERS];
uint32_t abo_used_mask;
};
struct vrend_context {
@ -2622,6 +2633,35 @@ void vrend_set_single_ssbo(struct vrend_context *ctx,
}
}
/* Bind (or, when handle == 0, unbind) a single hw atomic counter buffer
 * slot on the current sub-context.  A non-zero handle is resolved to a
 * resource; an unknown handle reports a context error and leaves the
 * slot untouched.  No-op when the host lacks atomic counter support.
 */
void vrend_set_single_abo(struct vrend_context *ctx,
                          int index,
                          uint32_t offset, uint32_t length,
                          uint32_t handle)
{
   struct vrend_abo *abo = &ctx->sub->abo[index];

   if (!has_feature(feat_atomic_counters))
      return;

   if (!handle) {
      /* Unbind: clear the slot and drop it from the used mask. */
      abo->res = NULL;
      abo->buffer_offset = 0;
      abo->buffer_size = 0;
      ctx->sub->abo_used_mask &= ~(1u << index);
      return;
   }

   struct vrend_resource *res = vrend_renderer_ctx_res_lookup(ctx, handle);
   if (!res) {
      report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, handle);
      return;
   }

   abo->res = res;
   abo->buffer_offset = offset;
   abo->buffer_size = length;
   ctx->sub->abo_used_mask |= (1u << index);
}
void vrend_memory_barrier(UNUSED struct vrend_context *ctx,
unsigned flags)
{
@ -3627,6 +3667,27 @@ static void vrend_draw_bind_ssbo_shader(struct vrend_context *ctx, int shader_ty
}
}
static void vrend_draw_bind_abo_shader(struct vrend_context *ctx)
{
uint32_t mask;
struct vrend_abo *abo;
struct vrend_resource *res;
int i;
if (!has_feature(feat_atomic_counters))
return;
mask = ctx->sub->abo_used_mask;
while (mask) {
i = u_bit_scan(&mask);
abo = &ctx->sub->abo[i];
res = (struct vrend_resource *)abo->res;
glBindBufferRange(GL_ATOMIC_COUNTER_BUFFER, i, res->id,
abo->buffer_offset, abo->buffer_size);
}
}
static void vrend_draw_bind_images_shader(struct vrend_context *ctx, int shader_type)
{
GLenum access;
@ -3704,6 +3765,8 @@ static void vrend_draw_bind_objects(struct vrend_context *ctx, bool new_program)
vrend_draw_bind_ssbo_shader(ctx, shader_type);
}
vrend_draw_bind_abo_shader(ctx);
if (vrend_state.use_core_profile && ctx->sub->prog->fs_stipple_loc != -1) {
glActiveTexture(GL_TEXTURE0 + sampler_id);
glBindTexture(GL_TEXTURE_2D, ctx->pstipple_tex_id);
@ -4037,6 +4100,7 @@ void vrend_launch_grid(struct vrend_context *ctx,
vrend_draw_bind_samplers_shader(ctx, PIPE_SHADER_COMPUTE, &sampler_id);
vrend_draw_bind_images_shader(ctx, PIPE_SHADER_COMPUTE);
vrend_draw_bind_ssbo_shader(ctx, PIPE_SHADER_COMPUTE);
vrend_draw_bind_abo_shader(ctx);
if (indirect_handle) {
indirect_res = vrend_renderer_ctx_res_lookup(ctx, indirect_handle);
@ -8179,6 +8243,47 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
caps->v2.capability_bits |= VIRGL_CAP_COMPUTE_SHADER;
}
if (has_feature(feat_atomic_counters)) {
glGetIntegerv(GL_MAX_VERTEX_ATOMIC_COUNTERS,
(GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_VERTEX));
glGetIntegerv(GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS,
(GLint*)(caps->v2.max_atomic_counter_buffers + PIPE_SHADER_VERTEX));
glGetIntegerv(GL_MAX_FRAGMENT_ATOMIC_COUNTERS,
(GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_FRAGMENT));
glGetIntegerv(GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS,
(GLint*)(caps->v2.max_atomic_counter_buffers + PIPE_SHADER_FRAGMENT));
if (has_feature(feat_geometry_shader)) {
glGetIntegerv(GL_MAX_GEOMETRY_ATOMIC_COUNTERS,
(GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_GEOMETRY));
glGetIntegerv(GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS,
(GLint*)(caps->v2.max_atomic_counter_buffers + PIPE_SHADER_GEOMETRY));
}
if (has_feature(feat_tessellation)) {
glGetIntegerv(GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS,
(GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_TESS_CTRL));
glGetIntegerv(GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS,
(GLint*)(caps->v2.max_atomic_counter_buffers + PIPE_SHADER_TESS_CTRL));
glGetIntegerv(GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS,
(GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_TESS_EVAL));
glGetIntegerv(GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS,
(GLint*)(caps->v2.max_atomic_counter_buffers + PIPE_SHADER_TESS_EVAL));
}
if (has_feature(feat_compute_shader)) {
glGetIntegerv(GL_MAX_COMPUTE_ATOMIC_COUNTERS,
(GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_COMPUTE));
glGetIntegerv(GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS,
(GLint*)(caps->v2.max_atomic_counter_buffers + PIPE_SHADER_COMPUTE));
}
glGetIntegerv(GL_MAX_COMBINED_ATOMIC_COUNTERS,
(GLint*)&caps->v2.max_combined_atomic_counters);
glGetIntegerv(GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS,
(GLint*)&caps->v2.max_combined_atomic_counter_buffers);
}
if (has_feature(feat_fb_no_attach))
caps->v2.capability_bits |= VIRGL_CAP_FB_NO_ATTACH;

@ -243,6 +243,10 @@ void vrend_set_single_ssbo(struct vrend_context *ctx,
int index,
uint32_t offset, uint32_t length,
uint32_t handle);
/* Bind a hw atomic counter buffer range to slot `index`; handle == 0 unbinds. */
void vrend_set_single_abo(struct vrend_context *ctx,
int index,
uint32_t offset, uint32_t length,
uint32_t handle);
void vrend_memory_barrier(struct vrend_context *ctx,
unsigned flags);
void vrend_launch_grid(struct vrend_context *ctx,

Loading…
Cancel
Save