compute: handle launch grid.

This adds protocol support for dispatching compute shaders using
the launch grid command from gallium.

Reviewed-by: Gurchetan Singh <gurchetansingh@chromium.org>
Author: Dave Airlie
parent 40a4b1141e
commit 38207b5bbb
 src/virgl_protocol.h | 11 +
 src/vrend_decode.c   | 22 +
 src/vrend_renderer.c | 71 +
 src/vrend_renderer.h |  5 +
 4 files changed, 109 insertions(+)

diff --git a/src/virgl_protocol.h b/src/virgl_protocol.h
--- a/src/virgl_protocol.h
+++ b/src/virgl_protocol.h
@@ -88,6 +88,7 @@ enum virgl_context_cmd {
    VIRGL_CCMD_SET_SHADER_BUFFERS,
    VIRGL_CCMD_SET_SHADER_IMAGES,
    VIRGL_CCMD_MEMORY_BARRIER,
+   VIRGL_CCMD_LAUNCH_GRID,
 };
 
 /*
@@ -517,4 +518,14 @@
 #define VIRGL_MEMORY_BARRIER_SIZE 1
 #define VIRGL_MEMORY_BARRIER_FLAGS 1
 
+/* launch grid */
+#define VIRGL_LAUNCH_GRID_SIZE 8
+#define VIRGL_LAUNCH_BLOCK_X 1
+#define VIRGL_LAUNCH_BLOCK_Y 2
+#define VIRGL_LAUNCH_BLOCK_Z 3
+#define VIRGL_LAUNCH_GRID_X 4
+#define VIRGL_LAUNCH_GRID_Y 5
+#define VIRGL_LAUNCH_GRID_Z 6
+#define VIRGL_LAUNCH_INDIRECT_HANDLE 7
+#define VIRGL_LAUNCH_INDIRECT_OFFSET 8
 #endif
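
For reference, the command on the wire is one header dword built with the
VIRGL_CMD0 macro from this header, followed by VIRGL_LAUNCH_GRID_SIZE payload
dwords at the indices defined above. A minimal guest-side encoder sketch
(emit_launch_grid and buf are hypothetical, not part of this commit):

/* Hypothetical sketch: pack one launch-grid command into a dword buffer
 * using the VIRGL_LAUNCH_* payload indices defined above. */
static void emit_launch_grid(uint32_t *buf,
                             const uint32_t block[3], const uint32_t grid[3],
                             uint32_t indirect_handle, uint32_t indirect_offset)
{
   buf[0] = VIRGL_CMD0(VIRGL_CCMD_LAUNCH_GRID, 0, VIRGL_LAUNCH_GRID_SIZE);
   buf[VIRGL_LAUNCH_BLOCK_X] = block[0];
   buf[VIRGL_LAUNCH_BLOCK_Y] = block[1];
   buf[VIRGL_LAUNCH_BLOCK_Z] = block[2];
   buf[VIRGL_LAUNCH_GRID_X] = grid[0];
   buf[VIRGL_LAUNCH_GRID_Y] = grid[1];
   buf[VIRGL_LAUNCH_GRID_Z] = grid[2];
   buf[VIRGL_LAUNCH_INDIRECT_HANDLE] = indirect_handle; /* 0 = direct dispatch */
   buf[VIRGL_LAUNCH_INDIRECT_OFFSET] = indirect_offset;
}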

diff --git a/src/vrend_decode.c b/src/vrend_decode.c
--- a/src/vrend_decode.c
+++ b/src/vrend_decode.c
@@ -1153,6 +1153,25 @@ static int vrend_decode_memory_barrier(struct vrend_decode_ctx *ctx, uint16_t length)
    return 0;
 }
 
+static int vrend_decode_launch_grid(struct vrend_decode_ctx *ctx, uint16_t length)
+{
+   uint32_t block[3], grid[3];
+   uint32_t indirect_handle, indirect_offset;
+
+   if (length != VIRGL_LAUNCH_GRID_SIZE)
+      return EINVAL;
+   block[0] = get_buf_entry(ctx, VIRGL_LAUNCH_BLOCK_X);
+   block[1] = get_buf_entry(ctx, VIRGL_LAUNCH_BLOCK_Y);
+   block[2] = get_buf_entry(ctx, VIRGL_LAUNCH_BLOCK_Z);
+   grid[0] = get_buf_entry(ctx, VIRGL_LAUNCH_GRID_X);
+   grid[1] = get_buf_entry(ctx, VIRGL_LAUNCH_GRID_Y);
+   grid[2] = get_buf_entry(ctx, VIRGL_LAUNCH_GRID_Z);
+   indirect_handle = get_buf_entry(ctx, VIRGL_LAUNCH_INDIRECT_HANDLE);
+   indirect_offset = get_buf_entry(ctx, VIRGL_LAUNCH_INDIRECT_OFFSET);
+   vrend_launch_grid(ctx->grctx, block, grid, indirect_handle, indirect_offset);
+   return 0;
+}
+
 static int vrend_decode_set_streamout_targets(struct vrend_decode_ctx *ctx,
                                               uint16_t length)
 {
@@ -1390,6 +1409,9 @@ int vrend_decode_block(uint32_t ctx_id, uint32_t *block, int ndw)
       case VIRGL_CCMD_MEMORY_BARRIER:
          ret = vrend_decode_memory_barrier(gdctx, len);
          break;
+      case VIRGL_CCMD_LAUNCH_GRID:
+         ret = vrend_decode_launch_grid(gdctx, len);
+         break;
       default:
          ret = EINVAL;
       }
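
For concreteness, here is a well-formed command as the decoder sees it: the
header's length field must equal VIRGL_LAUNCH_GRID_SIZE (8) or the decoder
returns EINVAL. The block/grid values below are illustrative only:

/* Illustrative wire image of one launch-grid command (header + 8 dwords). */
uint32_t cmd[1 + VIRGL_LAUNCH_GRID_SIZE] = {
   VIRGL_CMD0(VIRGL_CCMD_LAUNCH_GRID, 0, VIRGL_LAUNCH_GRID_SIZE),
   8, 8, 1,    /* VIRGL_LAUNCH_BLOCK_X/Y/Z */
   64, 64, 1,  /* VIRGL_LAUNCH_GRID_X/Y/Z */
   0,          /* VIRGL_LAUNCH_INDIRECT_HANDLE: 0 selects direct dispatch */
   0,          /* VIRGL_LAUNCH_INDIRECT_OFFSET */
};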

diff --git a/src/vrend_renderer.c b/src/vrend_renderer.c
--- a/src/vrend_renderer.c
+++ b/src/vrend_renderer.c
@@ -3928,6 +3928,77 @@ int vrend_draw_vbo(struct vrend_context *ctx,
    return 0;
 }
 
+void vrend_launch_grid(struct vrend_context *ctx,
+                       uint32_t *block,
+                       uint32_t *grid,
+                       uint32_t indirect_handle,
+                       uint32_t indirect_offset)
+{
+   bool new_program = false;
+   struct vrend_resource *indirect_res = NULL;
+
+   if (ctx->sub->cs_shader_dirty) {
+      struct vrend_linked_shader_program *prog;
+      bool same_prog, cs_dirty;
+      if (!ctx->sub->shaders[PIPE_SHADER_COMPUTE]) {
+         fprintf(stderr,"dropping rendering due to missing shaders: %s\n", ctx->debug_name);
+         return;
+      }
+      vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_COMPUTE], &cs_dirty);
+      if (!ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current) {
+         fprintf(stderr, "failure to compile shader variants: %s\n", ctx->debug_name);
+         return;
+      }
+      same_prog = true;
+      if (ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current->id != (GLuint)ctx->sub->prog_ids[PIPE_SHADER_COMPUTE])
+         same_prog = false;
+      if (!same_prog) {
+         prog = lookup_cs_shader_program(ctx, ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current->id);
+         if (!prog) {
+            prog = add_cs_shader_program(ctx, ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current);
+            if (!prog)
+               return;
+         }
+      } else
+         prog = ctx->sub->prog;
+
+      if (ctx->sub->prog != prog) {
+         new_program = true;
+         ctx->sub->prog_ids[PIPE_SHADER_VERTEX] = -1;
+         ctx->sub->prog_ids[PIPE_SHADER_COMPUTE] = ctx->sub->shaders[PIPE_SHADER_COMPUTE]->current->id;
+         ctx->sub->prog = prog;
+      }
+      ctx->sub->shader_dirty = true;
+   }
+
+   vrend_use_program(ctx, ctx->sub->prog->id);
+   int sampler_id = 0, ubo_id = 0;
+   vrend_draw_bind_ubo_shader(ctx, PIPE_SHADER_COMPUTE, &ubo_id);
+   vrend_draw_bind_const_shader(ctx, PIPE_SHADER_COMPUTE, new_program);
+   vrend_draw_bind_samplers_shader(ctx, PIPE_SHADER_COMPUTE, &sampler_id);
+   vrend_draw_bind_images_shader(ctx, PIPE_SHADER_COMPUTE);
+   vrend_draw_bind_ssbo_shader(ctx, PIPE_SHADER_COMPUTE);
+
+   if (indirect_handle) {
+      indirect_res = vrend_renderer_ctx_res_lookup(ctx, indirect_handle);
+      if (!indirect_res) {
+         report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, indirect_handle);
+         return;
+      }
+   }
+
+   if (indirect_res)
+      glBindBuffer(GL_DISPATCH_INDIRECT_BUFFER, indirect_res->id);
+   else
+      glBindBuffer(GL_DISPATCH_INDIRECT_BUFFER, 0);
+
+   if (indirect_res) {
+      glDispatchComputeIndirect(indirect_offset);
+   } else {
+      glDispatchCompute(grid[0], grid[1], grid[2]);
+   }
+}
+
 static GLenum translate_blend_func(uint32_t pipe_blend)
 {
    switch(pipe_blend){
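
A note on the indirect path above: glDispatchComputeIndirect reads the three
workgroup counts from the buffer bound to GL_DISPATCH_INDIRECT_BUFFER at the
given byte offset, which the GL spec requires to be a multiple of four. The
expected layout (standard OpenGL, shown here only for reference):

/* Per the OpenGL spec, the data at indirect_offset in the bound
 * GL_DISPATCH_INDIRECT_BUFFER is three tightly packed GLuints: */
typedef struct {
   GLuint num_groups_x;
   GLuint num_groups_y;
   GLuint num_groups_z;
} DispatchIndirectCommand;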

diff --git a/src/vrend_renderer.h b/src/vrend_renderer.h
--- a/src/vrend_renderer.h
+++ b/src/vrend_renderer.h
@@ -243,6 +243,11 @@ void vrend_set_single_ssbo(struct vrend_context *ctx,
                            uint32_t handle);
 void vrend_memory_barrier(struct vrend_context *ctx,
                           unsigned flags);
+void vrend_launch_grid(struct vrend_context *ctx,
+                       uint32_t *block,
+                       uint32_t *grid,
+                       uint32_t indirect_handle,
+                       uint32_t indirect_offset);
 #define VREND_TRANSFER_WRITE 1
 #define VREND_TRANSFER_READ 2
 int vrend_renderer_transfer_iov(const struct vrend_transfer_info *info, int transfer_mode);
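
As a usage sketch, a host-side caller holding a valid context with a compute
shader bound could dispatch directly (values illustrative; note the GL local
workgroup size comes from the compute shader itself, and this commit's
vrend_launch_grid dispatches using grid[] only):

/* Illustrative direct dispatch: 64x64 workgroups. Passing 0 for
 * indirect_handle selects glDispatchCompute over the indirect path. */
uint32_t block[3] = { 8, 8, 1 };   /* guest-side local (block) size */
uint32_t grid[3]  = { 64, 64, 1 }; /* number of workgroups per dimension */
vrend_launch_grid(ctx, block, grid, 0 /* indirect_handle */, 0 /* offset */);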
