shader: extract functions to create and set string buffers and sinfo

These functions will later also be used to create a TC passthrough shader
on GLES if a GL guest doesn't send one.
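
For orientation, with these helpers the tail of vrend_convert_shader() boils
down to the sequence sketched below. This is a condensed, hypothetical summary
of the hunks in this change (convert_shader_flow() is a made-up name and the
fail/cleanup path is left out), not the literal function body:

    /* Condensed sketch of the refactored flow, using the helpers extracted in
     * this change; error cleanup (the "fail" path) is intentionally omitted. */
    static bool convert_shader_flow(struct vrend_context *rctx,
                                    struct dump_ctx *ctx,
                                    const struct tgsi_token *tokens,
                                    struct vrend_shader_info *sinfo,
                                    struct vrend_strarray *shader)
    {
       /* glsl_ver_ext, glsl_hdr and glsl_main string buffers */
       if (!allocate_strbuffers(ctx))
          return false;

       /* translate the TGSI tokens, emitting the GLSL main body */
       if (!tgsi_iterate_shader(tokens, &ctx->iter))
          return false;

       emit_header(ctx);
       emit_ios(ctx);

       fill_sinfo(ctx, sinfo);            /* copy shader metadata out of ctx */
       set_strbuffers(rctx, ctx, shader); /* hand the GLSL strings to the caller */
       return true;
    }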

Signed-off-by: Gert Wollny <gert.wollny@collabora.com>
Reviewed-by: Gurchetan Singh <gurchetansingh@chromium.org>
Gert Wollny 6 years ago
parent 3fbeff90b9
commit f9f7bd64f7
      src/vrend_shader.c

@@ -4322,26 +4322,8 @@ void renumber_io_arrays(unsigned nio, struct vrend_shader_io *io)
}
}
static boolean
iter_instruction(struct tgsi_iterate_context *iter,
struct tgsi_full_instruction *inst)
static void handle_io_arrays(struct dump_ctx *ctx)
{
struct dump_ctx *ctx = (struct dump_ctx *)iter;
struct dest_info dinfo = { 0 };
struct source_info sinfo = { 0 };
char srcs[4][255], dsts[3][255];
char fp64_dsts[3][255];
uint instno = ctx->instno++;
char writemask[6] = {0};
char src_swizzle0[10];
sinfo.svec4 = VEC4;
if (ctx->prog_type == -1)
ctx->prog_type = iter->processor.Processor;
if (instno == 0) {
bool require_enhanced_layouts = false;
/* If the guest sent real IO arrays then we declare them individually,
@@ -4392,10 +4374,31 @@ iter_instruction(struct tgsi_iterate_context *iter,
if (require_enhanced_layouts) {
ctx->shader_req_bits |= SHADER_REQ_ENHANCED_LAYOUTS;
/* We could skip this for VS inputs and FS outputs, but if enhanced layouts
* are supported then separate shader objects are probably available too */
ctx->shader_req_bits |= SHADER_REQ_SEPERATE_SHADER_OBJECTS;
}
}
static boolean
iter_instruction(struct tgsi_iterate_context *iter,
struct tgsi_full_instruction *inst)
{
struct dump_ctx *ctx = (struct dump_ctx *)iter;
struct dest_info dinfo = { 0 };
struct source_info sinfo = { 0 };
char srcs[4][255], dsts[3][255];
char fp64_dsts[3][255];
uint instno = ctx->instno++;
char writemask[6] = {0};
char src_swizzle0[10];
sinfo.svec4 = VEC4;
if (ctx->prog_type == -1)
ctx->prog_type = iter->processor.Processor;
if (instno == 0) {
handle_io_arrays(ctx);
/* Vertex shader inputs are not sent as arrays, but the access may still be
* indirect, so we have to deal with that */
@@ -6204,6 +6207,108 @@ static boolean analyze_instruction(struct tgsi_iterate_context *iter,
return true;
}
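/* Gather the properties collected in the dump_ctx while translating the TGSI
 * and copy them into the vrend_shader_info handed back to the caller. */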
static void fill_sinfo(struct dump_ctx *ctx, struct vrend_shader_info *sinfo)
{
sinfo->num_ucp = ctx->key->clip_plane_enable ? 8 : 0;
sinfo->has_pervertex_out = ctx->vs_has_pervertex;
sinfo->has_sample_input = ctx->has_sample_input;
bool has_prop = (ctx->num_clip_dist_prop + ctx->num_cull_dist_prop) > 0;
sinfo->num_clip_out = has_prop ? ctx->num_clip_dist_prop : (ctx->num_clip_dist ? ctx->num_clip_dist : 8);
sinfo->num_cull_out = has_prop ? ctx->num_cull_dist_prop : 0;
sinfo->samplers_used_mask = ctx->samplers_used;
sinfo->images_used_mask = ctx->images_used_mask;
sinfo->num_consts = ctx->num_consts;
sinfo->ubo_used_mask = ctx->ubo_used_mask;
sinfo->ssbo_used_mask = ctx->ssbo_used_mask;
sinfo->ubo_indirect = ctx->info.dimension_indirect_files & (1 << TGSI_FILE_CONSTANT);
if (ctx->generic_input_range.used)
sinfo->num_indirect_generic_inputs = ctx->generic_input_range.io.last - ctx->generic_input_range.io.sid + 1;
if (ctx->patch_input_range.used)
sinfo->num_indirect_patch_inputs = ctx->patch_input_range.io.last - ctx->patch_input_range.io.sid + 1;
if (ctx->generic_output_range.used)
sinfo->num_indirect_generic_outputs = ctx->generic_output_range.io.last - ctx->generic_output_range.io.sid + 1;
if (ctx->patch_output_range.used)
sinfo->num_indirect_patch_outputs = ctx->patch_output_range.io.last - ctx->patch_output_range.io.sid + 1;
sinfo->num_inputs = ctx->num_inputs;
sinfo->num_interps = ctx->num_interps;
sinfo->num_outputs = ctx->num_outputs;
sinfo->shadow_samp_mask = ctx->shadow_samp_mask;
sinfo->glsl_ver = ctx->glsl_ver_required;
sinfo->gs_out_prim = ctx->gs_out_prim;
sinfo->tes_prim = ctx->tes_prim_mode;
sinfo->tes_point_mode = ctx->tes_point_mode;
if (sinfo->so_names || ctx->so_names) {
if (sinfo->so_names) {
for (unsigned i = 0; i < sinfo->so_info.num_outputs; ++i)
free(sinfo->so_names[i]);
free(sinfo->so_names);
}
}
/* Record information about the layout of generics and patches for passing it
* to the next shader stage. mesa/tgsi doesn't provide this information for
* TCS, TES, and GEOM shaders.
*/
sinfo->guest_sent_io_arrays = ctx->guest_sent_io_arrays;
sinfo->num_generic_and_patch_outputs = 0;
for(unsigned i = 0; i < ctx->num_outputs; i++) {
sinfo->generic_outputs_layout[sinfo->num_generic_and_patch_outputs].name = ctx->outputs[i].name;
sinfo->generic_outputs_layout[sinfo->num_generic_and_patch_outputs].sid = ctx->outputs[i].sid;
sinfo->generic_outputs_layout[sinfo->num_generic_and_patch_outputs].location = ctx->outputs[i].layout_location;
sinfo->generic_outputs_layout[sinfo->num_generic_and_patch_outputs].array_id = ctx->outputs[i].array_id;
sinfo->generic_outputs_layout[sinfo->num_generic_and_patch_outputs].usage_mask = ctx->outputs[i].usage_mask;
if (ctx->outputs[i].name == TGSI_SEMANTIC_GENERIC || ctx->outputs[i].name == TGSI_SEMANTIC_PATCH) {
sinfo->num_generic_and_patch_outputs++;
}
}
sinfo->so_names = ctx->so_names;
sinfo->attrib_input_mask = ctx->attrib_input_mask;
if (sinfo->sampler_arrays)
free(sinfo->sampler_arrays);
sinfo->sampler_arrays = ctx->sampler_arrays;
sinfo->num_sampler_arrays = ctx->num_sampler_arrays;
if (sinfo->image_arrays)
free(sinfo->image_arrays);
sinfo->image_arrays = ctx->image_arrays;
sinfo->num_image_arrays = ctx->num_image_arrays;
}
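/* Allocate the string buffers that receive the GLSL version/extension
 * preamble, the header, and the main body of the translated shader. */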
static bool allocate_strbuffers(struct dump_ctx* ctx)
{
if (!strbuf_alloc(&ctx->glsl_main, 4096))
return false;
if (strbuf_get_error(&ctx->glsl_main))
return false;
if (!strbuf_alloc(&ctx->glsl_hdr, 1024))
return false;
if (!strbuf_alloc(&ctx->glsl_ver_ext, 1024))
return false;
return true;
}
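/* Move the three GLSL string buffers into the output string array and dump
 * the assembled shader when GLSL debugging is enabled. */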
static void set_strbuffers(struct vrend_context *rctx, struct dump_ctx* ctx,
struct vrend_strarray *shader)
{
strarray_addstrbuf(shader, &ctx->glsl_ver_ext);
strarray_addstrbuf(shader, &ctx->glsl_hdr);
strarray_addstrbuf(shader, &ctx->glsl_main);
VREND_DEBUG(dbg_shader_glsl, rctx, "GLSL:");
VREND_DEBUG_EXT(dbg_shader_glsl, rctx, strarray_dump(shader));
VREND_DEBUG(dbg_shader_glsl, rctx, "\n");
}
bool vrend_convert_shader(struct vrend_context *rctx,
struct vrend_shader_cfg *cfg,
const struct tgsi_token *tokens,
@@ -6270,21 +6375,13 @@ bool vrend_convert_shader(struct vrend_context *rctx,
if (ctx.info.indirect_files & (1 << TGSI_FILE_SAMPLER))
ctx.shader_req_bits |= SHADER_REQ_GPU_SHADER5;
if (!strbuf_alloc(&ctx.glsl_main, 4096))
if (!allocate_strbuffers(&ctx))
goto fail;
bret = tgsi_iterate_shader(tokens, &ctx.iter);
if (bret == false)
goto fail;
if (strbuf_get_error(&ctx.glsl_main))
goto fail;
if (!strbuf_alloc(&ctx.glsl_hdr, 1024))
goto fail;
if (!strbuf_alloc(&ctx.glsl_ver_ext, 1024))
goto fail;
emit_header(&ctx);
emit_ios(&ctx);
@@ -6297,82 +6394,10 @@ bool vrend_convert_shader(struct vrend_context *rctx,
goto fail;
free(ctx.temp_ranges);
sinfo->num_ucp = ctx.key->clip_plane_enable ? 8 : 0;
sinfo->has_pervertex_out = ctx.vs_has_pervertex;
sinfo->has_sample_input = ctx.has_sample_input;
bool has_prop = (ctx.num_clip_dist_prop + ctx.num_cull_dist_prop) > 0;
sinfo->num_clip_out = has_prop ? ctx.num_clip_dist_prop : (ctx.num_clip_dist ? ctx.num_clip_dist : 8);
sinfo->num_cull_out = has_prop ? ctx.num_cull_dist_prop : 0;
sinfo->samplers_used_mask = ctx.samplers_used;
sinfo->images_used_mask = ctx.images_used_mask;
sinfo->num_consts = ctx.num_consts;
sinfo->ubo_used_mask = ctx.ubo_used_mask;
sinfo->ssbo_used_mask = ctx.ssbo_used_mask;
sinfo->ubo_indirect = ctx.info.dimension_indirect_files & (1 << TGSI_FILE_CONSTANT);
if (ctx.generic_input_range.used)
sinfo->num_indirect_generic_inputs = ctx.generic_input_range.io.last - ctx.generic_input_range.io.sid + 1;
if (ctx.patch_input_range.used)
sinfo->num_indirect_patch_inputs = ctx.patch_input_range.io.last - ctx.patch_input_range.io.sid + 1;
if (ctx.generic_output_range.used)
sinfo->num_indirect_generic_outputs = ctx.generic_output_range.io.last - ctx.generic_output_range.io.sid + 1;
if (ctx.patch_output_range.used)
sinfo->num_indirect_patch_outputs = ctx.patch_output_range.io.last - ctx.patch_output_range.io.sid + 1;
sinfo->num_inputs = ctx.num_inputs;
sinfo->num_interps = ctx.num_interps;
sinfo->num_outputs = ctx.num_outputs;
sinfo->shadow_samp_mask = ctx.shadow_samp_mask;
sinfo->glsl_ver = ctx.glsl_ver_required;
sinfo->gs_out_prim = ctx.gs_out_prim;
sinfo->tes_prim = ctx.tes_prim_mode;
sinfo->tes_point_mode = ctx.tes_point_mode;
if (sinfo->so_names || ctx.so_names) {
if (sinfo->so_names) {
for (unsigned i = 0; i < sinfo->so_info.num_outputs; ++i)
free(sinfo->so_names[i]);
free(sinfo->so_names);
}
}
/* Record information about the layout of generics and patches for passing it
* to the next shader stage. mesa/tgsi doesn't provide this information for
* TCS, TES, and GEOM shaders.
*/
sinfo->guest_sent_io_arrays = ctx.guest_sent_io_arrays;
sinfo->num_generic_and_patch_outputs = 0;
for(unsigned i = 0; i < ctx.num_outputs; i++) {
sinfo->generic_outputs_layout[sinfo->num_generic_and_patch_outputs].name = ctx.outputs[i].name;
sinfo->generic_outputs_layout[sinfo->num_generic_and_patch_outputs].sid = ctx.outputs[i].sid;
sinfo->generic_outputs_layout[sinfo->num_generic_and_patch_outputs].location = ctx.outputs[i].layout_location;
sinfo->generic_outputs_layout[sinfo->num_generic_and_patch_outputs].array_id = ctx.outputs[i].array_id;
sinfo->generic_outputs_layout[sinfo->num_generic_and_patch_outputs].usage_mask = ctx.outputs[i].usage_mask;
if (ctx.outputs[i].name == TGSI_SEMANTIC_GENERIC || ctx.outputs[i].name == TGSI_SEMANTIC_PATCH) {
sinfo->num_generic_and_patch_outputs++;
}
}
sinfo->so_names = ctx.so_names;
sinfo->attrib_input_mask = ctx.attrib_input_mask;
if (sinfo->sampler_arrays)
free(sinfo->sampler_arrays);
sinfo->sampler_arrays = ctx.sampler_arrays;
sinfo->num_sampler_arrays = ctx.num_sampler_arrays;
if (sinfo->image_arrays)
free(sinfo->image_arrays);
sinfo->image_arrays = ctx.image_arrays;
sinfo->num_image_arrays = ctx.num_image_arrays;
fill_sinfo(&ctx, sinfo);
set_strbuffers(rctx, &ctx, shader);
strarray_addstrbuf(shader, &ctx.glsl_ver_ext);
strarray_addstrbuf(shader, &ctx.glsl_hdr);
strarray_addstrbuf(shader, &ctx.glsl_main);
VREND_DEBUG(dbg_shader_glsl, rctx, "GLSL:");
VREND_DEBUG_EXT(dbg_shader_glsl, rctx, strarray_dump(shader));
VREND_DEBUG(dbg_shader_glsl, rctx, "\n");
return true;
fail:
strbuf_free(&ctx.glsl_main);
