virgl/shaders: handle large shaders.

The protocol previously failed to handle larger shaders; this allows
the renderer to reassemble large shaders and recombine the chunks
before passing them to the GLSL translation.

This also enhances the renderer protocol to carry some more information
in the shader object, and removes the separate vs/gs/fs variants in
favour of a type field in the shader.
Dave Airlie, 9 years ago
commit 81b741a05c (parent 88c47ce27c)
Changed files:
  src/virgl_protocol.h      (55 lines changed)
  src/vrend_decode.c        (90 lines changed)
  src/vrend_renderer.c      (215 lines changed)
  src/vrend_renderer.h      (15 lines changed)
  tests/large_shader.h      (1416 lines changed)
  tests/test_virgl_cmd.c    (57 lines changed)
  tests/testvirgl_encode.c  (112 lines changed)

@ -1,26 +1,25 @@
/************************************************************************** /*
* * Copyright 2014, 2015 Red Hat.
* Copyright (C) 2015 Red Hat Inc.
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"), * copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation * to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense, * on the rights to use, copy, modify, merge, publish, distribute, sub
* and/or sell copies of the Software, and to permit persons to whom the * license, and/or sell copies of the Software, and to permit persons to whom
* Software is furnished to do so, subject to the following conditions: * the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * The above copyright notice and this permission notice (including the next
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * paragraph) shall be included in all copies or substantial portions of the
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * Software.
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
* *
**************************************************************************/ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef VIRGL_PROTOCOL_H #ifndef VIRGL_PROTOCOL_H
#define VIRGL_PROTOCOL_H #define VIRGL_PROTOCOL_H
@@ -39,15 +38,13 @@ enum virgl_object_type {
    VIRGL_OBJECT_BLEND,
    VIRGL_OBJECT_RASTERIZER,
    VIRGL_OBJECT_DSA,
-   VIRGL_OBJECT_VS,
-   VIRGL_OBJECT_FS,
+   VIRGL_OBJECT_SHADER,
    VIRGL_OBJECT_VERTEX_ELEMENTS,
    VIRGL_OBJECT_SAMPLER_VIEW,
    VIRGL_OBJECT_SAMPLER_STATE,
    VIRGL_OBJECT_SURFACE,
    VIRGL_OBJECT_QUERY,
    VIRGL_OBJECT_STREAMOUT_TARGET,
-   VIRGL_OBJECT_GS,
    VIRGL_MAX_OBJECTS,
 };
 
@@ -208,17 +205,25 @@
 #define VIRGL_OBJ_CLEAR_STENCIL 8
 
 /* shader object */
-#define VIRGL_OBJ_SHADER_HDR_SIZE(nso) (3 + ((nso) ? (nso) + 4 : 0))
+#define VIRGL_OBJ_SHADER_HDR_SIZE(nso) (5 + ((nso) ? (2 * nso) + 4 : 0))
 #define VIRGL_OBJ_SHADER_HANDLE 1
-#define VIRGL_OBJ_SHADER_NUM_TOKENS 2
-#define VIRGL_OBJ_SHADER_SO_NUM_OUTPUTS 3
-#define VIRGL_OBJ_SHADER_SO_STRIDE(x) (4 + (x))
-#define VIRGL_OBJ_SHADER_SO_OUTPUT0(x) (8 + (x))
+#define VIRGL_OBJ_SHADER_TYPE 2
+#define VIRGL_OBJ_SHADER_OFFSET 3
+#define VIRGL_OBJ_SHADER_OFFSET_VAL(x) (((x) & 0x7fffffff) << 0)
+/* start contains full length in VAL - also implies continuations */
+/* continuation contains offset in VAL */
+#define VIRGL_OBJ_SHADER_OFFSET_CONT (0x1 << 31)
+#define VIRGL_OBJ_SHADER_NUM_TOKENS 4
+#define VIRGL_OBJ_SHADER_SO_NUM_OUTPUTS 5
+#define VIRGL_OBJ_SHADER_SO_STRIDE(x) (6 + (x))
+#define VIRGL_OBJ_SHADER_SO_OUTPUT0(x) (10 + (x * 2))
 #define VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(x) (((x) & 0xff) << 0)
 #define VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(x) (((x) & 0x3) << 8)
 #define VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(x) (((x) & 0x7) << 10)
 #define VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(x) (((x) & 0x7) << 13)
 #define VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(x) (((x) & 0xffff) << 16)
+#define VIRGL_OBJ_SHADER_SO_OUTPUT0_SO(x) (11 + (x * 2))
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_STREAM(x) (((x) & 0x03) << 0)
 
 /* viewport state */
 #define VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports) ((6 * num_viewports) + 1)
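As a small illustration of the new OFFSET dword (a sketch built on the macros above; the variable names are hypothetical):

/* First packet of a shader: OFFSET carries the total text length in bytes. */
uint32_t first_offlen = VIRGL_OBJ_SHADER_OFFSET_VAL(total_len);

/* Continuation packet: OFFSET carries the byte offset reached so far,
 * with the continuation bit set. */
uint32_t cont_offlen = VIRGL_OBJ_SHADER_OFFSET_VAL(bytes_sent) |
                       VIRGL_OBJ_SHADER_OFFSET_CONT;

/* The decoder tells the two cases apart by testing the top bit. */
bool is_continuation = !!(offlen & VIRGL_OBJ_SHADER_OFFSET_CONT);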

@@ -61,68 +61,48 @@ static inline void *get_buf_ptr(struct vrend_decode_ctx *ctx,
    return &ctx->ds->buf[ctx->ds->buf_offset + offset];
 }
 
-static int vrend_decode_create_shader(struct vrend_decode_ctx *ctx, uint32_t type,
+static int vrend_decode_create_shader(struct vrend_decode_ctx *ctx,
                                       uint32_t handle,
                                       uint16_t length)
 {
-   struct pipe_shader_state *state;
-   struct tgsi_token *tokens;
+   struct pipe_stream_output_info so_info;
    int i, ret;
    uint32_t shader_offset;
-   unsigned num_tokens;
+   unsigned num_tokens, num_so_outputs, offlen;
    uint8_t *shd_text;
+   uint32_t type;
 
-   if (length < 3)
+   if (length < 5)
       return EINVAL;
 
-   state = CALLOC_STRUCT(pipe_shader_state);
-   if (!state)
-      return ENOMEM;
-
+   type = get_buf_entry(ctx, VIRGL_OBJ_SHADER_TYPE);
    num_tokens = get_buf_entry(ctx, VIRGL_OBJ_SHADER_NUM_TOKENS);
+   offlen = get_buf_entry(ctx, VIRGL_OBJ_SHADER_OFFSET);
 
-   if (num_tokens == 0)
-      num_tokens = 300;
-
-   tokens = calloc(num_tokens + 10, sizeof(struct tgsi_token));
-   if (!tokens) {
-      free(state);
-      return ENOMEM;
-   }
-
-   state->stream_output.num_outputs = get_buf_entry(ctx, VIRGL_OBJ_SHADER_SO_NUM_OUTPUTS);
-   if (state->stream_output.num_outputs) {
-      for (i = 0; i < 4; i++)
-         state->stream_output.stride[i] = get_buf_entry(ctx, VIRGL_OBJ_SHADER_SO_STRIDE(i));
-      for (i = 0; i < state->stream_output.num_outputs; i++) {
-         uint32_t tmp = get_buf_entry(ctx, VIRGL_OBJ_SHADER_SO_OUTPUT0(i));
-
-         state->stream_output.output[i].register_index = tmp & 0xff;
-         state->stream_output.output[i].start_component = (tmp >> 8) & 0x3;
-         state->stream_output.output[i].num_components = (tmp >> 10) & 0x7;
-         state->stream_output.output[i].output_buffer = (tmp >> 13) & 0x7;
-         state->stream_output.output[i].dst_offset = (tmp >> 16) & 0xffff;
+   num_so_outputs = get_buf_entry(ctx, VIRGL_OBJ_SHADER_SO_NUM_OUTPUTS);
+   shader_offset = 6;
+   if (num_so_outputs) {
+      so_info.num_outputs = num_so_outputs;
+      if (so_info.num_outputs) {
+         for (i = 0; i < 4; i++)
+            so_info.stride[i] = get_buf_entry(ctx, VIRGL_OBJ_SHADER_SO_STRIDE(i));
+         for (i = 0; i < so_info.num_outputs; i++) {
+            uint32_t tmp = get_buf_entry(ctx, VIRGL_OBJ_SHADER_SO_OUTPUT0(i));
+
+            so_info.output[i].register_index = tmp & 0xff;
+            so_info.output[i].start_component = (tmp >> 8) & 0x3;
+            so_info.output[i].num_components = (tmp >> 10) & 0x7;
+            so_info.output[i].output_buffer = (tmp >> 13) & 0x7;
+            so_info.output[i].dst_offset = (tmp >> 16) & 0xffff;
+         }
       }
-      shader_offset = 8 + state->stream_output.num_outputs;
+      shader_offset += 4 + (2 * num_so_outputs);
    } else
-      shader_offset = 4;
+      memset(&so_info, 0, sizeof(so_info));
 
    shd_text = get_buf_ptr(ctx, shader_offset);
-   if (vrend_dump_shaders)
-      fprintf(stderr,"shader\n%s\n", shd_text);
-   if (!tgsi_text_translate((const char *)shd_text, tokens, num_tokens + 10)) {
-      fprintf(stderr,"failed to translate\n %s\n", shd_text);
-      free(tokens);
-      free(state);
-      return EINVAL;
-   }
-
-   state->tokens = tokens;
-   ret = vrend_create_shader(ctx->grctx, handle, state, type);
-
-   free(tokens);
-   free(state);
+   ret = vrend_create_shader(ctx->grctx, handle, &so_info, (const char *)shd_text, offlen, num_tokens, type, length - shader_offset + 1);
 
    return ret;
 }
@@ -655,10 +635,8 @@ static int vrend_decode_create_object(struct vrend_decode_ctx *ctx, int length)
    case VIRGL_OBJECT_RASTERIZER:
       ret = vrend_decode_create_rasterizer(ctx, handle, length);
       break;
-   case VIRGL_OBJECT_VS:
-   case VIRGL_OBJECT_GS:
-   case VIRGL_OBJECT_FS:
-      ret = vrend_decode_create_shader(ctx, obj_type, handle, length);
+   case VIRGL_OBJECT_SHADER:
+      ret = vrend_decode_create_shader(ctx, handle, length);
       break;
    case VIRGL_OBJECT_VERTEX_ELEMENTS:
       ret = vrend_decode_create_ve(ctx, handle, length);
@@ -704,14 +682,8 @@ static int vrend_decode_bind_object(struct vrend_decode_ctx *ctx, uint16_t lengt
    case VIRGL_OBJECT_RASTERIZER:
       vrend_object_bind_rasterizer(ctx->grctx, handle);
       break;
-   case VIRGL_OBJECT_VS:
-      vrend_bind_vs(ctx->grctx, handle);
-      break;
-   case VIRGL_OBJECT_GS:
-      vrend_bind_gs(ctx->grctx, handle);
-      break;
-   case VIRGL_OBJECT_FS:
-      vrend_bind_fs(ctx->grctx, handle);
+   case VIRGL_OBJECT_SHADER:
+      vrend_bind_shader(ctx->grctx, handle);
       break;
    case VIRGL_OBJECT_VERTEX_ELEMENTS:
       vrend_bind_vertex_elements_state(ctx->grctx, handle);

@@ -47,6 +47,7 @@
 #include "virgl_hw.h"
+#include "tgsi/tgsi_text.h"
 
 /* debugging aid to dump shaders */
 int vrend_dump_shaders;
@@ -153,6 +154,10 @@ struct vrend_shader_selector {
    struct vrend_shader *current;
    struct tgsi_token *tokens;
+
+   char *tmp_buf;
+   uint32_t buf_len;
+   uint32_t buf_offset;
 };
 
 struct vrend_texture {
@@ -281,10 +286,8 @@ struct vrend_sub_context {
    bool sampler_state_dirty;
    bool stencil_state_dirty;
 
-   struct vrend_shader_selector *vs;
-   struct vrend_shader_selector *gs;
-   struct vrend_shader_selector *fs;
+   uint32_t long_shader_in_progress_handle[PIPE_SHADER_TYPES];
+   struct vrend_shader_selector *shaders[PIPE_SHADER_TYPES];
 
    struct vrend_linked_shader_program *prog;
    struct vrend_shader_view views[PIPE_SHADER_TYPES];
@@ -530,6 +533,7 @@ static void vrend_destroy_shader_selector(struct vrend_shader_selector *sel)
    }
   for (i = 0; i < sel->sinfo.so_info.num_outputs; i++)
       free(sel->sinfo.so_names[i]);
+   free(sel->tmp_buf);
    free(sel->sinfo.so_names);
    free(sel->sinfo.interpinfo);
    free(sel->tokens);
@@ -1941,7 +1945,7 @@ static inline void vrend_fill_shader_key(struct vrend_context *ctx,
    key->invert_fs_origin = !ctx->sub->inverted_fbo_content;
    key->coord_replace = ctx->sub->rs_state.point_quad_rasterization ? ctx->sub->rs_state.sprite_coord_enable : 0;
 
-   if (ctx->sub->gs)
+   if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
       key->gs_present = true;
 }
@@ -2030,96 +2034,148 @@ static int vrend_shader_select(struct vrend_context *ctx,
 }
 
 static void *vrend_create_shader_state(struct vrend_context *ctx,
-                                       const struct pipe_shader_state *state,
+                                       const struct pipe_stream_output_info *so_info,
                                        unsigned pipe_shader_type)
 {
    struct vrend_shader_selector *sel = CALLOC_STRUCT(vrend_shader_selector);
-   int r;
 
    if (!sel)
       return NULL;
 
    sel->type = pipe_shader_type;
-   sel->sinfo.so_info = state->stream_output;
-   sel->tokens = tgsi_dup_tokens(state->tokens);
+   sel->sinfo.so_info = *so_info;
    pipe_reference_init(&sel->reference, 1);
 
-   r = vrend_shader_select(ctx, sel, NULL);
-   if (r) {
-      vrend_destroy_shader_selector(sel);
-      return NULL;
-   }
    return sel;
 }
 
-static inline int shader_type_to_pipe_type(int type)
-{
-   switch (type) {
-   case VIRGL_OBJECT_GS:
-      return PIPE_SHADER_GEOMETRY;
-   case VIRGL_OBJECT_VS:
-      return PIPE_SHADER_VERTEX;
-   case VIRGL_OBJECT_FS:
-      return PIPE_SHADER_FRAGMENT;
-   }
-   return 0;
-}
-
-int vrend_create_shader(struct vrend_context *ctx,
-                        uint32_t handle, const struct pipe_shader_state *ss,
-                        int type)
-{
-   struct vrend_shader_selector *sel;
-   int ret_handle;
-
-   sel = vrend_create_shader_state(ctx, ss, shader_type_to_pipe_type(type));
-   if (sel == NULL)
-      return ENOMEM;
-
-   ret_handle = vrend_renderer_object_insert(ctx, sel, sizeof(*sel), handle, type);
-   if (ret_handle == 0) {
-      vrend_destroy_shader_selector(sel);
-      return ENOMEM;
-   }
-
-   return 0;
-}
-
-void vrend_bind_vs(struct vrend_context *ctx,
-                   uint32_t handle)
-{
-   struct vrend_shader_selector *sel;
-
-   sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_VS);
-
-   if (ctx->sub->vs != sel)
-      ctx->sub->shader_dirty = true;
-   vrend_shader_state_reference(&ctx->sub->vs, sel);
-}
-
-void vrend_bind_gs(struct vrend_context *ctx,
-                   uint32_t handle)
-{
-   struct vrend_shader_selector *sel;
-
-   sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_GS);
-
-   if (ctx->sub->gs != sel)
-      ctx->sub->shader_dirty = true;
-   vrend_shader_state_reference(&ctx->sub->gs, sel);
-}
-
-void vrend_bind_fs(struct vrend_context *ctx,
-                   uint32_t handle)
-{
-   struct vrend_shader_selector *sel;
-
-   sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_FS);
-
-   if (ctx->sub->fs != sel)
-      ctx->sub->shader_dirty = true;
-   vrend_shader_state_reference(&ctx->sub->fs, sel);
-}
+static int vrend_finish_shader(struct vrend_context *ctx,
+                               struct vrend_shader_selector *sel,
+                               const struct tgsi_token *tokens)
+{
+   int r;
+
+   sel->tokens = tgsi_dup_tokens(tokens);
+
+   r = vrend_shader_select(ctx, sel, NULL);
+   if (r) {
+      vrend_destroy_shader_selector(sel);
+      return EINVAL;
+   }
+   return 0;
+}
+
+int vrend_create_shader(struct vrend_context *ctx,
+                        uint32_t handle,
+                        const struct pipe_stream_output_info *so_info,
+                        const char *shd_text, uint32_t offlen, uint32_t num_tokens,
+                        int type, uint32_t pkt_length)
+{
+   struct vrend_shader_selector *sel;
+   int ret_handle;
+   bool new_shader = true, long_shader = false;
+   bool finished = false;
+
+   if (type > PIPE_SHADER_GEOMETRY)
+      return EINVAL;
+
+   if (offlen & VIRGL_OBJ_SHADER_OFFSET_CONT)
+      new_shader = false;
+   else if (((offlen + 3) / 4) > pkt_length)
+      long_shader = true;
+
+   /* if we have an in progress one - don't allow a new shader
+      of that type or a different handle. */
+   if (ctx->sub->long_shader_in_progress_handle[type]) {
+      if (new_shader == true)
+         return EINVAL;
+      if (handle != ctx->sub->long_shader_in_progress_handle[type])
+         return EINVAL;
+   }
+
+   if (new_shader) {
+      sel = vrend_create_shader_state(ctx, so_info, type);
+      if (sel == NULL)
+         return ENOMEM;
+
+      if (long_shader) {
+         sel->tmp_buf = malloc(offlen);
+         if (!sel->tmp_buf) {
+            free(sel);
+            return ENOMEM;
+         }
+         sel->buf_len = offlen;
+         memcpy(sel->tmp_buf, shd_text, pkt_length * 4);
+         sel->buf_offset = pkt_length * 4;
+         ctx->sub->long_shader_in_progress_handle[type] = handle;
+      } else
+         finished = true;
+   } else {
+      sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_SHADER);
+      if (!sel) {
+         fprintf(stderr, "got continuation without original shader %d\n", handle);
+         return EINVAL;
+      }
+
+      offlen &= ~VIRGL_OBJ_SHADER_OFFSET_CONT;
+      if (offlen != sel->buf_offset) {
+         fprintf(stderr, "Got mismatched shader continuation %d vs %d\n",
+                 offlen, sel->buf_offset);
+         vrend_renderer_object_destroy(ctx, handle);
+         return EINVAL;
+      }
+
+      memcpy(sel->tmp_buf + sel->buf_offset, shd_text, pkt_length * 4);
+      sel->buf_offset += pkt_length * 4;
+      if (sel->buf_offset >= sel->buf_len) {
+         finished = true;
+         shd_text = sel->tmp_buf;
+      }
+   }
+
+   if (finished) {
+      struct tgsi_token *tokens;
+
+      tokens = calloc(num_tokens + 10, sizeof(struct tgsi_token));
+      if (!tokens) {
+         return ENOMEM;
+      }
+
+      if (vrend_dump_shaders)
+         fprintf(stderr,"shader\n%s\n", shd_text);
+      if (!tgsi_text_translate((const char *)shd_text, tokens, num_tokens + 10))
+         return EINVAL;
+
+      vrend_finish_shader(ctx, sel, tokens);
+      free(sel->tmp_buf);
+      sel->tmp_buf = NULL;
+      free(tokens);
+      ctx->sub->long_shader_in_progress_handle[type] = 0;
+   }
+
+   if (new_shader) {
+      ret_handle = vrend_renderer_object_insert(ctx, sel, sizeof(*sel), handle, VIRGL_OBJECT_SHADER);
+      if (ret_handle == 0) {
+         vrend_destroy_shader_selector(sel);
+         return ENOMEM;
+      }
+   }
+
+   return 0;
+}
+
+void vrend_bind_shader(struct vrend_context *ctx,
+                       uint32_t handle)
+{
+   struct vrend_shader_selector *sel;
+
+   sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_SHADER);
+   if (!sel)
+      return;
+
+   if (ctx->sub->shaders[sel->type] != sel)
+      ctx->sub->shader_dirty = true;
+   vrend_shader_state_reference(&ctx->sub->shaders[sel->type], sel);
+}
 
 void vrend_clear(struct vrend_context *ctx,
@@ -2395,7 +2451,7 @@ static void vrend_draw_bind_samplers(struct vrend_context *ctx)
    int shader_type;
 
    sampler_id = 0;
-   for (shader_type = PIPE_SHADER_VERTEX; shader_type <= (ctx->sub->gs ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT); shader_type++) {
+   for (shader_type = PIPE_SHADER_VERTEX; shader_type <= (ctx->sub->shaders[PIPE_SHADER_GEOMETRY] ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT); shader_type++) {
       int index = 0;
       for (i = 0; i < ctx->sub->views[shader_type].num_views; i++) {
          struct vrend_resource *texture = NULL;
@@ -2467,7 +2523,7 @@ static void vrend_draw_bind_ubo(struct vrend_context *ctx)
    int shader_type;
 
    ubo_id = 0;
-   for (shader_type = PIPE_SHADER_VERTEX; shader_type <= (ctx->sub->gs ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT); shader_type++) {
+   for (shader_type = PIPE_SHADER_VERTEX; shader_type <= (ctx->sub->shaders[PIPE_SHADER_GEOMETRY] ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT); shader_type++) {
       uint32_t mask;
       int shader_ubo_idx = 0;
      struct pipe_constant_buffer *cb;
@@ -2522,23 +2578,25 @@ void vrend_draw_vbo(struct vrend_context *ctx,
    struct vrend_linked_shader_program *prog;
    bool fs_dirty, vs_dirty, gs_dirty;
    bool dual_src = util_blend_state_is_dual(&ctx->sub->blend_state, 0);
-   if (!ctx->sub->vs || !ctx->sub->fs) {
+   if (!ctx->sub->shaders[PIPE_SHADER_VERTEX] || !ctx->sub->shaders[PIPE_SHADER_FRAGMENT]) {
       fprintf(stderr,"dropping rendering due to missing shaders: %s\n", ctx->debug_name);
       return;
    }
 
-   vrend_shader_select(ctx, ctx->sub->fs, &fs_dirty);
-   vrend_shader_select(ctx, ctx->sub->vs, &vs_dirty);
-   if (ctx->sub->gs)
-      vrend_shader_select(ctx, ctx->sub->gs, &gs_dirty);
+   vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_FRAGMENT], &fs_dirty);
+   vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_VERTEX], &vs_dirty);
+   if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
+      vrend_shader_select(ctx, ctx->sub->shaders[PIPE_SHADER_GEOMETRY], &gs_dirty);
 
-   if (!ctx->sub->vs->current || !ctx->sub->fs->current || (ctx->sub->gs && !ctx->sub->gs->current)) {
+   if (!ctx->sub->shaders[PIPE_SHADER_VERTEX]->current || !ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current || (ctx->sub->shaders[PIPE_SHADER_GEOMETRY] && !ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current)) {
       fprintf(stderr, "failure to compile shader variants: %s\n", ctx->debug_name);
       return;
    }
-   prog = lookup_shader_program(ctx, ctx->sub->vs->current->id, ctx->sub->fs->current->id, ctx->sub->gs ? ctx->sub->gs->current->id : 0, dual_src);
+   prog = lookup_shader_program(ctx, ctx->sub->shaders[PIPE_SHADER_VERTEX]->current->id, ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current->id, ctx->sub->shaders[PIPE_SHADER_GEOMETRY] ? ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current->id : 0, dual_src);
    if (!prog) {
-      prog = add_shader_program(ctx, ctx->sub->vs->current, ctx->sub->fs->current, ctx->sub->gs ? ctx->sub->gs->current : NULL);
+      prog = add_shader_program(ctx,
+                                ctx->sub->shaders[PIPE_SHADER_VERTEX]->current,
+                                ctx->sub->shaders[PIPE_SHADER_FRAGMENT]->current, ctx->sub->shaders[PIPE_SHADER_GEOMETRY] ? ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->current : NULL);
       if (!prog)
         return;
    }
@@ -2555,16 +2613,10 @@ void vrend_draw_vbo(struct vrend_context *ctx,
 
    vrend_use_program(ctx, ctx->sub->prog->id);
 
-   for (shader_type = PIPE_SHADER_VERTEX; shader_type <= (ctx->sub->gs ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT); shader_type++) {
+   for (shader_type = PIPE_SHADER_VERTEX; shader_type <= (ctx->sub->shaders[PIPE_SHADER_GEOMETRY] ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT); shader_type++) {
       if (ctx->sub->prog->const_locs[shader_type] && (ctx->sub->const_dirty[shader_type] || new_program)) {
          int nc;
-         if (shader_type == PIPE_SHADER_VERTEX) {
-            nc = ctx->sub->vs->sinfo.num_consts;
-         } else if (shader_type == PIPE_SHADER_GEOMETRY) {
-            nc = ctx->sub->gs->sinfo.num_consts;
-         } else if (shader_type == PIPE_SHADER_FRAGMENT) {
-            nc = ctx->sub->fs->sinfo.num_consts;
-         }
+         nc = ctx->sub->shaders[shader_type]->sinfo.num_consts;
          for (i = 0; i < nc; i++) {
            if (ctx->sub->prog->const_locs[shader_type][i] != -1 && ctx->sub->consts[shader_type].consts)
               glUniform4uiv(ctx->sub->prog->const_locs[shader_type][i], 1, &ctx->sub->consts[shader_type].consts[i * 4]);
@@ -2602,8 +2654,8 @@ void vrend_draw_vbo(struct vrend_context *ctx,
 
    if (ctx->sub->current_so) {
       if (ctx->sub->current_so->xfb_state == XFB_STATE_STARTED_NEED_BEGIN) {
-         if (ctx->sub->gs)
-            glBeginTransformFeedback(get_gs_xfb_mode(ctx->sub->gs->sinfo.gs_out_prim));
+         if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
+            glBeginTransformFeedback(get_gs_xfb_mode(ctx->sub->shaders[PIPE_SHADER_GEOMETRY]->sinfo.gs_out_prim));
          else
            glBeginTransformFeedback(get_xfb_mode(info->mode));
          ctx->sub->current_so->xfb_state = XFB_STATE_STARTED;
@@ -3427,9 +3479,7 @@ void vrend_renderer_init(struct vrend_if_cbs *cbs)
    vrend_resource_set_destroy_callback(vrend_destroy_resource_object);
    vrend_object_set_destroy_callback(VIRGL_OBJECT_QUERY, vrend_destroy_query_object);
    vrend_object_set_destroy_callback(VIRGL_OBJECT_SURFACE, vrend_destroy_surface_object);
-   vrend_object_set_destroy_callback(VIRGL_OBJECT_VS, vrend_destroy_shader_object);
-   vrend_object_set_destroy_callback(VIRGL_OBJECT_FS, vrend_destroy_shader_object);
-   vrend_object_set_destroy_callback(VIRGL_OBJECT_GS, vrend_destroy_shader_object);
+   vrend_object_set_destroy_callback(VIRGL_OBJECT_SHADER, vrend_destroy_shader_object);
    vrend_object_set_destroy_callback(VIRGL_OBJECT_SAMPLER_VIEW, vrend_destroy_sampler_view_object);
    vrend_object_set_destroy_callback(VIRGL_OBJECT_STREAMOUT_TARGET, vrend_destroy_so_target_object);
    vrend_object_set_destroy_callback(VIRGL_OBJECT_SAMPLER_STATE, vrend_destroy_sampler_state_object);
@@ -3488,9 +3538,10 @@ static void vrend_destroy_sub_context(struct vrend_sub_context *sub)
    LIST_FOR_EACH_ENTRY_SAFE(obj, tmp, &sub->streamout_list, head) {
       vrend_destroy_streamout_object(obj);
    }
-   vrend_shader_state_reference(&sub->vs, NULL);
-   vrend_shader_state_reference(&sub->fs, NULL);
-   vrend_shader_state_reference(&sub->gs, NULL);
+
+   vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_VERTEX], NULL);
+   vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_FRAGMENT], NULL);
+   vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_GEOMETRY], NULL);
 
    vrend_free_programs(sub);

@@ -104,17 +104,12 @@ void vrend_insert_format(struct vrend_format_table *entry, uint32_t bindings);
 void vrend_insert_format_swizzle(int override_format, struct vrend_format_table *entry, uint32_t bindings, uint8_t swizzle[4]);
 int vrend_create_shader(struct vrend_context *ctx,
                         uint32_t handle,
-                        const struct pipe_shader_state *vs,
-                        int type);
+                        const struct pipe_stream_output_info *stream_output,
+                        const char *shd_text, uint32_t offlen, uint32_t num_tokens,
+                        int type, uint32_t pkt_length);
 
-void vrend_bind_vs(struct vrend_context *ctx,
+void vrend_bind_shader(struct vrend_context *ctx,
                    uint32_t handle);
 
-void vrend_bind_gs(struct vrend_context *ctx,
-                   uint32_t handle);
-
-void vrend_bind_fs(struct vrend_context *ctx,
-                   uint32_t handle);
-
 void vrend_bind_vs_so(struct vrend_context *ctx,
                       uint32_t handle);

tests/large_shader.h: file diff suppressed because it is too large.

@ -33,6 +33,7 @@
#include "virgl_protocol.h" #include "virgl_protocol.h"
#include "util/u_memory.h" #include "util/u_memory.h"
#include "large_shader.h"
/* test creating objects with same ID causes context err */ /* test creating objects with same ID causes context err */
START_TEST(virgl_test_overlap_obj_id) START_TEST(virgl_test_overlap_obj_id)
{ {
@ -343,9 +344,9 @@ START_TEST(virgl_test_render_simple)
" 2: END\n"; " 2: END\n";
memset(&vs, 0, sizeof(vs)); memset(&vs, 0, sizeof(vs));
vs_handle = ctx_handle++; vs_handle = ctx_handle++;
virgl_encode_shader_state(&ctx, vs_handle, VIRGL_OBJECT_VS, virgl_encode_shader_state(&ctx, vs_handle, PIPE_SHADER_VERTEX,
&vs, text); &vs, text);
virgl_encode_bind_object(&ctx, vs_handle, VIRGL_OBJECT_VS); virgl_encode_bind_object(&ctx, vs_handle, VIRGL_OBJECT_SHADER);
} }
/* create fragment shader */ /* create fragment shader */
@ -359,10 +360,10 @@ START_TEST(virgl_test_render_simple)
" 1: END\n"; " 1: END\n";
memset(&fs, 0, sizeof(fs)); memset(&fs, 0, sizeof(fs));
fs_handle = ctx_handle++; fs_handle = ctx_handle++;
virgl_encode_shader_state(&ctx, fs_handle, VIRGL_OBJECT_FS, virgl_encode_shader_state(&ctx, fs_handle, PIPE_SHADER_FRAGMENT,
&fs, text); &fs, text);
virgl_encode_bind_object(&ctx, fs_handle, VIRGL_OBJECT_FS); virgl_encode_bind_object(&ctx, fs_handle, VIRGL_OBJECT_SHADER);
} }
/* set blend state */ /* set blend state */
@ -568,9 +569,9 @@ START_TEST(virgl_test_render_geom_simple)
" 2: END\n"; " 2: END\n";
memset(&vs, 0, sizeof(vs)); memset(&vs, 0, sizeof(vs));
vs_handle = ctx_handle++; vs_handle = ctx_handle++;
virgl_encode_shader_state(&ctx, vs_handle, VIRGL_OBJECT_VS, virgl_encode_shader_state(&ctx, vs_handle, PIPE_SHADER_VERTEX,
&vs, text); &vs, text);
virgl_encode_bind_object(&ctx, vs_handle, VIRGL_OBJECT_VS); virgl_encode_bind_object(&ctx, vs_handle, VIRGL_OBJECT_SHADER);
} }
/* create geometry shader */ /* create geometry shader */
@ -599,9 +600,9 @@ START_TEST(virgl_test_render_geom_simple)
"9:END\n"; "9:END\n";
memset(&gs, 0, sizeof(gs)); memset(&gs, 0, sizeof(gs));
gs_handle = ctx_handle++; gs_handle = ctx_handle++;
virgl_encode_shader_state(&ctx, gs_handle, VIRGL_OBJECT_GS, virgl_encode_shader_state(&ctx, gs_handle, PIPE_SHADER_GEOMETRY,
&gs, text); &gs, text);
virgl_encode_bind_object(&ctx, gs_handle, VIRGL_OBJECT_GS); virgl_encode_bind_object(&ctx, gs_handle, VIRGL_OBJECT_SHADER);
} }
/* create fragment shader */ /* create fragment shader */
@ -615,10 +616,10 @@ START_TEST(virgl_test_render_geom_simple)
" 1: END\n"; " 1: END\n";
memset(&fs, 0, sizeof(fs)); memset(&fs, 0, sizeof(fs));
fs_handle = ctx_handle++; fs_handle = ctx_handle++;
virgl_encode_shader_state(&ctx, fs_handle, VIRGL_OBJECT_FS, virgl_encode_shader_state(&ctx, fs_handle, PIPE_SHADER_FRAGMENT,
&fs, text); &fs, text);
virgl_encode_bind_object(&ctx, fs_handle, VIRGL_OBJECT_FS); virgl_encode_bind_object(&ctx, fs_handle, VIRGL_OBJECT_SHADER);
} }
/* set blend state */ /* set blend state */
@ -844,9 +845,9 @@ START_TEST(virgl_test_render_xfb)
vs.stream_output.num_outputs = 1; vs.stream_output.num_outputs = 1;
vs.stream_output.stride[0] = 4; vs.stream_output.stride[0] = 4;
vs.stream_output.output[0].num_components = 4; vs.stream_output.output[0].num_components = 4;
virgl_encode_shader_state(&ctx, vs_handle, VIRGL_OBJECT_VS, virgl_encode_shader_state(&ctx, vs_handle, PIPE_SHADER_VERTEX,
&vs, text); &vs, text);
virgl_encode_bind_object(&ctx, vs_handle, VIRGL_OBJECT_VS); virgl_encode_bind_object(&ctx, vs_handle, VIRGL_OBJECT_SHADER);
} }
/* create fragment shader */ /* create fragment shader */
@ -860,10 +861,10 @@ START_TEST(virgl_test_render_xfb)
" 1: END\n"; " 1: END\n";
memset(&fs, 0, sizeof(fs)); memset(&fs, 0, sizeof(fs));
fs_handle = ctx_handle++; fs_handle = ctx_handle++;
virgl_encode_shader_state(&ctx, fs_handle, VIRGL_OBJECT_FS, virgl_encode_shader_state(&ctx, fs_handle, PIPE_SHADER_FRAGMENT,
&fs, text); &fs, text);
virgl_encode_bind_object(&ctx, fs_handle, VIRGL_OBJECT_FS); virgl_encode_bind_object(&ctx, fs_handle, VIRGL_OBJECT_SHADER);
} }
/* set blend state */ /* set blend state */
@ -977,6 +978,33 @@ START_TEST(virgl_test_render_xfb)
} }
END_TEST END_TEST
/* send a large shader across */
START_TEST(virgl_test_large_shader)
{
int ret;
struct virgl_context ctx;
int ctx_handle = 1;
int fs_handle;
ret = testvirgl_init_ctx_cmdbuf(&ctx);
ck_assert_int_eq(ret, 0);
/* create large fragment shader */
{
struct pipe_shader_state fs;
const char *text = large_frag;
memset(&fs, 0, sizeof(fs));
fs_handle = ctx_handle++;
virgl_encode_shader_state(&ctx, fs_handle, PIPE_SHADER_FRAGMENT,
&fs, text);
virgl_encode_bind_object(&ctx, fs_handle, VIRGL_OBJECT_SHADER);
}
testvirgl_fini_ctx_cmdbuf(&ctx);
}
END_TEST
Suite *virgl_init_suite(void) Suite *virgl_init_suite(void)
{ {
Suite *s; Suite *s;
@ -987,6 +1015,7 @@ Suite *virgl_init_suite(void)
tcase_add_test(tc_core, virgl_test_clear); tcase_add_test(tc_core, virgl_test_clear);
tcase_add_test(tc_core, virgl_test_blit_simple); tcase_add_test(tc_core, virgl_test_blit_simple);
tcase_add_test(tc_core, virgl_test_overlap_obj_id); tcase_add_test(tc_core, virgl_test_overlap_obj_id);
tcase_add_test(tc_core, virgl_test_large_shader);
tcase_add_test(tc_core, virgl_test_render_simple); tcase_add_test(tc_core, virgl_test_render_simple);
tcase_add_test(tc_core, virgl_test_render_geom_simple); tcase_add_test(tc_core, virgl_test_render_geom_simple);
tcase_add_test(tc_core, virgl_test_render_xfb); tcase_add_test(tc_core, virgl_test_render_xfb);

@@ -189,59 +189,111 @@ int virgl_encode_rasterizer_state(struct virgl_context *ctx,
    return 0;
 }
 
+static void virgl_emit_shader_header(struct virgl_context *ctx,
+                                     uint32_t handle, uint32_t len,
+                                     uint32_t type, uint32_t offlen,
+                                     uint32_t num_tokens)
+{
+   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SHADER, len));
+   virgl_encoder_write_dword(ctx->cbuf, handle);
+   virgl_encoder_write_dword(ctx->cbuf, type);
+   virgl_encoder_write_dword(ctx->cbuf, offlen);
+   virgl_encoder_write_dword(ctx->cbuf, num_tokens);
+}
+
+static void virgl_emit_shader_streamout(struct virgl_context *ctx,
+                                        const struct pipe_stream_output_info *so_info)
+{
+   int num_outputs = 0;
+   int i;
+   uint32_t tmp;
+
+   if (so_info)
+      num_outputs = so_info->num_outputs;
+
+   virgl_encoder_write_dword(ctx->cbuf, num_outputs);
+   if (num_outputs) {
+      for (i = 0; i < 4; i++)
+         virgl_encoder_write_dword(ctx->cbuf, so_info->stride[i]);
+
+      for (i = 0; i < so_info->num_outputs; i++) {
+         tmp =
+           VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(so_info->output[i].register_index) |
+           VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(so_info->output[i].start_component) |
+           VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(so_info->output[i].num_components) |
+           VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info->output[i].output_buffer) |
+           VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info->output[i].dst_offset);
+
+         virgl_encoder_write_dword(ctx->cbuf, tmp);
+         virgl_encoder_write_dword(ctx->cbuf, 0);
+      }
+   }
+}
+
 int virgl_encode_shader_state(struct virgl_context *ctx,
                               uint32_t handle,
                               uint32_t type,
                               const struct pipe_shader_state *shader,
                               const char *shad_str)
 {
-   char *str;
+   char *str, *sptr;
    uint32_t shader_len, len;
-   int i;
    int ret;
-   uint32_t tmp;
    int num_tokens;
    int str_total_size = 65536;
+   uint32_t left_bytes, base_hdr_size, strm_hdr_size, thispass;
+   bool first_pass;
 
    if (!shad_str) {
      num_tokens = tgsi_num_tokens(shader->tokens);
      str = CALLOC(1, str_total_size);
      if (!str)
        return -1;
      ret = tgsi_dump_str(shader->tokens, TGSI_DUMP_FLOAT_AS_HEX, str, str_total_size);
      if (ret == -1) {
        fprintf(stderr, "Failed to translate shader in available space\n");
        FREE(str);
        return -1;
      }
    } else {
-     num_tokens = 0;
+     num_tokens = 300;
      str = (char *)shad_str;
    }
 
    shader_len = strlen(str) + 1;
-   len = ((shader_len + 3) / 4) + VIRGL_OBJ_SHADER_HDR_SIZE(shader->stream_output.num_outputs);
-
-   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, type, len));
-   virgl_encoder_write_dword(ctx->cbuf, handle);
-   virgl_encoder_write_dword(ctx->cbuf, num_tokens);
-   virgl_encoder_write_dword(ctx->cbuf, shader->stream_output.num_outputs);
-   if (shader->stream_output.num_outputs) {
-      for (i = 0; i < 4; i++)
-         virgl_encoder_write_dword(ctx->cbuf, shader->stream_output.stride[i]);
-
-      for (i = 0; i < shader->stream_output.num_outputs; i++) {
-         tmp =
-           VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(shader->stream_output.output[i].register_index) |
-           VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(shader->stream_output.output[i].start_component) |
-           VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(shader->stream_output.output[i].num_components) |
-           VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(shader->stream_output.output[i].output_buffer) |
-           VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(shader->stream_output.output[i].dst_offset);
-         virgl_encoder_write_dword(ctx->cbuf, tmp);
-      }
+   left_bytes = shader_len;
+
+   base_hdr_size = 5;
+   strm_hdr_size = shader->stream_output.num_outputs ? shader->stream_output.num_outputs * 2 + 4 : 0;
+   first_pass = true;
+   sptr = str;
+   while (left_bytes) {
+      uint32_t length, offlen;
+      int hdr_len = base_hdr_size + (first_pass ? strm_hdr_size : 0);
+      if (ctx->cbuf->cdw + hdr_len + 1 > VIRGL_MAX_CMDBUF_DWORDS)
+         ctx->flush(ctx);
+
+      thispass = (VIRGL_MAX_CMDBUF_DWORDS - ctx->cbuf->cdw - hdr_len - 1) * 4;
+
+      length = MIN2(thispass, left_bytes);
+      len = ((length + 3) / 4) + hdr_len;
+
+      if (first_pass)
+         offlen = VIRGL_OBJ_SHADER_OFFSET_VAL(shader_len);
+      else
+         offlen = VIRGL_OBJ_SHADER_OFFSET_VAL((uintptr_t)sptr - (uintptr_t)str) | VIRGL_OBJ_SHADER_OFFSET_CONT;
+
+      virgl_emit_shader_header(ctx, handle, len, type, offlen, num_tokens);
+
+      virgl_emit_shader_streamout(ctx, first_pass ? &shader->stream_output : NULL);
+
+      virgl_encoder_write_block(ctx->cbuf, (uint8_t *)sptr, length);
+
+      sptr += length;
+      first_pass = false;
+      left_bytes -= length;
    }
-   virgl_encoder_write_block(ctx->cbuf, (uint8_t *)str, shader_len);
 
    if (str != shad_str)
       FREE(str);
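A worked example of the splitting math in the loop above, using assumed numbers (the command buffer size is an assumption, not taken from the commit):

/* Assume VIRGL_MAX_CMDBUF_DWORDS == 16384, an empty command buffer (cdw == 0),
 * no stream-output, and a 100000-byte shader text.
 *
 *   hdr_len  = 5                                 handle, type, offset, num_tokens, num_outputs
 *   thispass = (16384 - 0 - 5 - 1) * 4 = 65512   text bytes that fit after the command dword
 *   pass 1:  length = MIN2(65512, 100000) = 65512, OFFSET = total length (100000)
 *   pass 2:  length = 34488, OFFSET = 65512 | VIRGL_OBJ_SHADER_OFFSET_CONT
 */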
