The render server is a daemon that sits idle waiting for commands. When requested to create a context, it forks and creates the context in a subprocess. This isolates contexts from each other, from the server process, and from the client process. Because a context process can execute untrusted commands and depends on GPU drivers, the isolation should improve security. There is also a multi-thread mode where each context is executed by a thread. This mode is used to ease debugging, but maybe someone will find a use case for it. Signed-off-by: Chia-I Wu <olvaffe@gmail.com> Reviewed-by: Yiwei Zhang <zzyiwei@chromium.org> Reviewed-by: Ryan Neph <ryanneph@google.com>
parent
f0b4e97975
commit
670d3a6b23
@ -0,0 +1,39 @@ |
|||||||
|
/*
|
||||||
|
* Copyright 2021 Google LLC |
||||||
|
* SPDX-License-Identifier: MIT |
||||||
|
*/ |
||||||
|
|
||||||
|
#include "render_context.h" |
||||||
|
#include "render_server.h" |
||||||
|
|
||||||
|
/* The main process is the server process. It enters render_server_main and
|
||||||
|
* never returns except on fatal errors. |
||||||
|
* |
||||||
|
* The server process supports only one connection currently. It creates a |
||||||
|
* render_client to manage the connection. There is a client process at the |
||||||
|
* other end of the connection. When the client process requests a new |
||||||
|
* context to be created, the server process creates a worker. It also sets |
||||||
|
* up a socket pair, with one end owned by the worker and the other end sent |
||||||
|
* to and owned by the client process. |
||||||
|
* |
||||||
|
* A worker can be a subprocess forked from the server process, or a thread |
||||||
|
* created by the server process. When a worker is a subprocess, the |
||||||
|
* subprocess returns from render_server_main and enters render_context_main. |
||||||
|
* |
||||||
|
* When a worker is a thread, the thread enters render_context_main directly |
||||||
|
* from its start function. In this case, render_context_main must be |
||||||
|
* thread-safe. |
||||||
|
*/ |
||||||
|
/* Entry point shared by the server process and forked context workers.
 *
 * render_server_main() returns here in two cases: in the server process on
 * shutdown/fatal error, or in a forked worker subprocess with ctx_args
 * filled in.  Only the subprocess case sets ctx_args.valid, in which case
 * the subprocess enters the per-context loop via render_context_main().
 */
int
main(int argc, char **argv)
{
   struct render_context_args ctx_args;

   bool ok = render_server_main(argc, argv, &ctx_args);

   /* this is a subprocess */
   if (ok && ctx_args.valid)
      ok = render_context_main(&ctx_args);

   return ok ? 0 : -1;
}
@ -0,0 +1,29 @@ |
|||||||
|
# Copyright 2021 Google LLC
# SPDX-License-Identifier: MIT

# Sources of the standalone virgl_render_server daemon.
virgl_render_server_sources = [
  'main.c',
  'render_client.c',
  'render_common.c',
  'render_context.c',
  'render_server.c',
  'render_socket.c',
  'render_virgl.c',
  'render_worker.c',
]

virgl_render_server_depends = [libvirglrenderer_dep]

# The worker backend is picked at configure time: thread-based workers need
# the threading dependency, minijail-sandboxed subprocesses need minijail.
if with_render_server_worker == 'thread'
  virgl_render_server_depends += [thread_dep]
elif with_render_server_worker == 'minijail'
  virgl_render_server_depends += [minijail_dep]
endif

virgl_render_server = executable(
  'virgl_render_server',
  virgl_render_server_sources,
  dependencies : virgl_render_server_depends,
  install : true,
  install_dir : render_server_install_dir,
)
@ -0,0 +1,345 @@ |
|||||||
|
/*
|
||||||
|
* Copyright 2021 Google LLC |
||||||
|
* SPDX-License-Identifier: MIT |
||||||
|
*/ |
||||||
|
|
||||||
|
#include "render_client.h" |
||||||
|
|
||||||
|
#include <unistd.h> |
||||||
|
|
||||||
|
#include "render_context.h" |
||||||
|
#include "render_server.h" |
||||||
|
#include "render_virgl.h" |
||||||
|
#include "render_worker.h" |
||||||
|
|
||||||
|
/* There is a render_context_record for each worker.
|
||||||
|
* |
||||||
|
* When the client process destroys a context, it closes the connection to the |
||||||
|
* worker, which leads to worker termination. It also sends a |
||||||
|
* RENDER_CLIENT_OP_DESTROY_CONTEXT to us to remove the record. Because we |
||||||
|
* are responsible for cleaning up the worker, we don't care if the worker has |
||||||
|
* terminated or not. We always kill, reap, and remove the record. |
||||||
|
* |
||||||
|
* TODO We reap with WNOHANG in render_client_dispatch currently. We should |
||||||
|
* use SIGCHLD instead. |
||||||
|
*/ |
||||||
|
struct render_context_record {
   /* context id assigned by the client process */
   uint32_t ctx_id;
   /* the worker (subprocess or thread) executing the context */
   struct render_worker *worker;

   /* link in render_client::context_records or ::reap_records */
   struct list_head head;
};
||||||
|
|
||||||
|
/* Return the record for ctx_id, or NULL if no live context has that id. */
static struct render_context_record *
render_client_find_record(struct render_client *client, uint32_t ctx_id)
{
   list_for_each_entry (struct render_context_record, rec, &client->context_records,
                        head) {
      if (rec->ctx_id == ctx_id)
         return rec;
   }
   return NULL;
}
||||||
|
|
||||||
|
/* Kill the record's worker and move the record to the reap list; the worker
 * is actually reaped later by render_client_reap_all_records().
 */
static void
render_client_kill_one_record(struct render_client *client,
                              struct render_context_record *rec)
{
   render_worker_kill(rec->worker);

   list_del(&rec->head);
   list_addtail(&rec->head, &client->reap_records);
}
||||||
|
|
||||||
|
/* Kill every live worker and move all records onto the reap list. */
static void
render_client_kill_all_records(struct render_client *client)
{
   list_for_each_entry (struct render_context_record, rec, &client->context_records, head)
      render_worker_kill(rec->worker);

   list_splicetail(&client->context_records, &client->reap_records);
   list_inithead(&client->context_records);
}
||||||
|
|
||||||
|
/* Reap killed workers from the reap list and free their records.
 *
 * When wait is false, records whose worker has not terminated yet are kept
 * on the list for a later pass (see the WNOHANG TODO above).
 */
static void
render_client_reap_all_records(struct render_client *client, bool wait)
{
   struct render_server *srv = client->server;

   list_for_each_entry_safe (struct render_context_record, rec, &client->reap_records,
                             head) {
      if (!render_worker_reap(rec->worker, wait))
         continue;

      render_worker_destroy(rec->worker);
      list_del(&rec->head);
      free(rec);

      srv->current_worker_count--;
   }
}
||||||
|
|
||||||
|
/* Fill out the args handed to a new context worker.
 *
 * Ownership of ctx_fd is logically transferred into ctx_args.  The compound
 * literal zero-initializes ctx_name, and only sizeof - 1 bytes are copied,
 * so ctx_name is always NUL-terminated.
 */
static void
init_context_args(struct render_context_args *ctx_args,
                  uint32_t init_flags,
                  const struct render_client_op_create_context_request *req,
                  int ctx_fd)
{
   *ctx_args = (struct render_context_args){
      .valid = true,
      .init_flags = init_flags,
      .ctx_id = req->ctx_id,
      .ctx_fd = ctx_fd,
   };

   static_assert(sizeof(ctx_args->ctx_name) == sizeof(req->ctx_name), "");
   memcpy(ctx_args->ctx_name, req->ctx_name, sizeof(req->ctx_name) - 1);
}
||||||
|
|
||||||
|
#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD |
||||||
|
|
||||||
|
static int |
||||||
|
render_client_worker_thread(void *thread_data) |
||||||
|
{ |
||||||
|
const struct render_context_args *ctx_args = thread_data; |
||||||
|
return render_context_main(ctx_args) ? 0 : -1; |
||||||
|
} |
||||||
|
|
||||||
|
#endif /* ENABLE_RENDER_SERVER_WORKER_THREAD */ |
||||||
|
|
||||||
|
/* Create a worker for a new context requested by the client process.
 *
 * A socket pair is created per context: one end (ctx_fd) goes to the worker,
 * the other (*out_remote_fd) is returned for the caller to send to the
 * client process.
 *
 * In the parent (and in thread mode) the record is added to
 * client->context_records on success.  In a forked child this returns with
 * srv->state set to RENDER_SERVER_STATE_SUBPROCESS and *out_remote_fd set to
 * -1; the child then unwinds back to main() and enters render_context_main().
 */
static bool
render_client_create_context(struct render_client *client,
                             const struct render_client_op_create_context_request *req,
                             int *out_remote_fd)
{
   struct render_server *srv = client->server;

   if (srv->current_worker_count >= srv->max_worker_count) {
      render_log("too many context workers");
      return false;
   }

   struct render_context_record *rec = calloc(1, sizeof(*rec));
   if (!rec)
      return false;

   int socket_fds[2];
   if (!render_socket_pair(socket_fds)) {
      free(rec);
      return false;
   }
   int ctx_fd = socket_fds[0];
   int remote_fd = socket_fds[1];

   struct render_context_args ctx_args;
   init_context_args(&ctx_args, client->init_flags, req, ctx_fd);

#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
   /* thread mode: the thread copies ctx_args (including ctx_fd) */
   rec->worker = render_worker_create(srv->worker_jail, render_client_worker_thread,
                                      &ctx_args, sizeof(ctx_args));
   if (rec->worker)
      ctx_fd = -1; /* ownership transferred */
#else
   rec->worker = render_worker_create(srv->worker_jail, NULL, NULL, 0);
#endif
   if (!rec->worker) {
      render_log("failed to create a context worker");
      close(ctx_fd);
      close(remote_fd);
      free(rec);
      return false;
   }

   if (!render_worker_is_record(rec->worker)) {
      /* this is the child process */
      render_worker_destroy(rec->worker);
      free(rec);

      srv->state = RENDER_SERVER_STATE_SUBPROCESS;
      *srv->context_args = ctx_args;

      /* ctx_fd ownership transferred */
      assert(srv->context_args->ctx_fd == ctx_fd);

      /* the client-process end is sent by the parent, not by us */
      close(remote_fd);
      *out_remote_fd = -1;

      return true;
   }

   /* this is the parent process */
   rec->ctx_id = req->ctx_id;
   list_addtail(&rec->head, &client->context_records);
   srv->current_worker_count++;

   /* the worker owns its end now (already -1 in thread mode) */
   if (ctx_fd >= 0)
      close(ctx_fd);
   *out_remote_fd = remote_fd;

   return true;
}
||||||
|
|
||||||
|
/* RENDER_CLIENT_OP_DESTROY_CONTEXT: kill the context's worker if we still
 * have a record of it.  Unknown ctx_ids are silently ignored.
 */
static bool
render_client_dispatch_destroy_context(struct render_client *client,
                                       const union render_client_op_request *req)
{
   const uint32_t ctx_id = req->destroy_context.ctx_id;
   struct render_context_record *rec = render_client_find_record(client, ctx_id);
   if (rec)
      render_client_kill_one_record(client, rec);

   return true;
}
||||||
|
|
||||||
|
/* RENDER_CLIENT_OP_CREATE_CONTEXT: create a worker and send the remote
 * socket end back to the client process.  In a forked child, no reply is
 * sent; the child just propagates success back toward main().
 *
 * NOTE(review): past the early return below `ok` is always true, so the
 * !ok reply branch is unreachable -- on creation failure the client gets no
 * reply and the connection is dropped by the caller.  Confirm whether a
 * { .ok = false } reply was intended instead.
 */
static bool
render_client_dispatch_create_context(struct render_client *client,
                                      const union render_client_op_request *req)
{
   struct render_server *srv = client->server;

   int remote_fd;
   bool ok = render_client_create_context(client, &req->create_context, &remote_fd);
   if (!ok)
      return false;

   if (srv->state == RENDER_SERVER_STATE_SUBPROCESS) {
      assert(remote_fd < 0);
      return true;
   }

   const struct render_client_op_create_context_reply reply = {
      .ok = ok,
   };
   if (!ok)
      return render_socket_send_reply(&client->socket, &reply, sizeof(reply));

   ok = render_socket_send_reply_with_fds(&client->socket, &reply, sizeof(reply),
                                          &remote_fd, 1);
   /* our copy of the fd is no longer needed once sent (or on send failure) */
   close(remote_fd);

   return ok;
}
||||||
|
|
||||||
|
/* RENDER_CLIENT_OP_RESET: synchronously kill and reap all context workers. */
static bool
render_client_dispatch_reset(struct render_client *client,
                             UNUSED const union render_client_op_request *req)
{
   render_client_kill_all_records(client);
   render_client_reap_all_records(client, true /* wait */);
   return true;
}
||||||
|
|
||||||
|
/* RENDER_CLIENT_OP_INIT: record the init flags (later passed to each
 * context worker) and initialize virgl once in the server process.
 */
static bool
render_client_dispatch_init(struct render_client *client,
                            const union render_client_op_request *req)
{
   client->init_flags = req->init.flags;

   /* init now to avoid doing it in each worker
    *
    * TODO this does very little, and might confuse perfetto.  It might be
    * more interesting to preload Vulkan ICDs, by calling
    * vkEnumerateInstanceExtensionProperties.
    */
   render_virgl_init(client->init_flags);

   return true;
}
||||||
|
|
||||||
|
/* RENDER_CLIENT_OP_NOP: do nothing; useful as a liveness check. */
static bool
render_client_dispatch_nop(UNUSED struct render_client *client,
                           UNUSED const union render_client_op_request *req)
{
   return true;
}
||||||
|
|
||||||
|
/* One entry per client op: the exact request size expected on the wire and
 * the handler to invoke.
 */
struct render_client_dispatch_entry {
   size_t expect_size;
   bool (*dispatch)(struct render_client *client,
                    const union render_client_op_request *req);
};

static const struct render_client_dispatch_entry
   render_client_dispatch_table[RENDER_CLIENT_OP_COUNT] = {
/* expands to a designated initializer keyed by the op enum */
#define RENDER_CLIENT_DISPATCH(NAME, name)                                               \
   [RENDER_CLIENT_OP_##NAME] = {                                                         \
      .expect_size = sizeof(struct render_client_op_##name##_request),                   \
      .dispatch = render_client_dispatch_##name }
      RENDER_CLIENT_DISPATCH(NOP, nop),
      RENDER_CLIENT_DISPATCH(INIT, init),
      RENDER_CLIENT_DISPATCH(RESET, reset),
      RENDER_CLIENT_DISPATCH(CREATE_CONTEXT, create_context),
      RENDER_CLIENT_DISPATCH(DESTROY_CONTEXT, destroy_context),
#undef RENDER_CLIENT_DISPATCH
   };
||||||
|
|
||||||
|
/* Receive one request from the client process and dispatch it.
 *
 * Returns false on socket errors or malformed requests (wrong op or size),
 * which the caller treats as fatal for the connection.  A handler returning
 * false is only logged; the connection stays up.
 */
bool
render_client_dispatch(struct render_client *client)
{
   union render_client_op_request req;
   size_t req_size;
   if (!render_socket_receive_request(&client->socket, &req, sizeof(req), &req_size))
      return false;

   if (req.header.op >= RENDER_CLIENT_OP_COUNT) {
      render_log("invalid client op %d", req.header.op);
      return false;
   }

   const struct render_client_dispatch_entry *entry =
      &render_client_dispatch_table[req.header.op];
   if (entry->expect_size != req_size) {
      render_log("invalid request size %zu for client op %d", req_size, req.header.op);
      return false;
   }

   if (!entry->dispatch(client, &req))
      render_log("failed to dispatch client op %d", req.header.op);

   /* TODO this should be triggered by SIGCHLD */
   render_client_reap_all_records(client, false /* wait */);

   return true;
}
||||||
|
|
||||||
|
/* Tear down the client connection state.
 *
 * In a forked subprocess the records refer to workers owned by the server
 * process, so they are destroyed without killing or reaping.  In the server
 * process proper, all workers are killed and reaped synchronously, and the
 * virgl state initialized in render_client_dispatch_init() is finalized.
 */
void
render_client_destroy(struct render_client *client)
{
   struct render_server *srv = client->server;

   if (srv->state == RENDER_SERVER_STATE_SUBPROCESS) {
      /* destroy all records without killing nor reaping */
      list_splicetail(&client->context_records, &client->reap_records);
      list_for_each_entry_safe (struct render_context_record, rec, &client->reap_records,
                                head) {
         render_worker_destroy(rec->worker);
         free(rec);
         srv->current_worker_count--;
      }
   } else {
      render_client_kill_all_records(client);
      render_client_reap_all_records(client, true /* wait */);

      render_virgl_fini();
   }

   render_socket_fini(&client->socket);
   free(client);
}
||||||
|
|
||||||
|
/* Allocate the per-connection client state for client_fd.
 *
 * Takes ownership of client_fd via render_socket_init on success.  Returns
 * NULL on allocation failure; client_fd is untouched then -- NOTE(review):
 * confirm the caller closes it in that case.
 */
struct render_client *
render_client_create(struct render_server *srv, int client_fd)
{
   struct render_client *client = calloc(1, sizeof(*client));

   if (!client)
      return NULL;

   client->server = srv;
   render_socket_init(&client->socket, client_fd);

   list_inithead(&client->context_records);
   list_inithead(&client->reap_records);

   return client;
}
@ -0,0 +1,30 @@ |
|||||||
|
/*
|
||||||
|
* Copyright 2021 Google LLC |
||||||
|
* SPDX-License-Identifier: MIT |
||||||
|
*/ |
||||||
|
|
||||||
|
#ifndef RENDER_CLIENT_H
#define RENDER_CLIENT_H

#include "render_common.h"

/* Per-connection state for the client process. */
struct render_client {
   struct render_server *server;
   struct render_socket socket;

   /* flags from RENDER_CLIENT_OP_INIT, passed to each context worker */
   uint32_t init_flags;

   /* records of live workers */
   struct list_head context_records;
   /* records of killed workers awaiting reaping */
   struct list_head reap_records;
};

struct render_client *
render_client_create(struct render_server *srv, int client_fd);

void
render_client_destroy(struct render_client *client);

bool
render_client_dispatch(struct render_client *client);

#endif /* RENDER_CLIENT_H */
@ -0,0 +1,23 @@ |
|||||||
|
/*
|
||||||
|
* Copyright 2021 Google LLC |
||||||
|
* SPDX-License-Identifier: MIT |
||||||
|
*/ |
||||||
|
|
||||||
|
#include "render_common.h" |
||||||
|
|
||||||
|
#include <stdarg.h> |
||||||
|
#include <stdio.h> |
||||||
|
|
||||||
|
/* Emit a printf-style diagnostic to stderr, prefixed with "render: " and
 * terminated with a newline.
 */
void
render_log(const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   fprintf(stderr, "render: ");
   vfprintf(stderr, fmt, args);
   fputc('\n', stderr);
   va_end(args);
}
@ -0,0 +1,35 @@ |
|||||||
|
/*
|
||||||
|
* Copyright 2021 Google LLC |
||||||
|
* SPDX-License-Identifier: MIT |
||||||
|
*/ |
||||||
|
|
||||||
|
#ifndef RENDER_COMMON_H
#define RENDER_COMMON_H

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "util/compiler.h"
#include "util/list.h"
#include "util/macros.h"
#include "util/u_pointer.h"

#include "render_protocol.h"
#include "render_socket.h"

/* forward declarations shared across the render server sources */
struct render_client;
struct render_context;
struct render_context_args;
struct render_server;
struct render_virgl;
struct render_worker;
struct render_worker_jail;

/* log a printf-style message, prefixed with "render: ", to stderr */
void
render_log(const char *fmt, ...);

#endif /* RENDER_COMMON_H */
@ -0,0 +1,510 @@ |
|||||||
|
/*
|
||||||
|
* Copyright 2021 Google LLC |
||||||
|
* SPDX-License-Identifier: MIT |
||||||
|
*/ |
||||||
|
|
||||||
|
#include "render_context.h" |
||||||
|
|
||||||
|
#include <sys/mman.h> |
||||||
|
|
||||||
|
#include "util/u_thread.h" |
||||||
|
#include "virgl_util.h" |
||||||
|
#include "virglrenderer.h" |
||||||
|
#include "vrend_iov.h" |
||||||
|
|
||||||
|
#include "render_virgl.h" |
||||||
|
|
||||||
|
/* A resource attached to this context by the client. */
struct render_context_resource {
   /* resource id assigned by the client */
   uint32_t res_id;
   /* link in render_context::resources */
   struct list_head head;
};

/* XXX we need a unique res_id to export a blob
 *
 * virglrenderer.h does not have the right APIs for us.  We should use vkr
 * (and vrend, if that makes sense) directly.
 */
#define BLOB_RES_ID (~0u)
||||||
|
|
||||||
|
/* Unlink the resource, drop the virgl reference, and free the record. */
static void
render_context_resource_destroy(struct render_context_resource *res)
{
   list_del(&res->head);

   virgl_renderer_resource_unref(res->res_id);
   free(res);
}
||||||
|
|
||||||
|
/* Import a client-provided blob fd as resource res_id.
 *
 * Unrecognized fd types fall through with import fd_type 0 and are left for
 * virgl_renderer_resource_import_blob to reject -- NOTE(review): confirm 0
 * is not a valid VIRGL_RENDERER_BLOB_FD_TYPE.  Returns NULL on allocation
 * or import failure.
 */
static struct render_context_resource *
render_context_resource_import(uint32_t res_id,
                               enum virgl_resource_fd_type fd_type,
                               int res_fd,
                               uint64_t size)
{
   /* TODO pool alloc if resources come and go frequently */
   struct render_context_resource *res = calloc(1, sizeof(*res));
   if (!res)
      return NULL;

   res->res_id = res_id;

   /* translate the wire fd type into the virglrenderer import type */
   uint32_t import_fd_type;
   switch (fd_type) {
   case VIRGL_RESOURCE_FD_DMABUF:
      import_fd_type = VIRGL_RENDERER_BLOB_FD_TYPE_DMABUF;
      break;
   case VIRGL_RESOURCE_FD_OPAQUE:
      import_fd_type = VIRGL_RENDERER_BLOB_FD_TYPE_OPAQUE;
      break;
   case VIRGL_RESOURCE_FD_SHM:
      import_fd_type = VIRGL_RENDERER_BLOB_FD_TYPE_SHM;
      break;
   default:
      import_fd_type = 0;
      break;
   }
   const struct virgl_renderer_resource_import_blob_args import_args = {
      .res_handle = res->res_id,
      .blob_mem = VIRGL_RENDERER_BLOB_MEM_HOST3D,
      .fd_type = import_fd_type,
      .fd = res_fd,
      .size = size,
   };

   int ret = virgl_renderer_resource_import_blob(&import_args);
   if (ret) {
      free(res);
      return NULL;
   }

   return res;
}
||||||
|
|
||||||
|
/* Publish seqno on ring ring_idx in the shared memory visible to the client
 * process, and kick the fence eventfd if one was provided at init time.
 */
void
render_context_update_timeline(struct render_context *ctx,
                               uint32_t ring_idx,
                               uint32_t seqno)
{
   /* this can be called by the context's main thread and sync threads */
   atomic_store(&ctx->shmem_timelines[ring_idx], seqno);
   if (ctx->fence_eventfd >= 0)
      write_eventfd(ctx->fence_eventfd, 1);
}
||||||
|
|
||||||
|
/* Return the attached resource with res_id, or NULL if not attached. */
static struct render_context_resource *
render_context_find_resource(struct render_context *ctx, uint32_t res_id)
{
   list_for_each_entry (struct render_context_resource, res, &ctx->resources, head) {
      if (res->res_id == res_id)
         return res;
   }

   return NULL;
}
||||||
|
|
||||||
|
/* Create the virgl context and map the client-provided shmem that carries
 * the per-ring fence timelines.
 *
 * On success, ownership of shmem_fd and fence_eventfd passes to ctx; on
 * failure the caller still owns both fds.
 *
 * NOTE(review): the mapping requests PROT_WRITE only; PROT_WRITE without
 * PROT_READ is not portable across architectures -- confirm reads of the
 * timelines never happen on this side, or add PROT_READ.
 */
static bool
render_context_init_virgl_context(struct render_context *ctx,
                                  const struct render_context_op_init_request *req,
                                  int shmem_fd,
                                  int fence_eventfd)
{
   /* the shmem is treated as an array of per-ring timeline slots */
   const int timeline_count = req->shmem_size / sizeof(*ctx->shmem_timelines);

   void *shmem_ptr = mmap(NULL, req->shmem_size, PROT_WRITE, MAP_SHARED, shmem_fd, 0);
   if (shmem_ptr == MAP_FAILED)
      return false;

   int ret = virgl_renderer_context_create_with_flags(ctx->ctx_id, req->flags,
                                                      ctx->name_len, ctx->name);
   if (ret) {
      munmap(shmem_ptr, req->shmem_size);
      return false;
   }

   ctx->shmem_fd = shmem_fd;
   ctx->shmem_size = req->shmem_size;
   ctx->shmem_ptr = shmem_ptr;
   ctx->shmem_timelines = shmem_ptr;

   /* reset every timeline so the client observes seqno 0 initially */
   for (int i = 0; i < timeline_count; i++)
      atomic_store(&ctx->shmem_timelines[i], 0);

   ctx->timeline_count = timeline_count;

   ctx->fence_eventfd = fence_eventfd;

   return true;
}
||||||
|
|
||||||
|
/* Create a transient blob resource for (blob_id, blob_flags), export it as
 * an fd, and immediately drop the resource reference.  Uses the reserved
 * BLOB_RES_ID; see the XXX note at its definition.
 *
 * NOTE(review): this assumes the exported fd keeps the underlying storage
 * alive after the unref -- confirm against the virglrenderer blob API.
 * On success the caller owns *out_res_fd.
 */
static bool
render_context_export_blob(struct render_context *ctx,
                           const struct render_context_op_get_blob_request *req,
                           enum virgl_resource_fd_type *out_fd_type,
                           uint32_t *out_map_info,
                           int *out_res_fd)
{
   const uint32_t res_id = BLOB_RES_ID;
   const struct virgl_renderer_resource_create_blob_args blob_args = {
      .res_handle = res_id,
      .ctx_id = ctx->ctx_id,
      .blob_mem = VIRGL_RENDERER_BLOB_MEM_HOST3D,
      .blob_flags = req->blob_flags,
      .blob_id = req->blob_id,
      .size = req->blob_size,
   };
   int ret = virgl_renderer_resource_create_blob(&blob_args);
   if (ret) {
      render_log("failed to create blob resource");
      return false;
   }

   uint32_t map_info;
   virgl_renderer_resource_get_map_info(res_id, &map_info);

   uint32_t fd_type;
   int res_fd;
   ret = virgl_renderer_resource_export_blob(res_id, &fd_type, &res_fd);
   /* the transient resource is no longer needed once exported */
   virgl_renderer_resource_unref(res_id);

   if (ret)
      return false;

   /* translate the virglrenderer fd type back to the wire enum */
   switch (fd_type) {
   case VIRGL_RENDERER_BLOB_FD_TYPE_DMABUF:
      *out_fd_type = VIRGL_RESOURCE_FD_DMABUF;
      break;
   case VIRGL_RENDERER_BLOB_FD_TYPE_OPAQUE:
      *out_fd_type = VIRGL_RESOURCE_FD_OPAQUE;
      break;
   case VIRGL_RENDERER_BLOB_FD_TYPE_SHM:
      *out_fd_type = VIRGL_RESOURCE_FD_SHM;
      break;
   default:
      /* NOTE(review): confirm 0 maps to VIRGL_RESOURCE_FD_INVALID */
      *out_fd_type = 0;
   }

   *out_map_info = map_info;
   *out_res_fd = res_fd;

   return true;
}
||||||
|
|
||||||
|
/* RENDER_CONTEXT_OP_SUBMIT_FENCE: create a fence on the requested ring.
 *
 * The seqno is carried through the opaque fence cookie pointer; presumably
 * it is published on retirement via render_context_update_timeline() -- the
 * fence callback is outside this file, confirm there.
 */
static bool
render_context_dispatch_submit_fence(struct render_context *ctx,
                                     const union render_context_op_request *req,
                                     UNUSED const int *fds,
                                     UNUSED int fd_count)
{
   /* always merge fences */
   assert(!(req->submit_fence.flags & ~VIRGL_RENDERER_FENCE_FLAG_MERGEABLE));
   const uint32_t flags = VIRGL_RENDERER_FENCE_FLAG_MERGEABLE;
   const uint32_t ring_idx = req->submit_fence.ring_index;
   const uint32_t seqno = req->submit_fence.seqno;

   assert(ring_idx < (uint32_t)ctx->timeline_count);
   int ret = virgl_renderer_context_create_fence(ctx->ctx_id, flags, ring_idx,
                                                 uintptr_to_pointer(seqno));

   return !ret;
}
||||||
|
|
||||||
|
static bool |
||||||
|
render_context_dispatch_submit_cmd(struct render_context *ctx, |
||||||
|
const union render_context_op_request *req, |
||||||
|
UNUSED const int *fds, |
||||||
|
UNUSED int fd_count) |
||||||
|
{ |
||||||
|
const int ndw = req->submit_cmd.size / sizeof(uint32_t); |
||||||
|
void *cmd = (void *)req->submit_cmd.cmd; |
||||||
|
if (req->submit_cmd.size > sizeof(req->submit_cmd.cmd)) { |
||||||
|
cmd = malloc(req->submit_cmd.size); |
||||||
|
if (!cmd) |
||||||
|
return true; |
||||||
|
|
||||||
|
const size_t inlined = sizeof(req->submit_cmd.cmd); |
||||||
|
const size_t remain = req->submit_cmd.size - inlined; |
||||||
|
|
||||||
|
memcpy(cmd, req->submit_cmd.cmd, inlined); |
||||||
|
if (!render_socket_receive_data(&ctx->socket, (char *)cmd + inlined, remain)) { |
||||||
|
free(cmd); |
||||||
|
return false; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
int ret = virgl_renderer_submit_cmd(cmd, ctx->ctx_id, ndw); |
||||||
|
|
||||||
|
if (cmd != req->submit_cmd.cmd) |
||||||
|
free(cmd); |
||||||
|
|
||||||
|
const struct render_context_op_submit_cmd_reply reply = { |
||||||
|
.ok = !ret, |
||||||
|
}; |
||||||
|
if (!render_socket_send_reply(&ctx->socket, &reply, sizeof(reply))) |
||||||
|
return false; |
||||||
|
|
||||||
|
return true; |
||||||
|
} |
||||||
|
|
||||||
|
/* RENDER_CONTEXT_OP_GET_BLOB: export a blob and reply with its fd.  On
 * export failure the reply carries fd_type VIRGL_RESOURCE_FD_INVALID and no
 * fd.
 */
static bool
render_context_dispatch_get_blob(struct render_context *ctx,
                                 const union render_context_op_request *req,
                                 UNUSED const int *fds,
                                 UNUSED int fd_count)
{
   struct render_context_op_get_blob_reply reply = {
      .fd_type = VIRGL_RESOURCE_FD_INVALID,
   };
   int res_fd;
   bool ok = render_context_export_blob(ctx, &req->get_blob, &reply.fd_type,
                                        &reply.map_info, &res_fd);
   if (!ok)
      return render_socket_send_reply(&ctx->socket, &reply, sizeof(reply));

   ok =
      render_socket_send_reply_with_fds(&ctx->socket, &reply, sizeof(reply), &res_fd, 1);
   /* our copy of the fd is no longer needed once sent (or on send failure) */
   close(res_fd);

   return ok;
}
||||||
|
|
||||||
|
/* RENDER_CONTEXT_OP_DETACH_RESOURCE: drop our reference to the resource.
 * Unknown res_ids are silently ignored.
 */
static bool
render_context_dispatch_detach_resource(struct render_context *ctx,
                                        const union render_context_op_request *req,
                                        UNUSED const int *fds,
                                        UNUSED int fd_count)
{
   const uint32_t res_id = req->detach_resource.res_id;

   struct render_context_resource *res = render_context_find_resource(ctx, res_id);
   if (res)
      render_context_resource_destroy(res);

   return true;
}
||||||
|
|
||||||
|
/* RENDER_CONTEXT_OP_ATTACH_RESOURCE: import the received fd as a resource
 * and attach it to the virgl context.  Requires exactly one fd and a
 * non-zero size; BLOB_RES_ID is reserved for blob export and rejected.
 */
static bool
render_context_dispatch_attach_resource(struct render_context *ctx,
                                        const union render_context_op_request *req,
                                        const int *fds,
                                        int fd_count)
{
   const uint32_t res_id = req->attach_resource.res_id;
   const enum virgl_resource_fd_type fd_type = req->attach_resource.fd_type;
   const uint64_t size = req->attach_resource.size;

   if (res_id == BLOB_RES_ID) {
      render_log("XXX res_id is %u, which is reserved for blob export", res_id);
      return false;
   }
   if (fd_type == VIRGL_RESOURCE_FD_INVALID || !size || fd_count != 1) {
      render_log("failed to attach invalid resource %d", res_id);
      return false;
   }

   struct render_context_resource *res =
      render_context_resource_import(res_id, fd_type, fds[0], size);
   if (!res) {
      render_log("failed to import resource %d", res_id);
      return false;
   }

   list_addtail(&res->head, &ctx->resources);
   virgl_renderer_ctx_attach_resource(ctx->ctx_id, res_id);

   return true;
}
||||||
|
|
||||||
|
/* RENDER_CONTEXT_OP_INIT: expects the shmem fd and an optional fence
 * eventfd.  On success, ownership of the fds passes to the context.
 */
static bool
render_context_dispatch_init(struct render_context *ctx,
                             const union render_context_op_request *req,
                             const int *fds,
                             int fd_count)
{
   if (fd_count != 1 && fd_count != 2)
      return false;

   const int shmem_fd = fds[0];
   const int fence_eventfd = fd_count == 2 ? fds[1] : -1;
   return render_context_init_virgl_context(ctx, &req->init, shmem_fd, fence_eventfd);
}
||||||
|
|
||||||
|
/* RENDER_CONTEXT_OP_NOP: do nothing; useful as a liveness check. */
static bool
render_context_dispatch_nop(UNUSED struct render_context *ctx,
                            UNUSED const union render_context_op_request *req,
                            UNUSED const int *fds,
                            UNUSED int fd_count)
{
   return true;
}
||||||
|
|
||||||
|
/* One entry per context op: the exact request size expected on the wire,
 * the maximum number of fds the op may carry, and the handler to invoke.
 */
struct render_context_dispatch_entry {
   size_t expect_size;
   int max_fd_count;
   bool (*dispatch)(struct render_context *ctx,
                    const union render_context_op_request *req,
                    const int *fds,
                    int fd_count);
};

static const struct render_context_dispatch_entry
   render_context_dispatch_table[RENDER_CONTEXT_OP_COUNT] = {
/* expands to a designated initializer keyed by the op enum */
#define RENDER_CONTEXT_DISPATCH(NAME, name, max_fd)                                      \
   [RENDER_CONTEXT_OP_##NAME] = {                                                        \
      .expect_size = sizeof(struct render_context_op_##name##_request),                  \
      .max_fd_count = (max_fd),                                                          \
      .dispatch = render_context_dispatch_##name }
      RENDER_CONTEXT_DISPATCH(NOP, nop, 0),
      RENDER_CONTEXT_DISPATCH(INIT, init, 2),
      RENDER_CONTEXT_DISPATCH(ATTACH_RESOURCE, attach_resource, 1),
      RENDER_CONTEXT_DISPATCH(DETACH_RESOURCE, detach_resource, 0),
      RENDER_CONTEXT_DISPATCH(GET_BLOB, get_blob, 0),
      RENDER_CONTEXT_DISPATCH(SUBMIT_CMD, submit_cmd, 0),
      RENDER_CONTEXT_DISPATCH(SUBMIT_FENCE, submit_fence, 0),
#undef RENDER_CONTEXT_DISPATCH
   };
||||||
|
|
||||||
|
/* Receive one request (and any attached fds) on the context socket and
 * dispatch it under the global virgl dispatch lock.
 *
 * On any failure, all received fds are closed here; on success the handler
 * is responsible for them.
 */
static bool
render_context_dispatch(struct render_context *ctx)
{
   union render_context_op_request req;
   size_t req_size;
   int req_fds[8];
   int req_fd_count;
   if (!render_socket_receive_request_with_fds(&ctx->socket, &req, sizeof(req), &req_size,
                                               req_fds, ARRAY_SIZE(req_fds),
                                               &req_fd_count))
      return false;

   assert((unsigned int)req_fd_count <= ARRAY_SIZE(req_fds));

   if (req.header.op >= RENDER_CONTEXT_OP_COUNT) {
      render_log("invalid context op %d", req.header.op);
      goto fail;
   }

   const struct render_context_dispatch_entry *entry =
      &render_context_dispatch_table[req.header.op];
   if (entry->expect_size != req_size || entry->max_fd_count < req_fd_count) {
      render_log("invalid request size (%zu) or fd count (%d) for context op %d",
                 req_size, req_fd_count, req.header.op);
      goto fail;
   }

   render_virgl_lock_dispatch();
   const bool ok = entry->dispatch(ctx, &req, req_fds, req_fd_count);
   render_virgl_unlock_dispatch();
   if (!ok) {
      render_log("failed to dispatch context op %d", req.header.op);
      goto fail;
   }

   return true;

fail:
   for (int i = 0; i < req_fd_count; i++)
      close(req_fds[i]);
   return false;
}
||||||
|
|
||||||
|
/* Service requests until the socket fails or the peer hangs up.
 * Always exits via the failure path; the trailing return keeps the
 * compiler satisfied.
 */
static bool
render_context_run(struct render_context *ctx)
{
   for (;;) {
      if (!render_context_dispatch(ctx))
         return false;
   }

   return true;
}
||||||
|
|
||||||
|
static void |
||||||
|
render_context_fini(struct render_context *ctx) |
||||||
|
{ |
||||||
|
render_virgl_lock_dispatch(); |
||||||
|
|
||||||
|
/* destroy the context first to join its sync threads and ring threads */ |
||||||
|
virgl_renderer_context_destroy(ctx->ctx_id); |
||||||
|
|
||||||
|
list_for_each_entry_safe (struct render_context_resource, res, &ctx->resources, head) |
||||||
|
render_context_resource_destroy(res); |
||||||
|
|
||||||
|
render_virgl_unlock_dispatch(); |
||||||
|
|
||||||
|
render_virgl_remove_context(ctx); |
||||||
|
|
||||||
|
if (ctx->shmem_ptr) |
||||||
|
munmap(ctx->shmem_ptr, ctx->shmem_size); |
||||||
|
if (ctx->shmem_fd >= 0) |
||||||
|
close(ctx->shmem_fd); |
||||||
|
|
||||||
|
if (ctx->fence_eventfd >= 0) |
||||||
|
close(ctx->fence_eventfd); |
||||||
|
|
||||||
|
if (ctx->name) |
||||||
|
free(ctx->name); |
||||||
|
|
||||||
|
render_socket_fini(&ctx->socket); |
||||||
|
} |
||||||
|
|
||||||
|
static bool |
||||||
|
render_context_init_name(struct render_context *ctx, |
||||||
|
uint32_t ctx_id, |
||||||
|
const char *ctx_name) |
||||||
|
{ |
||||||
|
const size_t name_size = strlen(ctx_name) + 16; |
||||||
|
ctx->name = malloc(name_size); |
||||||
|
if (!ctx->name) |
||||||
|
return false; |
||||||
|
|
||||||
|
ctx->name_len = snprintf(ctx->name, name_size, "virgl-%d-%s", ctx_id, ctx_name); |
||||||
|
if (ctx->name_len >= name_size) |
||||||
|
ctx->name_len = name_size - 1; |
||||||
|
|
||||||
|
u_thread_setname(ctx->name); |
||||||
|
|
||||||
|
return true; |
||||||
|
} |
||||||
|
|
||||||
|
static bool |
||||||
|
render_context_init(struct render_context *ctx, const struct render_context_args *args) |
||||||
|
{ |
||||||
|
memset(ctx, 0, sizeof(*ctx)); |
||||||
|
ctx->ctx_id = args->ctx_id; |
||||||
|
render_socket_init(&ctx->socket, args->ctx_fd); |
||||||
|
list_inithead(&ctx->resources); |
||||||
|
ctx->shmem_fd = -1; |
||||||
|
ctx->fence_eventfd = -1; |
||||||
|
|
||||||
|
if (!render_context_init_name(ctx, args->ctx_id, args->ctx_name)) |
||||||
|
return false; |
||||||
|
|
||||||
|
render_virgl_add_context(ctx); |
||||||
|
|
||||||
|
return true; |
||||||
|
} |
||||||
|
|
||||||
|
bool |
||||||
|
render_context_main(const struct render_context_args *args) |
||||||
|
{ |
||||||
|
struct render_context ctx; |
||||||
|
|
||||||
|
assert(args->valid && args->ctx_id && args->ctx_fd >= 0); |
||||||
|
|
||||||
|
if (!render_virgl_init(args->init_flags)) { |
||||||
|
close(args->ctx_fd); |
||||||
|
return false; |
||||||
|
} |
||||||
|
|
||||||
|
if (!render_context_init(&ctx, args)) { |
||||||
|
render_virgl_fini(); |
||||||
|
close(args->ctx_fd); |
||||||
|
return false; |
||||||
|
} |
||||||
|
|
||||||
|
const bool ok = render_context_run(&ctx); |
||||||
|
render_context_fini(&ctx); |
||||||
|
|
||||||
|
render_virgl_fini(); |
||||||
|
|
||||||
|
return ok; |
||||||
|
} |
@ -0,0 +1,54 @@ |
|||||||
|
/*
|
||||||
|
* Copyright 2021 Google LLC |
||||||
|
* SPDX-License-Identifier: MIT |
||||||
|
*/ |
||||||
|
|
||||||
|
#ifndef RENDER_CONTEXT_H |
||||||
|
#define RENDER_CONTEXT_H |
||||||
|
|
||||||
|
#include "render_common.h" |
||||||
|
|
||||||
|
#include <stdatomic.h> |
||||||
|
|
||||||
|
struct render_context { |
||||||
|
uint32_t ctx_id; |
||||||
|
struct render_socket socket; |
||||||
|
struct list_head head; |
||||||
|
|
||||||
|
char *name; |
||||||
|
size_t name_len; |
||||||
|
|
||||||
|
struct list_head resources; |
||||||
|
|
||||||
|
int shmem_fd; |
||||||
|
size_t shmem_size; |
||||||
|
void *shmem_ptr; |
||||||
|
atomic_uint *shmem_timelines; |
||||||
|
|
||||||
|
int timeline_count; |
||||||
|
|
||||||
|
/* optional */ |
||||||
|
int fence_eventfd; |
||||||
|
}; |
||||||
|
|
||||||
|
/* Arguments handed from the server process to a context worker. */
struct render_context_args {
   bool valid;

   /* flags forwarded to render_virgl_init */
   uint32_t init_flags;

   uint32_t ctx_id;
   char ctx_name[32];

   /* render_context_main always takes ownership even on errors */
   int ctx_fd;
};
||||||
|
|
||||||
|
bool |
||||||
|
render_context_main(const struct render_context_args *args); |
||||||
|
|
||||||
|
void |
||||||
|
render_context_update_timeline(struct render_context *ctx, |
||||||
|
uint32_t ring_idx, |
||||||
|
uint32_t val); |
||||||
|
|
||||||
|
#endif /* RENDER_CONTEXT_H */ |
@ -0,0 +1,218 @@ |
|||||||
|
/*
|
||||||
|
* Copyright 2021 Google LLC |
||||||
|
* SPDX-License-Identifier: MIT |
||||||
|
*/ |
||||||
|
|
||||||
|
#ifndef RENDER_PROTOCOL_H |
||||||
|
#define RENDER_PROTOCOL_H |
||||||
|
|
||||||
|
#include <stdint.h> |
||||||
|
|
||||||
|
#include "virgl_resource.h" |
||||||
|
#include "virglrenderer.h" |
||||||
|
#include "virglrenderer_hw.h" |
||||||
|
|
||||||
|
/* this covers the command line options and the socket type */ |
||||||
|
#define RENDER_SERVER_VERSION 0 |
||||||
|
|
||||||
|
/* The protocol itself is internal to virglrenderer. There is no backward
|
||||||
|
* compatibility to be kept. |
||||||
|
*/ |
||||||
|
|
||||||
|
/* client ops, which are handled by the server process */ |
||||||
|
/* client ops, which are handled by the server process */
enum render_client_op {
   RENDER_CLIENT_OP_NOP = 0,
   RENDER_CLIENT_OP_INIT,
   RENDER_CLIENT_OP_RESET,
   RENDER_CLIENT_OP_CREATE_CONTEXT,
   RENDER_CLIENT_OP_DESTROY_CONTEXT,

   RENDER_CLIENT_OP_COUNT,
};
||||||
|
|
||||||
|
/* context ops, which are handled by workers (subprocesses or threads) created
|
||||||
|
* by the server process |
||||||
|
*/ |
||||||
|
/* context ops, which are handled by workers (subprocesses or threads)
 * created by the server process
 */
enum render_context_op {
   RENDER_CONTEXT_OP_NOP = 0,
   RENDER_CONTEXT_OP_INIT,
   RENDER_CONTEXT_OP_ATTACH_RESOURCE,
   RENDER_CONTEXT_OP_DETACH_RESOURCE,
   RENDER_CONTEXT_OP_GET_BLOB,
   RENDER_CONTEXT_OP_SUBMIT_CMD,
   RENDER_CONTEXT_OP_SUBMIT_FENCE,

   RENDER_CONTEXT_OP_COUNT,
};
||||||
|
|
||||||
|
struct render_client_op_header { |
||||||
|
enum render_client_op op; |
||||||
|
}; |
||||||
|
|
||||||
|
struct render_client_op_nop_request { |
||||||
|
struct render_client_op_header header; |
||||||
|
}; |
||||||
|
|
||||||
|
/* Initialize virglrenderer.
|
||||||
|
* |
||||||
|
* This roughly corresponds to virgl_renderer_init. |
||||||
|
*/ |
||||||
|
struct render_client_op_init_request { |
||||||
|
struct render_client_op_header header; |
||||||
|
uint32_t flags; /* VIRGL_RENDERER_USE_* and others */ |
||||||
|
}; |
||||||
|
|
||||||
|
/* Remove all contexts.
|
||||||
|
* |
||||||
|
* This roughly corresponds to virgl_renderer_reset. |
||||||
|
*/ |
||||||
|
struct render_client_op_reset_request { |
||||||
|
struct render_client_op_header header; |
||||||
|
}; |
||||||
|
|
||||||
|
/* Create a context, which will be serviced by a worker.
|
||||||
|
* |
||||||
|
* See also the comment before main() for the process model. |
||||||
|
* |
||||||
|
* This roughly corresponds to virgl_renderer_context_create_with_flags. |
||||||
|
*/ |
||||||
|
struct render_client_op_create_context_request { |
||||||
|
struct render_client_op_header header; |
||||||
|
uint32_t ctx_id; |
||||||
|
char ctx_name[32]; |
||||||
|
}; |
||||||
|
|
||||||
|
struct render_client_op_create_context_reply { |
||||||
|
bool ok; |
||||||
|
/* followed by 1 socket fd if ok */ |
||||||
|
}; |
||||||
|
|
||||||
|
/* Destroy a context, including the worker.
|
||||||
|
* |
||||||
|
* This roughly corresponds to virgl_renderer_context_destroy. |
||||||
|
*/ |
||||||
|
struct render_client_op_destroy_context_request { |
||||||
|
struct render_client_op_header header; |
||||||
|
uint32_t ctx_id; |
||||||
|
}; |
||||||
|
|
||||||
|
union render_client_op_request { |
||||||
|
struct render_client_op_header header; |
||||||
|
struct render_client_op_nop_request nop; |
||||||
|
struct render_client_op_init_request init; |
||||||
|
struct render_client_op_reset_request reset; |
||||||
|
struct render_client_op_create_context_request create_context; |
||||||
|
struct render_client_op_destroy_context_request destroy_context; |
||||||
|
}; |
||||||
|
|
||||||
|
struct render_context_op_header { |
||||||
|
enum render_context_op op; |
||||||
|
}; |
||||||
|
|
||||||
|
struct render_context_op_nop_request { |
||||||
|
struct render_context_op_header header; |
||||||
|
}; |
||||||
|
|
||||||
|
/* Initialize the context.
|
||||||
|
* |
||||||
|
* The shmem is required and currently holds an array of atomic_uint. Each |
||||||
|
* atomic_uint represents the current sequence number of a ring (as defined by |
||||||
|
* the virtio-gpu spec). |
||||||
|
* |
||||||
|
* The eventfd is optional. When given, it will be written to when there are |
||||||
|
* changes to any of the sequence numbers. |
||||||
|
* |
||||||
|
* This roughly corresponds to virgl_renderer_context_create_with_flags. |
||||||
|
*/ |
||||||
|
struct render_context_op_init_request { |
||||||
|
struct render_context_op_header header; |
||||||
|
uint32_t flags; /* VIRGL_RENDERER_CONTEXT_FLAG_*/ |
||||||
|
size_t shmem_size; |
||||||
|
/* followed by 1 shmem fd and optionally 1 eventfd */ |
||||||
|
}; |
||||||
|
|
||||||
|
/* Attach a resource to the context.
|
||||||
|
* |
||||||
|
* This roughly corresponds to virgl_renderer_ctx_attach_resource. |
||||||
|
*/ |
||||||
|
struct render_context_op_attach_resource_request { |
||||||
|
struct render_context_op_header header; |
||||||
|
uint32_t res_id; |
||||||
|
enum virgl_resource_fd_type fd_type; |
||||||
|
uint64_t size; |
||||||
|
/* followed by 1 fd */ |
||||||
|
}; |
||||||
|
|
||||||
|
/* Detach a resource from the context.
|
||||||
|
* |
||||||
|
* This roughly corresponds to virgl_renderer_ctx_detach_resource. |
||||||
|
*/ |
||||||
|
struct render_context_op_detach_resource_request { |
||||||
|
struct render_context_op_header header; |
||||||
|
uint32_t res_id; |
||||||
|
}; |
||||||
|
|
||||||
|
/* Export a blob from the context.
|
||||||
|
* |
||||||
|
* This roughly corresponds to virgl_renderer_resource_create_blob. |
||||||
|
*/ |
||||||
|
struct render_context_op_get_blob_request { |
||||||
|
struct render_context_op_header header; |
||||||
|
uint64_t blob_id; |
||||||
|
uint64_t blob_size; |
||||||
|
uint32_t blob_flags; /* VIRGL_RENDERER_BLOB_FLAG_* */ |
||||||
|
}; |
||||||
|
|
||||||
|
struct render_context_op_get_blob_reply { |
||||||
|
enum virgl_resource_fd_type fd_type; |
||||||
|
uint32_t map_info; /* VIRGL_RENDERER_MAP_* */ |
||||||
|
/* followed by 1 fd if not VIRGL_RESOURCE_FD_INVALID */ |
||||||
|
}; |
||||||
|
|
||||||
|
/* Submit a small command stream to the context.
|
||||||
|
* |
||||||
|
* The size limit depends on the socket type. Currently, SOCK_SEQPACKET is |
||||||
|
* used and the size limit is best treated as one page. |
||||||
|
* |
||||||
|
* This roughly corresponds to virgl_renderer_submit_cmd. |
||||||
|
*/ |
||||||
|
struct render_context_op_submit_cmd_request { |
||||||
|
struct render_context_op_header header; |
||||||
|
size_t size; |
||||||
|
char cmd[256]; |
||||||
|
/* if size > sizeof(cmd), followed by (size - sizeof(cmd)) bytes in another
|
||||||
|
* message; size still must be small |
||||||
|
*/ |
||||||
|
}; |
||||||
|
|
||||||
|
struct render_context_op_submit_cmd_reply { |
||||||
|
bool ok; |
||||||
|
}; |
||||||
|
|
||||||
|
/* Submit a fence to the context.
|
||||||
|
* |
||||||
|
* This submits a fence to the specified ring. When the fence signals, the |
||||||
|
* current sequence number of the ring in the shmem is updated. |
||||||
|
* |
||||||
|
* This roughly corresponds to virgl_renderer_context_create_fence. |
||||||
|
*/ |
||||||
|
struct render_context_op_submit_fence_request { |
||||||
|
struct render_context_op_header header; |
||||||
|
uint32_t flags; /* VIRGL_RENDERER_FENCE_FLAG_* */ |
||||||
|
/* TODO fix virgl_renderer_context_create_fence to use ring_index */ |
||||||
|
uint32_t ring_index; |
||||||
|
uint32_t seqno; |
||||||
|
}; |
||||||
|
|
||||||
|
union render_context_op_request { |
||||||
|
struct render_context_op_header header; |
||||||
|
struct render_context_op_nop_request nop; |
||||||
|
struct render_context_op_init_request init; |
||||||
|
struct render_context_op_attach_resource_request attach_resource; |
||||||
|
struct render_context_op_detach_resource_request detach_resource; |
||||||
|
struct render_context_op_get_blob_request get_blob; |
||||||
|
struct render_context_op_submit_cmd_request submit_cmd; |
||||||
|
struct render_context_op_submit_fence_request submit_fence; |
||||||
|
}; |
||||||
|
|
||||||
|
#endif /* RENDER_PROTOCOL_H */ |
@ -0,0 +1,163 @@ |
|||||||
|
/*
|
||||||
|
* Copyright 2021 Google LLC |
||||||
|
* SPDX-License-Identifier: MIT |
||||||
|
*/ |
||||||
|
|
||||||
|
#include "render_server.h"

#include <errno.h>
#include <getopt.h>
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>

#include "render_client.h"
#include "render_worker.h"
||||||
|
|
||||||
|
#define RENDER_SERVER_MAX_WORKER_COUNT 256 |
||||||
|
|
||||||
|
static bool |
||||||
|
render_server_run(struct render_server *srv) |
||||||
|
{ |
||||||
|
while (srv->state == RENDER_SERVER_STATE_RUN) { |
||||||
|
/* TODO handle SIGCHLD */ |
||||||
|
struct render_client *client = srv->client; |
||||||
|
if (!render_client_dispatch(client)) |
||||||
|
return false; |
||||||
|
} |
||||||
|
|
||||||
|
return true; |
||||||
|
} |
||||||
|
|
||||||
|
static void |
||||||
|
render_server_fini(struct render_server *srv) |
||||||
|
{ |
||||||
|
if (srv->client) |
||||||
|
render_client_destroy(srv->client); |
||||||
|
assert(srv->current_worker_count == 0); |
||||||
|
|
||||||
|
if (srv->worker_jail) |
||||||
|
render_worker_jail_destroy(srv->worker_jail); |
||||||
|
|
||||||
|
if (srv->client_fd >= 0) |
||||||
|
close(srv->client_fd); |
||||||
|
} |
||||||
|
|
||||||
|
static bool |
||||||
|
render_server_parse_options(struct render_server *srv, int argc, char **argv) |
||||||
|
{ |
||||||
|
enum { |
||||||
|
OPT_SOCKET_FD = 'a', |
||||||
|
OPT_WORKER_SECCOMP_BPF, |
||||||
|
OPT_WORKER_SECCOMP_MINIJAIL_POLICY, |
||||||
|
OPT_WORKER_SECCOMP_MINIJAIL_LOG, |
||||||
|
OPT_COUNT, |
||||||
|
}; |
||||||
|
static const struct option options[] = { |
||||||
|
{ "socket-fd", required_argument, NULL, OPT_SOCKET_FD }, |
||||||
|
{ "worker-seccomp-bpf", required_argument, NULL, OPT_WORKER_SECCOMP_BPF }, |
||||||
|
{ "worker-seccomp-minijail-policy", required_argument, NULL, |
||||||
|
OPT_WORKER_SECCOMP_MINIJAIL_POLICY }, |
||||||
|
{ "worker-seccomp-minijail-log", no_argument, NULL, |
||||||
|
OPT_WORKER_SECCOMP_MINIJAIL_LOG }, |
||||||
|
{ NULL, 0, NULL, 0 } |
||||||
|
}; |
||||||
|
static_assert(OPT_COUNT <= 'z', ""); |
||||||
|
|
||||||
|
while (true) { |
||||||
|
const int ret = getopt_long(argc, argv, "", options, NULL); |
||||||
|
if (ret == -1) |
||||||
|
break; |
||||||
|
|
||||||
|
switch (ret) { |
||||||
|
case OPT_SOCKET_FD: |
||||||
|
srv->client_fd = atoi(optarg); |
||||||
|
break; |
||||||
|
case OPT_WORKER_SECCOMP_BPF: |
||||||
|
srv->worker_seccomp_bpf = optarg; |
||||||
|
break; |
||||||
|
case OPT_WORKER_SECCOMP_MINIJAIL_POLICY: |
||||||
|
srv->worker_seccomp_minijail_policy = optarg; |
||||||
|
break; |
||||||
|
case OPT_WORKER_SECCOMP_MINIJAIL_LOG: |
||||||
|
srv->worker_seccomp_minijail_log = true; |
||||||
|
break; |
||||||
|
default: |
||||||
|
render_log("unknown option specified"); |
||||||
|
return false; |
||||||
|
break; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
if (optind < argc) { |
||||||
|
render_log("non-option arguments specified"); |
||||||
|
return false; |
||||||
|
} |
||||||
|
|
||||||
|
if (srv->client_fd < 0 || !render_socket_is_seqpacket(srv->client_fd)) { |
||||||
|
render_log("no valid client fd specified"); |
||||||
|
return false; |
||||||
|
} |
||||||
|
|
||||||
|
return true; |
||||||
|
} |
||||||
|
|
||||||
|
static bool |
||||||
|
render_server_init(struct render_server *srv, |
||||||
|
int argc, |
||||||
|
char **argv, |
||||||
|
struct render_context_args *ctx_args) |
||||||
|
{ |
||||||
|
memset(srv, 0, sizeof(*srv)); |
||||||
|
srv->state = RENDER_SERVER_STATE_RUN; |
||||||
|
srv->context_args = ctx_args; |
||||||
|
srv->client_fd = -1; |
||||||
|
srv->max_worker_count = RENDER_SERVER_MAX_WORKER_COUNT; |
||||||
|
|
||||||
|
if (!render_server_parse_options(srv, argc, argv)) |
||||||
|
return false; |
||||||
|
|
||||||
|
enum render_worker_jail_seccomp_filter seccomp_filter = |
||||||
|
RENDER_WORKER_JAIL_SECCOMP_NONE; |
||||||
|
const char *seccomp_path = NULL; |
||||||
|
if (srv->worker_seccomp_minijail_log && srv->worker_seccomp_minijail_policy) { |
||||||
|
seccomp_filter = RENDER_WORKER_JAIL_SECCOMP_MINIJAIL_POLICY_LOG; |
||||||
|
seccomp_path = srv->worker_seccomp_minijail_policy; |
||||||
|
} else if (srv->worker_seccomp_bpf) { |
||||||
|
seccomp_filter = RENDER_WORKER_JAIL_SECCOMP_BPF; |
||||||
|
seccomp_path = srv->worker_seccomp_bpf; |
||||||
|
} else if (srv->worker_seccomp_minijail_policy) { |
||||||
|
seccomp_filter = RENDER_WORKER_JAIL_SECCOMP_MINIJAIL_POLICY; |
||||||
|
seccomp_path = srv->worker_seccomp_minijail_policy; |
||||||
|
} |
||||||
|
|
||||||
|
srv->worker_jail = render_worker_jail_create(seccomp_filter, seccomp_path); |
||||||
|
if (!srv->worker_jail) { |
||||||
|
render_log("failed to create worker jail"); |
||||||
|
goto fail; |
||||||
|
} |
||||||
|
|
||||||
|
srv->client = render_client_create(srv, srv->client_fd); |
||||||
|
if (!srv->client) { |
||||||
|
render_log("failed to create client"); |
||||||
|
goto fail; |
||||||
|
} |
||||||
|
/* ownership transferred */ |
||||||
|
srv->client_fd = -1; |
||||||
|
|
||||||
|
return true; |
||||||
|
|
||||||
|
fail: |
||||||
|
render_server_fini(srv); |
||||||
|
return false; |
||||||
|
} |
||||||
|
|
||||||
|
bool |
||||||
|
render_server_main(int argc, char **argv, struct render_context_args *ctx_args) |
||||||
|
{ |
||||||
|
struct render_server srv; |
||||||
|
if (!render_server_init(&srv, argc, argv, ctx_args)) |
||||||
|
return false; |
||||||
|
|
||||||
|
const bool ok = render_server_run(&srv); |
||||||
|
render_server_fini(&srv); |
||||||
|
|
||||||
|
return ok; |
||||||
|
} |
@ -0,0 +1,39 @@ |
|||||||
|
/*
|
||||||
|
* Copyright 2021 Google LLC |
||||||
|
* SPDX-License-Identifier: MIT |
||||||
|
*/ |
||||||
|
|
||||||
|
#ifndef RENDER_SERVER_H |
||||||
|
#define RENDER_SERVER_H |
||||||
|
|
||||||
|
#include "render_common.h" |
||||||
|
|
||||||
|
enum render_server_state {
   RENDER_SERVER_STATE_RUN,
   /* a forked worker leaves the server loop and becomes a context process */
   RENDER_SERVER_STATE_SUBPROCESS,
};

struct render_server {
   enum render_server_state state;

   /* only initialized in subprocesses */
   struct render_context_args *context_args;

   /* options */
   int client_fd;
   const char *worker_seccomp_bpf;
   const char *worker_seccomp_minijail_policy;
   bool worker_seccomp_minijail_log;

   struct render_worker_jail *worker_jail;
   int max_worker_count;
   int current_worker_count;

   /* only one client in the current design */
   struct render_client *client;
};
||||||
|
|
||||||
|
bool |
||||||
|
render_server_main(int argc, char **argv, struct render_context_args *ctx_args); |
||||||
|
|
||||||
|
#endif /* RENDER_SERVER_H */ |
@ -0,0 +1,262 @@ |
|||||||
|
/*
|
||||||
|
* Copyright 2021 Google LLC |
||||||
|
* SPDX-License-Identifier: MIT |
||||||
|
*/ |
||||||
|
|
||||||
|
#include "render_socket.h" |
||||||
|
|
||||||
|
#include <errno.h> |
||||||
|
#include <sys/socket.h> |
||||||
|
#include <sys/types.h> |
||||||
|
#include <unistd.h> |
||||||
|
|
||||||
|
#define RENDER_SOCKET_MAX_FD_COUNT 8 |
||||||
|
|
||||||
|
/* The socket pair between the server process and the client process is set up
|
||||||
|
* by the client process (or yet another process). Because render_server_run |
||||||
|
* does not poll yet, the fd is expected to be blocking. |
||||||
|
* |
||||||
|
* We also expect the fd to be always valid. If the client process dies, the |
||||||
|
* fd becomes invalid and is considered a fatal error. |
||||||
|
* |
||||||
|
* There is also a socket pair between each context worker and the client |
||||||
|
* process. The pair is set up by render_socket_pair here. |
||||||
|
* |
||||||
|
* The fd is also expected to be blocking. When the client process closes its |
||||||
|
* end of the socket pair, the context worker terminates. |
||||||
|
*/ |
||||||
|
/* Create a blocking SOCK_SEQPACKET socket pair with CLOEXEC set on both
 * ends.  Returns false (and logs) on failure.
 */
bool
render_socket_pair(int out_fds[static 2])
{
   if (socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, out_fds) != 0) {
      render_log("failed to create socket pair");
      return false;
   }

   return true;
}
||||||
|
|
||||||
|
/* Return true iff fd is a socket of type SOCK_SEQPACKET. */
bool
render_socket_is_seqpacket(int fd)
{
   int so_type;
   socklen_t opt_len = sizeof(so_type);
   if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &so_type, &opt_len) != 0)
      return false;
   return so_type == SOCK_SEQPACKET;
}
||||||
|
|
||||||
|
void |
||||||
|
render_socket_init(struct render_socket *socket, int fd) |
||||||
|
{ |
||||||
|
assert(fd >= 0); |
||||||
|
*socket = (struct render_socket){ |
||||||
|
.fd = fd, |
||||||
|
}; |
||||||
|
} |
||||||
|
|
||||||
|
void |
||||||
|
render_socket_fini(struct render_socket *socket) |
||||||
|
{ |
||||||
|
close(socket->fd); |
||||||
|
} |
||||||
|
|
||||||
|
static const int * |
||||||
|
get_received_fds(const struct msghdr *msg, int *out_count) |
||||||
|
{ |
||||||
|
const struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg); |
||||||
|
if (unlikely(!cmsg || cmsg->cmsg_level != SOL_SOCKET || |
||||||
|
cmsg->cmsg_type != SCM_RIGHTS || cmsg->cmsg_len < CMSG_LEN(0))) { |
||||||
|
*out_count = 0; |
||||||
|
return NULL; |
||||||
|
} |
||||||
|
|
||||||
|
*out_count = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int); |
||||||
|
return (const int *)CMSG_DATA(cmsg); |
||||||
|
} |
||||||
|
|
||||||
|
static bool |
||||||
|
render_socket_recvmsg(struct render_socket *socket, struct msghdr *msg, size_t *out_size) |
||||||
|
{ |
||||||
|
do { |
||||||
|
const ssize_t s = recvmsg(socket->fd, msg, MSG_CMSG_CLOEXEC); |
||||||
|
if (unlikely(s <= 0)) { |
||||||
|
if (!s) |
||||||
|
return false; |
||||||
|
|
||||||
|
if (errno == EAGAIN || errno == EINTR) |
||||||
|
continue; |
||||||
|
|
||||||
|
render_log("failed to receive message: %s", strerror(errno)); |
||||||
|
return false; |
||||||
|
} |
||||||
|
|
||||||
|
if (unlikely(msg->msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { |
||||||
|
render_log("failed to receive message: truncated"); |
||||||
|
|
||||||
|
int fd_count; |
||||||
|
const int *fds = get_received_fds(msg, &fd_count); |
||||||
|
for (int i = 0; i < fd_count; i++) |
||||||
|
close(fds[i]); |
||||||
|
|
||||||
|
return false; |
||||||
|
} |
||||||
|
|
||||||
|
*out_size = s; |
||||||
|
return true; |
||||||
|
} while (true); |
||||||
|
} |
||||||
|
|
||||||
|
static bool |
||||||
|
render_socket_receive_request_internal(struct render_socket *socket, |
||||||
|
void *data, |
||||||
|
size_t max_size, |
||||||
|
size_t *out_size, |
||||||
|
int *fds, |
||||||
|
int max_fd_count, |
||||||
|
int *out_fd_count) |
||||||
|
{ |
||||||
|
assert(data && max_size); |
||||||
|
struct msghdr msg = { |
||||||
|
.msg_iov = |
||||||
|
&(struct iovec){ |
||||||
|
.iov_base = data, |
||||||
|
.iov_len = max_size, |
||||||
|
}, |
||||||
|
.msg_iovlen = 1, |
||||||
|
}; |
||||||
|
|
||||||
|
char cmsg_buf[CMSG_SPACE(sizeof(*fds) * RENDER_SOCKET_MAX_FD_COUNT)]; |
||||||
|
if (max_fd_count) { |
||||||
|
assert(fds && max_fd_count <= RENDER_SOCKET_MAX_FD_COUNT); |
||||||
|
msg.msg_control = cmsg_buf; |
||||||
|
msg.msg_controllen = CMSG_SPACE(sizeof(*fds) * max_fd_count); |
||||||
|
|
||||||
|
struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg); |
||||||
|
memset(cmsg, 0, sizeof(*cmsg)); |
||||||
|
} |
||||||
|
|
||||||
|
if (!render_socket_recvmsg(socket, &msg, out_size)) |
||||||
|
return false; |
||||||
|
|
||||||
|
if (max_fd_count) { |
||||||
|
int received_fd_count; |
||||||
|
const int *received_fds = get_received_fds(&msg, &received_fd_count); |
||||||
|
assert(received_fd_count <= max_fd_count); |
||||||
|
|
||||||
|
memcpy(fds, received_fds, sizeof(*fds) * received_fd_count); |
||||||
|
*out_fd_count = received_fd_count; |
||||||
|
} else if (out_fd_count) { |
||||||
|
*out_fd_count = 0; |
||||||
|
} |
||||||
|
|
||||||
|
return true; |
||||||
|
} |
||||||
|
|
||||||
|
/* Receive a request that carries no fds. */
bool
render_socket_receive_request(struct render_socket *socket,
                              void *data,
                              size_t max_size,
                              size_t *out_size)
{
   return render_socket_receive_request_internal(socket, data, max_size, out_size,
                                                 NULL, 0, NULL);
}
||||||
|
|
||||||
|
/* Receive a request that may carry up to max_fd_count fds; the caller owns
 * any received fds.
 */
bool
render_socket_receive_request_with_fds(struct render_socket *socket,
                                       void *data,
                                       size_t max_size,
                                       size_t *out_size,
                                       int *fds,
                                       int max_fd_count,
                                       int *out_fd_count)
{
   return render_socket_receive_request_internal(socket, data, max_size, out_size, fds,
                                                 max_fd_count, out_fd_count);
}
||||||
|
|
||||||
|
/* Receive exactly size bytes; a short or long message is an error. */
bool
render_socket_receive_data(struct render_socket *socket, void *data, size_t size)
{
   size_t received_size;
   if (!render_socket_receive_request(socket, data, size, &received_size))
      return false;

   if (received_size != size) {
      render_log("failed to receive data: expected %zu but received %zu", size,
                 received_size);
      return false;
   }

   return true;
}
||||||
|
|
||||||
|
static bool |
||||||
|
render_socket_sendmsg(struct render_socket *socket, const struct msghdr *msg) |
||||||
|
{ |
||||||
|
do { |
||||||
|
const ssize_t s = sendmsg(socket->fd, msg, MSG_NOSIGNAL); |
||||||
|
if (unlikely(s < 0)) { |
||||||
|
if (errno == EAGAIN || errno == EINTR) |
||||||
|
continue; |
||||||
|
|
||||||
|
render_log("failed to send message: %s", strerror(errno)); |
||||||
|
return false; |
||||||
|
} |
||||||
|
|
||||||
|
/* no partial send since the socket type is SOCK_SEQPACKET */ |
||||||
|
assert(msg->msg_iovlen == 1 && msg->msg_iov[0].iov_len == (size_t)s); |
||||||
|
return true; |
||||||
|
} while (true); |
||||||
|
} |
||||||
|
|
||||||
|
static inline bool |
||||||
|
render_socket_send_reply_internal(struct render_socket *socket, |
||||||
|
const void *data, |
||||||
|
size_t size, |
||||||
|
const int *fds, |
||||||
|
int fd_count) |
||||||
|
{ |
||||||
|
assert(data && size); |
||||||
|
struct msghdr msg = { |
||||||
|
.msg_iov = |
||||||
|
&(struct iovec){ |
||||||
|
.iov_base = (void *)data, |
||||||
|
.iov_len = size, |
||||||
|
}, |
||||||
|
.msg_iovlen = 1, |
||||||
|
}; |
||||||
|
|
||||||
|
char cmsg_buf[CMSG_SPACE(sizeof(*fds) * RENDER_SOCKET_MAX_FD_COUNT)]; |
||||||
|
if (fd_count) { |
||||||
|
assert(fds && fd_count <= RENDER_SOCKET_MAX_FD_COUNT); |
||||||
|
msg.msg_control = cmsg_buf; |
||||||
|
msg.msg_controllen = CMSG_SPACE(sizeof(*fds) * fd_count); |
||||||
|
|
||||||
|
struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg); |
||||||
|
cmsg->cmsg_level = SOL_SOCKET; |
||||||
|
cmsg->cmsg_type = SCM_RIGHTS; |
||||||
|
cmsg->cmsg_len = CMSG_LEN(sizeof(*fds) * fd_count); |
||||||
|
memcpy(CMSG_DATA(cmsg), fds, sizeof(*fds) * fd_count); |
||||||
|
} |
||||||
|
|
||||||
|
return render_socket_sendmsg(socket, &msg); |
||||||
|
} |
||||||
|
|
||||||
|
/* Send a reply with no fds attached. */
bool
render_socket_send_reply(struct render_socket *socket, const void *data, size_t size)
{
   return render_socket_send_reply_internal(socket, data, size, NULL, 0);
}
||||||
|
|
||||||
|
/* Send a reply with fds attached; the fds remain owned by the caller. */
bool
render_socket_send_reply_with_fds(struct render_socket *socket,
                                  const void *data,
                                  size_t size,
                                  const int *fds,
                                  int fd_count)
{
   return render_socket_send_reply_internal(socket, data, size, fds, fd_count);
}
@ -0,0 +1,55 @@ |
|||||||
|
/*
|
||||||
|
* Copyright 2021 Google LLC |
||||||
|
* SPDX-License-Identifier: MIT |
||||||
|
*/ |
||||||
|
|
||||||
|
#ifndef RENDER_SOCKET_H |
||||||
|
#define RENDER_SOCKET_H |
||||||
|
|
||||||
|
#include "render_common.h" |
||||||
|
|
||||||
|
/* A connected, blocking SOCK_SEQPACKET socket; owns its fd. */
struct render_socket {
   int fd;
};
||||||
|
|
||||||
|
bool |
||||||
|
render_socket_pair(int out_fds[static 2]); |
||||||
|
|
||||||
|
bool |
||||||
|
render_socket_is_seqpacket(int fd); |
||||||
|
|
||||||
|
void |
||||||
|
render_socket_init(struct render_socket *socket, int fd); |
||||||
|
|
||||||
|
void |
||||||
|
render_socket_fini(struct render_socket *socket); |
||||||
|
|
||||||
|
bool |
||||||
|
render_socket_receive_request(struct render_socket *socket, |
||||||
|
void *data, |
||||||
|
size_t max_size, |
||||||
|
size_t *out_size); |
||||||
|
|
||||||
|
bool |
||||||
|
render_socket_receive_request_with_fds(struct render_socket *socket, |
||||||
|
void *data, |
||||||
|
size_t max_size, |
||||||
|
size_t *out_size, |
||||||
|
int *fds, |
||||||
|
int max_fd_count, |
||||||
|
int *out_fd_count); |
||||||
|
|
||||||
|
bool |
||||||
|
render_socket_receive_data(struct render_socket *socket, void *data, size_t size); |
||||||
|
|
||||||
|
bool |
||||||
|
render_socket_send_reply(struct render_socket *socket, const void *data, size_t size); |
||||||
|
|
||||||
|
bool |
||||||
|
render_socket_send_reply_with_fds(struct render_socket *socket, |
||||||
|
const void *data, |
||||||
|
size_t size, |
||||||
|
const int *fds, |
||||||
|
int fd_count); |
||||||
|
|
||||||
|
#endif /* RENDER_SOCKET_H */ |
@ -0,0 +1,154 @@ |
|||||||
|
/*
|
||||||
|
* Copyright 2021 Google LLC |
||||||
|
* SPDX-License-Identifier: MIT |
||||||
|
*/ |
||||||
|
|
||||||
|
#include "render_virgl.h" |
||||||
|
|
||||||
|
#include "virglrenderer.h" |
||||||
|
|
||||||
|
#include "render_context.h" |
||||||
|
|
||||||
|
struct render_virgl render_virgl_internal = { |
||||||
|
#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD |
||||||
|
.struct_mutex = _MTX_INITIALIZER_NP, |
||||||
|
.dispatch_mutex = _MTX_INITIALIZER_NP, |
||||||
|
#endif |
||||||
|
.init_count = 0, |
||||||
|
}; |
||||||
|
|
||||||
|
static struct render_virgl * |
||||||
|
render_virgl_lock_struct(void) |
||||||
|
{ |
||||||
|
#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD |
||||||
|
mtx_lock(&render_virgl_internal.struct_mutex); |
||||||
|
#endif |
||||||
|
return &render_virgl_internal; |
||||||
|
} |
||||||
|
|
||||||
|
/* Release the struct lock (a no-op in subprocess mode). */
static void
render_virgl_unlock_struct(void)
{
#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
   mtx_unlock(&render_virgl_internal.struct_mutex);
#endif
}
||||||
|
|
||||||
|
static struct render_context * |
||||||
|
render_virgl_lookup_context(uint32_t ctx_id) |
||||||
|
{ |
||||||
|
const struct render_virgl *virgl = render_virgl_lock_struct(); |
||||||
|
struct render_context *ctx = NULL; |
||||||
|
|
||||||
|
#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD |
||||||
|
list_for_each_entry (struct render_context, iter, &virgl->contexts, head) { |
||||||
|
if (iter->ctx_id == ctx_id) { |
||||||
|
ctx = iter; |
||||||
|
break; |
||||||
|
} |
||||||
|
} |
||||||
|
#else |
||||||
|
assert(list_is_singular(&virgl->contexts)); |
||||||
|
ctx = list_first_entry(&virgl->contexts, struct render_context, head); |
||||||
|
assert(ctx->ctx_id == ctx_id); |
||||||
|
(void)ctx_id; |
||||||
|
#endif |
||||||
|
|
||||||
|
render_virgl_unlock_struct(); |
||||||
|
|
||||||
|
return ctx; |
||||||
|
} |
||||||
|
|
||||||
|
static void |
||||||
|
render_virgl_cb_write_context_fence(UNUSED void *cookie, |
||||||
|
uint32_t ctx_id, |
||||||
|
uint64_t queue_id, |
||||||
|
void *fence_cookie) |
||||||
|
{ |
||||||
|
struct render_context *ctx = render_virgl_lookup_context(ctx_id); |
||||||
|
assert(ctx); |
||||||
|
|
||||||
|
const uint32_t ring_idx = queue_id; |
||||||
|
const uint32_t seqno = (uint32_t)pointer_to_uintptr(fence_cookie); |
||||||
|
render_context_update_timeline(ctx, ring_idx, seqno); |
||||||
|
} |
||||||
|
|
||||||
|
/* Callback table handed to virgl_renderer_init; only the async fence
 * callback is hooked up.
 */
static const struct virgl_renderer_callbacks render_virgl_cbs = {
   .version = VIRGL_RENDERER_CALLBACKS_VERSION,
   .write_context_fence = render_virgl_cb_write_context_fence,
};
||||||
|
|
||||||
|
void |
||||||
|
render_virgl_add_context(struct render_context *ctx) |
||||||
|
{ |
||||||
|
struct render_virgl *virgl = render_virgl_lock_struct(); |
||||||
|
list_addtail(&ctx->head, &virgl->contexts); |
||||||
|
render_virgl_unlock_struct(); |
||||||
|
} |
||||||
|
|
||||||
|
void |
||||||
|
render_virgl_remove_context(struct render_context *ctx) |
||||||
|
{ |
||||||
|
render_virgl_lock_struct(); |
||||||
|
list_del(&ctx->head); |
||||||
|
render_virgl_unlock_struct(); |
||||||
|
} |
||||||
|
|
||||||
|
void |
||||||
|
render_virgl_fini(void) |
||||||
|
{ |
||||||
|
struct render_virgl *virgl = render_virgl_lock_struct(); |
||||||
|
|
||||||
|
if (virgl->init_count) { |
||||||
|
virgl->init_count--; |
||||||
|
if (!virgl->init_count) { |
||||||
|
render_virgl_lock_dispatch(); |
||||||
|
virgl_renderer_cleanup(virgl); |
||||||
|
render_virgl_unlock_dispatch(); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
render_virgl_unlock_struct(); |
||||||
|
} |
||||||
|
|
||||||
|
bool |
||||||
|
render_virgl_init(uint32_t init_flags) |
||||||
|
{ |
||||||
|
/* we only care if virgl and/or venus are enabled */ |
||||||
|
init_flags &= VIRGL_RENDERER_VENUS | VIRGL_RENDERER_NO_VIRGL; |
||||||
|
|
||||||
|
/* always use sync thread and async fence cb for low latency */ |
||||||
|
init_flags |= VIRGL_RENDERER_THREAD_SYNC | VIRGL_RENDERER_ASYNC_FENCE_CB | |
||||||
|
VIRGL_RENDERER_USE_EXTERNAL_BLOB; |
||||||
|
|
||||||
|
struct render_virgl *virgl = render_virgl_lock_struct(); |
||||||
|
|
||||||
|
if (virgl->init_count) { |
||||||
|
if (virgl->init_flags != init_flags) { |
||||||
|
render_log("failed to re-initialize with flags 0x%x", init_flags); |
||||||
|
goto fail; |
||||||
|
} |
||||||
|
} else { |
||||||
|
render_virgl_lock_dispatch(); |
||||||
|
int ret = virgl_renderer_init(virgl, init_flags, |
||||||
|
(struct virgl_renderer_callbacks *)&render_virgl_cbs); |
||||||
|
render_virgl_unlock_dispatch(); |
||||||
|
if (ret) { |
||||||
|
render_log("failed to initialize virglrenderer"); |
||||||
|
goto fail; |
||||||
|
} |
||||||
|
|
||||||
|
list_inithead(&virgl->contexts); |
||||||
|
virgl->init_flags = init_flags; |
||||||
|
} |
||||||
|
|
||||||
|
virgl->init_count++; |
||||||
|
|
||||||
|
render_virgl_unlock_struct(); |
||||||
|
|
||||||
|
return true; |
||||||
|
|
||||||
|
fail: |
||||||
|
render_virgl_unlock_struct(); |
||||||
|
return false; |
||||||
|
} |
@ -0,0 +1,70 @@ |
|||||||
|
/*
|
||||||
|
* Copyright 2021 Google LLC |
||||||
|
* SPDX-License-Identifier: MIT |
||||||
|
*/ |
||||||
|
|
||||||
|
#ifndef RENDER_VIRGL_H |
||||||
|
#define RENDER_VIRGL_H |
||||||
|
|
||||||
|
#include "render_common.h" |
||||||
|
|
||||||
|
#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD |
||||||
|
#include "c11/threads.h" |
||||||
|
#endif |
||||||
|
|
||||||
|
/* Workers call into virglrenderer. When they are processes, not much care is
|
||||||
|
* required. We just want to be careful that the server process might have |
||||||
|
 * initialized virglrenderer before workers are forked.
||||||
|
* |
||||||
|
* But when workers are threads, we need to grab a lock to protect |
||||||
|
* virglrenderer. |
||||||
|
* |
||||||
|
* TODO skip virglrenderer.h and go straight to vkr_renderer.h. That allows |
||||||
|
* us to remove this file. |
||||||
|
*/ |
||||||
|
/* Process-global wrapper state around virglrenderer. */
struct render_virgl {
#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
   /* this protects the struct */
   mtx_t struct_mutex;
   /* this protects virglrenderer */
   mtx_t dispatch_mutex;
#endif

   /* for nested initialization */
   int init_count;
   /* effective flags of the first init; re-init must pass matching flags */
   uint32_t init_flags;

   /* live render_contexts, looked up by ctx_id for fence callbacks */
   struct list_head contexts;
};
||||||
|
|
||||||
|
/* the lone instance; defined in render_virgl.c */
extern struct render_virgl render_virgl_internal;

/* Ref-counted init; nested calls must resolve to the same effective flags. */
bool
render_virgl_init(uint32_t init_flags);

/* Drop one init reference; cleans up virglrenderer at zero. */
void
render_virgl_fini(void);

/* Track a context so fence callbacks can look it up by ctx_id. */
void
render_virgl_add_context(struct render_context *ctx);

void
render_virgl_remove_context(struct render_context *ctx);
||||||
|
|
||||||
|
/* Serialize calls into virglrenderer when workers are threads; a no-op in
 * the process-worker builds.
 */
static inline void
render_virgl_lock_dispatch(void)
{
#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
   mtx_lock(&render_virgl_internal.dispatch_mutex);
#endif
}
||||||
|
|
||||||
|
/* Release the lock taken by render_virgl_lock_dispatch. */
static inline void
render_virgl_unlock_dispatch(void)
{
#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
   mtx_unlock(&render_virgl_internal.dispatch_mutex);
#endif
}
||||||
|
|
||||||
|
#endif /* RENDER_VIRGL_H */ |
@ -0,0 +1,254 @@ |
|||||||
|
/*
|
||||||
|
* Copyright 2021 Google LLC |
||||||
|
* SPDX-License-Identifier: MIT |
||||||
|
*/ |
||||||
|
|
||||||
|
#include "render_worker.h" |
||||||
|
|
||||||
|
/* One and only one of ENABLE_RENDER_SERVER_WORKER_* must be set.
|
||||||
|
* |
||||||
|
* With ENABLE_RENDER_SERVER_WORKER_PROCESS, each worker is a subprocess |
||||||
|
* forked from the server process. |
||||||
|
* |
||||||
|
* With ENABLE_RENDER_SERVER_WORKER_THREAD, each worker is a thread of the |
||||||
|
* server process. |
||||||
|
* |
||||||
|
* With ENABLE_RENDER_SERVER_WORKER_MINIJAIL, each worker is a subprocess |
||||||
|
* forked from the server process, jailed with minijail. |
||||||
|
*/ |
||||||
|
#if (ENABLE_RENDER_SERVER_WORKER_PROCESS + ENABLE_RENDER_SERVER_WORKER_THREAD + \ |
||||||
|
ENABLE_RENDER_SERVER_WORKER_MINIJAIL) != 1 |
||||||
|
#error "no worker defined" |
||||||
|
#endif |
||||||
|
|
||||||
|
#include <fcntl.h> |
||||||
|
#include <signal.h> |
||||||
|
#include <sys/types.h> |
||||||
|
#include <sys/wait.h> |
||||||
|
#include <threads.h> |
||||||
|
#include <unistd.h> |
||||||
|
|
||||||
|
/* Server-side record of a worker: a thread handle or a child pid, plus a
 * copy of the thread start argument.
 */
struct render_worker {
#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD
   thrd_t thread;
#else
   pid_t pid;
#endif
   /* set once render_worker_reap succeeds */
   bool reaped;

   /* flexible array member holding the caller's thread_data copy */
   char thread_data[];
};
||||||
|
|
||||||
|
#ifdef ENABLE_RENDER_SERVER_WORKER_MINIJAIL |
||||||
|
|
||||||
|
#include <fcntl.h> |
||||||
|
#include <libminijail.h> |
||||||
|
#include <linux/filter.h> |
||||||
|
#include <linux/seccomp.h> |
||||||
|
#include <stdio.h> |
||||||
|
#include <sys/stat.h> |
||||||
|
|
||||||
|
/* Read a compiled seccomp-BPF program from path into prog.  On success
 * prog->filter is heap-allocated and owned by the caller.  The file size
 * must be a positive multiple of sizeof(struct sock_filter).
 */
static bool
load_bpf_program(struct sock_fprog *prog, const char *path)
{
   void *buf = NULL;
   const int fd = open(path, O_RDONLY);
   if (fd < 0)
      goto fail;

   const off_t file_size = lseek(fd, 0, SEEK_END);
   if (file_size <= 0 || file_size % sizeof(struct sock_filter))
      goto fail;
   lseek(fd, 0, SEEK_SET);

   buf = malloc(file_size);
   if (!buf)
      goto fail;

   for (off_t done = 0; done < file_size;) {
      const ssize_t r = read(fd, (char *)buf + done, file_size - done);
      if (r <= 0)
         goto fail;
      done += r;
   }

   close(fd);

   prog->len = file_size / sizeof(struct sock_filter);
   prog->filter = buf;

   return true;

fail:
   free(buf);
   if (fd >= 0)
      close(fd);
   return false;
}
||||||
|
|
||||||
|
static struct render_worker_jail * |
||||||
|
create_minijail(enum render_worker_jail_seccomp_filter seccomp_filter, |
||||||
|
const char *seccomp_path) |
||||||
|
{ |
||||||
|
struct minijail *j = minijail_new(); |
||||||
|
|
||||||
|
/* TODO namespaces and many more */ |
||||||
|
minijail_no_new_privs(j); |
||||||
|
|
||||||
|
if (seccomp_filter != RENDER_WORKER_JAIL_SECCOMP_NONE) { |
||||||
|
if (seccomp_filter == RENDER_WORKER_JAIL_SECCOMP_BPF) { |
||||||
|
struct sock_fprog prog; |
||||||
|
if (!load_bpf_program(&prog, seccomp_path)) { |
||||||
|
minijail_destroy(j); |
||||||
|
return NULL; |
||||||
|
} |
||||||
|
|
||||||
|
minijail_set_seccomp_filters(j, &prog); |
||||||
|
free(prog.filter); |
||||||
|
} else { |
||||||
|
if (seccomp_filter == RENDER_WORKER_JAIL_SECCOMP_POLICY_LOG) |
||||||
|
minijail_log_seccomp_filter_failures(j); |
||||||
|
minijail_parse_seccomp_filters(j, seccomp_path); |
||||||
|
} |
||||||
|
|
||||||
|
minijail_use_seccomp_filter(j); |
||||||
|
} |
||||||
|
|
||||||
|
return (struct render_worker_jail *)j; |
||||||
|
} |
||||||
|
|
||||||
|
/* Fork a child confined by a copy of the template jail.  Returns the pid
 * from minijail_fork, or -1 on setup failure.
 */
static pid_t
fork_minijail(const struct render_worker_jail *jail)
{
   struct minijail *dup = minijail_new();
   if (!dup)
      return -1;

   /* is this faster? */
   if (minijail_copy_jail((const struct minijail *)jail, dup)) {
      minijail_destroy(dup);
      return -1;
   }

   const pid_t pid = minijail_fork(dup);
   minijail_destroy(dup);

   return pid;
}
||||||
|
|
||||||
|
#endif /* ENABLE_RENDER_SERVER_WORKER_MINIJAIL */ |
||||||
|
|
||||||
|
struct render_worker_jail * |
||||||
|
render_worker_jail_create(enum render_worker_jail_seccomp_filter seccomp_filter, |
||||||
|
const char *seccomp_path) |
||||||
|
{ |
||||||
|
#if defined(ENABLE_RENDER_SERVER_WORKER_MINIJAIL) |
||||||
|
return create_minijail(seccomp_filter, seccomp_path); |
||||||
|
#else |
||||||
|
/* TODO RENDER_WORKER_JAIL_SECCOMP_BPF */ |
||||||
|
if (seccomp_filter != RENDER_WORKER_JAIL_SECCOMP_NONE) |
||||||
|
return NULL; |
||||||
|
(void)seccomp_path; |
||||||
|
return (void *)1; |
||||||
|
#endif |
||||||
|
} |
||||||
|
|
||||||
|
/* Destroy a jail from render_worker_jail_create. */
void
render_worker_jail_destroy(struct render_worker_jail *jail)
{
#if defined(ENABLE_RENDER_SERVER_WORKER_MINIJAIL)
   minijail_destroy((struct minijail *)jail);
#else
   /* the placeholder handle needs no cleanup */
   (void)jail;
#endif
}
||||||
|
|
||||||
|
struct render_worker * |
||||||
|
render_worker_create(struct render_worker_jail *jail, |
||||||
|
int (*thread_func)(void *thread_data), |
||||||
|
void *thread_data, |
||||||
|
size_t thread_data_size) |
||||||
|
{ |
||||||
|
struct render_worker *worker = calloc(1, sizeof(*worker) + thread_data_size); |
||||||
|
if (!worker) |
||||||
|
return NULL; |
||||||
|
|
||||||
|
memcpy(worker->thread_data, thread_data, thread_data_size); |
||||||
|
|
||||||
|
bool ok; |
||||||
|
#if defined(ENABLE_RENDER_SERVER_WORKER_PROCESS) |
||||||
|
worker->pid = fork(); |
||||||
|
ok = worker->pid >= 0; |
||||||
|
(void)jail; |
||||||
|
(void)thread_func; |
||||||
|
#elif defined(ENABLE_RENDER_SERVER_WORKER_THREAD) |
||||||
|
ok = thrd_create(&worker->thread, thread_func, worker->thread_data) == thrd_success; |
||||||
|
(void)jail; |
||||||
|
#elif defined(ENABLE_RENDER_SERVER_WORKER_MINIJAIL) |
||||||
|
worker->pid = fork_minijail(jail); |
||||||
|
ok = worker->pid >= 0; |
||||||
|
(void)thread_func; |
||||||
|
#endif |
||||||
|
if (!ok) { |
||||||
|
free(worker); |
||||||
|
return NULL; |
||||||
|
} |
||||||
|
|
||||||
|
return worker; |
||||||
|
} |
||||||
|
|
||||||
|
bool |
||||||
|
render_worker_is_record(const struct render_worker *worker) |
||||||
|
{ |
||||||
|
/* return false if called from the worker itself */ |
||||||
|
#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD |
||||||
|
return !thrd_equal(worker->thread, thrd_current()); |
||||||
|
#else |
||||||
|
return worker->pid > 0; |
||||||
|
#endif |
||||||
|
} |
||||||
|
|
||||||
|
void |
||||||
|
render_worker_kill(struct render_worker *worker) |
||||||
|
{ |
||||||
|
assert(render_worker_is_record(worker)); |
||||||
|
|
||||||
|
#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD |
||||||
|
/* we trust the thread to clean up and exit in finite time */ |
||||||
|
#else |
||||||
|
kill(worker->pid, SIGKILL); |
||||||
|
#endif |
||||||
|
} |
||||||
|
|
||||||
|
bool |
||||||
|
render_worker_reap(struct render_worker *worker, bool wait) |
||||||
|
{ |
||||||
|
assert(render_worker_is_record(worker)); |
||||||
|
|
||||||
|
if (worker->reaped) |
||||||
|
return true; |
||||||
|
|
||||||
|
bool ok; |
||||||
|
#ifdef ENABLE_RENDER_SERVER_WORKER_THREAD |
||||||
|
(void)wait; |
||||||
|
ok = thrd_join(worker->thread, NULL) == thrd_success; |
||||||
|
#else |
||||||
|
const int options = WEXITED | (wait ? 0 : WNOHANG); |
||||||
|
siginfo_t siginfo = { 0 }; |
||||||
|
const int ret = waitid(P_PID, worker->pid, &siginfo, options); |
||||||
|
ok = !ret && siginfo.si_pid == worker->pid; |
||||||
|
#endif |
||||||
|
|
||||||
|
worker->reaped = ok; |
||||||
|
return ok; |
||||||
|
} |
||||||
|
|
||||||
|
/* Free the worker record (reap the worker first — see render_worker_reap). */
void
render_worker_destroy(struct render_worker *worker)
{
   free(worker);
}
@ -0,0 +1,46 @@ |
|||||||
|
/*
|
||||||
|
* Copyright 2021 Google LLC |
||||||
|
* SPDX-License-Identifier: MIT |
||||||
|
*/ |
||||||
|
|
||||||
|
#ifndef RENDER_WORKER_H |
||||||
|
#define RENDER_WORKER_H |
||||||
|
|
||||||
|
#include "render_common.h" |
||||||
|
|
||||||
|
/* How (and whether) a seccomp filter is applied to workers. */
enum render_worker_jail_seccomp_filter {
   /* seccomp_path is ignored and seccomp is disabled */
   RENDER_WORKER_JAIL_SECCOMP_NONE,
   /* seccomp_path is a file containing a BPF program */
   RENDER_WORKER_JAIL_SECCOMP_BPF,
   /* seccomp_path is a file containing a minijail policy */
   RENDER_WORKER_JAIL_SECCOMP_MINIJAIL_POLICY,
   /* same as MINIJAIL_POLICY, but filter failures are logged */
   RENDER_WORKER_JAIL_SECCOMP_MINIJAIL_POLICY_LOG,
};
||||||
|
|
||||||
|
/* Create the jail template shared by all workers; seccomp_path is ignored
 * when seccomp_filter is RENDER_WORKER_JAIL_SECCOMP_NONE.
 */
struct render_worker_jail *
render_worker_jail_create(enum render_worker_jail_seccomp_filter seccomp_filter,
                          const char *seccomp_path);

void
render_worker_jail_destroy(struct render_worker_jail *jail);

/* Start a worker; thread_data is copied into the worker record. */
struct render_worker *
render_worker_create(struct render_worker_jail *jail,
                     int (*thread_func)(void *thread_data),
                     void *thread_data,
                     size_t thread_data_size);

/* True on the server's record of the worker; false inside the worker. */
bool
render_worker_is_record(const struct render_worker *worker);

void
render_worker_kill(struct render_worker *worker);

/* Join/wait on the worker; with wait false it polls and may return false. */
bool
render_worker_reap(struct render_worker *worker, bool wait);

void
render_worker_destroy(struct render_worker *worker);
||||||
|
|
||||||
|
#endif /* RENDER_WORKER_H */ |
Loading…
Reference in new issue