/**************************************************************************
 *
 * Copyright (C) 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/* library interface from QEMU to virglrenderer */

#ifndef VIRGLRENDERER_H
#define VIRGLRENDERER_H

#include <stdint.h>
#include <stdbool.h>
#include <stdarg.h>

struct virgl_box;
struct iovec;

#define VIRGL_EXPORT __attribute__((visibility("default")))

typedef void *virgl_renderer_gl_context;

struct virgl_renderer_gl_ctx_param {
   int version;
   bool shared;
   int major_ver;
   int minor_ver;
};

#define VIRGL_RENDERER_CALLBACKS_VERSION 2

struct virgl_renderer_callbacks {
   int version;
   void (*write_fence)(void *cookie, uint32_t fence);

   /* interact with GL implementation */
   virgl_renderer_gl_context (*create_gl_context)(void *cookie, int scanout_idx, struct virgl_renderer_gl_ctx_param *param);
   void (*destroy_gl_context)(void *cookie, virgl_renderer_gl_context ctx);
   int (*make_current)(void *cookie, int scanout_idx, virgl_renderer_gl_context ctx);

   int (*get_drm_fd)(void *cookie); /* v2, used with flags & VIRGL_RENDERER_USE_EGL */
};
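
/*
 * Example (illustrative sketch, not part of this header): filling in
 * virgl_renderer_callbacks before calling virgl_renderer_init(). The callback
 * bodies are placeholders; a real embedder (QEMU, vtest, ...) wires them to its
 * own display/GL code. "my_write_fence" and the other names are hypothetical,
 * used only for this example.
 *
 *   static void my_write_fence(void *cookie, uint32_t fence)
 *   {
 *      // record that all commands up to "fence" have completed
 *   }
 *
 *   static virgl_renderer_gl_context
 *   my_create_gl_context(void *cookie, int scanout_idx,
 *                        struct virgl_renderer_gl_ctx_param *param)
 *   {
 *      // create a GL context honouring param->major_ver/minor_ver and param->shared
 *      return NULL; // placeholder
 *   }
 *
 *   static void my_destroy_gl_context(void *cookie, virgl_renderer_gl_context ctx) {}
 *   static int my_make_current(void *cookie, int scanout_idx, virgl_renderer_gl_context ctx) { return 0; }
 *
 *   static struct virgl_renderer_callbacks my_cbs = {
 *      .version = VIRGL_RENDERER_CALLBACKS_VERSION,
 *      .write_fence = my_write_fence,
 *      .create_gl_context = my_create_gl_context,
 *      .destroy_gl_context = my_destroy_gl_context,
 *      .make_current = my_make_current,
 *      // .get_drm_fd only matters for version >= 2 together with VIRGL_RENDERER_USE_EGL
 *   };
 */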

/* virtio-gpu compatible interface */
#define VIRGL_RENDERER_USE_EGL 1
/*
 * Wait for sync objects in a thread rather than polling.
 * Use virgl_renderer_get_poll_fd to know whether this feature is in effect.
 * (An illustrative init/poll sketch follows the flag definitions below.)
 */
#define VIRGL_RENDERER_THREAD_SYNC 2
#define VIRGL_RENDERER_USE_GLX (1 << 2)
#define VIRGL_RENDERER_USE_SURFACELESS (1 << 3)
#define VIRGL_RENDERER_USE_GLES (1 << 4)
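
/*
 * Example (illustrative sketch): initializing the renderer with EGL and the
 * threaded fence waiter, then integrating the poll fd into an event loop.
 * "my_cbs" and "my_cookie" are hypothetical names supplied by the embedder.
 *
 *   #include <poll.h>
 *
 *   int ret = virgl_renderer_init(my_cookie,
 *                                 VIRGL_RENDERER_USE_EGL | VIRGL_RENDERER_THREAD_SYNC,
 *                                 &my_cbs);
 *   if (ret != 0)
 *      return ret;
 *
 *   int fd = virgl_renderer_get_poll_fd();
 *   if (fd >= 0) {
 *      // thread sync is in effect: wait on the fd, then reap finished fences
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *      if (poll(&pfd, 1, -1) > 0)
 *         virgl_renderer_poll(); // invokes write_fence for completed fences
 *   } else {
 *      // no poll fd: fall back to calling virgl_renderer_poll() periodically
 *      virgl_renderer_poll();
 *   }
 */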

#ifdef VIRGL_RENDERER_UNSTABLE_APIS
/*
 * Blob resources used with the 3D driver must be able to be represented as file descriptors.
 * The typical use case is a virtual machine manager (or vtest) running in multi-process
 * mode. In a standard Linux setup, that means the KVM process is different from the process
 * that instantiated virglrenderer. For zero-copy capability to work, file descriptors must
 * be used.
 *
 * VMMs that advertise support for the virtio-gpu feature VIRTIO_GPU_F_RESOURCE_BLOB and run
 * in multi-process mode *must* specify this flag.
 */
#define VIRGL_RENDERER_USE_EXTERNAL_BLOB (1 << 5)

#endif /* VIRGL_RENDERER_UNSTABLE_APIS */

VIRGL_EXPORT int virgl_renderer_init(void *cookie, int flags, struct virgl_renderer_callbacks *cb);
VIRGL_EXPORT void virgl_renderer_poll(void); /* force fences */

/* we need to give QEMU the cursor resource contents */
VIRGL_EXPORT void *virgl_renderer_get_cursor_data(uint32_t resource_id, uint32_t *width, uint32_t *height);

VIRGL_EXPORT void virgl_renderer_get_rect(int resource_id, struct iovec *iov, unsigned int num_iovs,
                                          uint32_t offset, int x, int y, int width, int height);

VIRGL_EXPORT int virgl_renderer_get_fd_for_texture(uint32_t tex_id, int *fd);
VIRGL_EXPORT int virgl_renderer_get_fd_for_texture2(uint32_t tex_id, int *fd, int *stride, int *offset);

/*
 * These are only here for compatibility reasons. In the future, use the flags
 * from virgl_hw.h instead.
 */
#define VIRGL_RES_BIND_DEPTH_STENCIL (1 << 0)
#define VIRGL_RES_BIND_RENDER_TARGET (1 << 1)
#define VIRGL_RES_BIND_SAMPLER_VIEW (1 << 3)
#define VIRGL_RES_BIND_VERTEX_BUFFER (1 << 4)
#define VIRGL_RES_BIND_INDEX_BUFFER (1 << 5)
#define VIRGL_RES_BIND_CONSTANT_BUFFER (1 << 6)
#define VIRGL_RES_BIND_STREAM_OUTPUT (1 << 11)
#define VIRGL_RES_BIND_CURSOR (1 << 16)
#define VIRGL_RES_BIND_CUSTOM (1 << 17)
#define VIRGL_RES_BIND_SCANOUT (1 << 18)
#define VIRGL_RES_BIND_SHARED (1 << 20)

enum virgl_renderer_structure_type_v0 {
   VIRGL_RENDERER_STRUCTURE_TYPE_NONE = 0,
   VIRGL_RENDERER_STRUCTURE_TYPE_EXPORT_QUERY = (1 << 0),
   VIRGL_RENDERER_STRUCTURE_TYPE_SUPPORTED_STRUCTURES = (1 << 1),
};

struct virgl_renderer_resource_create_args {
   uint32_t handle;
   uint32_t target;
   uint32_t format;
   uint32_t bind;
   uint32_t width;
   uint32_t height;
   uint32_t depth;
   uint32_t array_size;
   uint32_t last_level;
   uint32_t nr_samples;
   uint32_t flags;
};
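
/*
 * Example (illustrative sketch): creating a simple 2D resource with
 * virgl_renderer_resource_create() (declared below). The numeric target/format
 * values are assumptions for illustration; real code should use the enums from
 * virgl_hw.h (a 2D texture target and a BGRA8888-style format) rather than
 * magic numbers.
 *
 *   struct virgl_renderer_resource_create_args args = {
 *      .handle = 1,              // guest-chosen resource id
 *      .target = 2,              // assumed: 2D texture target value from virgl_hw.h
 *      .format = 1,              // assumed: a BGRA8888-style format value from virgl_hw.h
 *      .bind = VIRGL_RES_BIND_SAMPLER_VIEW | VIRGL_RES_BIND_RENDER_TARGET,
 *      .width = 256,
 *      .height = 256,
 *      .depth = 1,
 *      .array_size = 1,
 *      .last_level = 0,
 *      .nr_samples = 0,
 *      .flags = 0,
 *   };
 *   int ret = virgl_renderer_resource_create(&args, NULL, 0); // no backing iovecs yet
 */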

struct virgl_renderer_hdr {
   uint32_t stype;
   uint32_t stype_version;
   uint32_t size;
};

/*
 * "out_num_fds" represents the number of distinct kernel buffers backing an
 * allocation. If this number or "out_fourcc" is zero, the resource is not
 * exportable. The "out_fds" field will be populated with "out_num_fds" file
 * descriptors if "in_export_fds" is non-zero.
 */
struct virgl_renderer_export_query {
   struct virgl_renderer_hdr hdr;
   uint32_t in_resource_id;

   uint32_t out_num_fds;
   uint32_t in_export_fds;
   uint32_t out_fourcc;
   uint32_t pad;

   int32_t out_fds[4];
   uint32_t out_strides[4];
   uint32_t out_offsets[4];
   uint64_t out_modifier;
};
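
/*
 * Example (illustrative sketch): querying the dmabuf export information for a
 * resource. This assumes the query is submitted through virgl_renderer_execute()
 * with a filled-in header; treat the details (stype_version, ownership of the
 * returned fds) as assumptions to verify against the implementation you target.
 * "res_id" is a hypothetical resource handle.
 *
 *   struct virgl_renderer_export_query query = {
 *      .hdr = {
 *         .stype = VIRGL_RENDERER_STRUCTURE_TYPE_EXPORT_QUERY,
 *         .stype_version = 0,
 *         .size = sizeof(query),
 *      },
 *      .in_resource_id = res_id,
 *      .in_export_fds = 1,        // ask for the fds, not just the metadata
 *   };
 *
 *   if (virgl_renderer_execute(&query, sizeof(query)) == 0 &&
 *       query.out_num_fds > 0 && query.out_fourcc != 0) {
 *      // query.out_fds[0 .. out_num_fds-1] now hold dmabuf fds
 *   }
 */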

/*
 * "out_supported_structures_mask" is a bitmask representing the structures that
 * virglrenderer knows how to handle for a given "in_stype_version".
 */

struct virgl_renderer_supported_structures {
   struct virgl_renderer_hdr hdr;
   uint32_t in_stype_version;
   uint32_t out_supported_structures_mask;
};
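
/*
 * Example (illustrative sketch): asking virglrenderer which v0 structures it
 * understands before issuing an export query. Again this assumes the query is
 * delivered via virgl_renderer_execute().
 *
 *   struct virgl_renderer_supported_structures supported = {
 *      .hdr = {
 *         .stype = VIRGL_RENDERER_STRUCTURE_TYPE_SUPPORTED_STRUCTURES,
 *         .stype_version = 0,
 *         .size = sizeof(supported),
 *      },
 *      .in_stype_version = 0,
 *   };
 *
 *   if (virgl_renderer_execute(&supported, sizeof(supported)) == 0 &&
 *       (supported.out_supported_structures_mask &
 *        VIRGL_RENDERER_STRUCTURE_TYPE_EXPORT_QUERY)) {
 *      // export queries are available
 *   }
 */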

/* new API */
/* This typedef must be kept in sync with vrend_debug.h */
typedef void (*virgl_debug_callback_type)(const char *fmt, va_list ap);

VIRGL_EXPORT int virgl_renderer_resource_create(struct virgl_renderer_resource_create_args *args, struct iovec *iov, uint32_t num_iovs);
VIRGL_EXPORT int virgl_renderer_resource_import_eglimage(struct virgl_renderer_resource_create_args *args, void *image);
VIRGL_EXPORT void virgl_renderer_resource_unref(uint32_t res_handle);

VIRGL_EXPORT void virgl_renderer_resource_set_priv(uint32_t res_handle, void *priv);
VIRGL_EXPORT void *virgl_renderer_resource_get_priv(uint32_t res_handle);

VIRGL_EXPORT int virgl_renderer_context_create(uint32_t handle, uint32_t nlen, const char *name);
VIRGL_EXPORT void virgl_renderer_context_destroy(uint32_t handle);

VIRGL_EXPORT int virgl_renderer_submit_cmd(void *buffer,
                                           int ctx_id,
                                           int ndw);

VIRGL_EXPORT int virgl_renderer_transfer_read_iov(uint32_t handle, uint32_t ctx_id,
                                                  uint32_t level, uint32_t stride,
                                                  uint32_t layer_stride,
                                                  struct virgl_box *box,
                                                  uint64_t offset, struct iovec *iov,
                                                  int iovec_cnt);

VIRGL_EXPORT int virgl_renderer_transfer_write_iov(uint32_t handle,
                                                   uint32_t ctx_id,
                                                   int level,
                                                   uint32_t stride,
                                                   uint32_t layer_stride,
                                                   struct virgl_box *box,
                                                   uint64_t offset,
                                                   struct iovec *iovec,
                                                   unsigned int iovec_cnt);

VIRGL_EXPORT void virgl_renderer_get_cap_set(uint32_t set, uint32_t *max_ver,
                                             uint32_t *max_size);

VIRGL_EXPORT void virgl_renderer_fill_caps(uint32_t set, uint32_t version,
                                           void *caps);
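
/*
 * Example (illustrative sketch): the usual two-step capability query. First ask
 * for the maximum version and size of a capability set, then allocate a buffer
 * of that size and let the renderer fill it. The buffer layout is defined in
 * virgl_hw.h; set 2 is assumed here to be the newer caps set exposed to
 * virtio-gpu.
 *
 *   uint32_t max_ver = 0, max_size = 0;
 *   virgl_renderer_get_cap_set(2, &max_ver, &max_size);
 *   if (max_size) {
 *      void *caps = calloc(1, max_size);   // needs <stdlib.h>
 *      if (caps) {
 *         virgl_renderer_fill_caps(2, max_ver, caps);
 *         // hand the caps blob to the guest (e.g. in response to a capset query)
 *         free(caps);
 *      }
 *   }
 */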

VIRGL_EXPORT int virgl_renderer_resource_attach_iov(int res_handle, struct iovec *iov,
                                                    int num_iovs);
VIRGL_EXPORT void virgl_renderer_resource_detach_iov(int res_handle, struct iovec **iov, int *num_iovs);
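
/*
 * Example (illustrative sketch): attaching guest backing pages to a resource.
 * "backing_pages", "page_count" and "res_handle" are hypothetical; in QEMU they
 * come from the guest's attach-backing command. The iovecs must stay valid
 * until they are detached again.
 *
 *   struct iovec *backing_pages; // filled in by the embedder
 *   int page_count;              // filled in by the embedder
 *
 *   if (virgl_renderer_resource_attach_iov(res_handle, backing_pages, page_count) == 0) {
 *      // ... use the resource ...
 *      struct iovec *iov = NULL;
 *      int num = 0;
 *      virgl_renderer_resource_detach_iov(res_handle, &iov, &num);
 *      // iov/num return the previously attached array so the caller can free it
 *   }
 */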

VIRGL_EXPORT int virgl_renderer_create_fence(int client_fence_id, uint32_t ctx_id);

VIRGL_EXPORT void virgl_renderer_force_ctx_0(void);

VIRGL_EXPORT void virgl_renderer_ctx_attach_resource(int ctx_id, int res_handle);
VIRGL_EXPORT void virgl_renderer_ctx_detach_resource(int ctx_id, int res_handle);

VIRGL_EXPORT virgl_debug_callback_type virgl_set_debug_callback(virgl_debug_callback_type cb);
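
/*
 * Example (illustrative sketch): routing virglrenderer debug output to stderr.
 * Any function matching virgl_debug_callback_type will do; this one simply
 * forwards to vfprintf. "my_debug_cb" is a hypothetical name.
 *
 *   #include <stdio.h>
 *
 *   static void my_debug_cb(const char *fmt, va_list ap)
 *   {
 *      vfprintf(stderr, fmt, ap);
 *   }
 *
 *   virgl_debug_callback_type old_cb = virgl_set_debug_callback(my_debug_cb);
 */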

/* return information about a resource */

struct virgl_renderer_resource_info {
   uint32_t handle;
   uint32_t virgl_format;
   uint32_t width;
   uint32_t height;
   uint32_t depth;
   uint32_t flags;
   uint32_t tex_id;
   uint32_t stride;
   int drm_fourcc;
};

VIRGL_EXPORT int virgl_renderer_resource_get_info(int res_handle,
                                                  struct virgl_renderer_resource_info *info);
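
/*
 * Example (illustrative sketch): combining the info and fd queries to export a
 * texture as a dmabuf, roughly what a display integration would do. "res_handle"
 * is hypothetical.
 *
 *   struct virgl_renderer_resource_info info;
 *   if (virgl_renderer_resource_get_info(res_handle, &info) == 0) {
 *      int fd = -1;
 *      if (virgl_renderer_get_fd_for_texture(info.tex_id, &fd) == 0) {
 *         // fd references a buffer of info.width x info.height pixels,
 *         // format info.drm_fourcc, row pitch info.stride
 *      }
 *   }
 */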

VIRGL_EXPORT void virgl_renderer_cleanup(void *cookie);

/* reset the renderer - destroy all contexts and resources */
VIRGL_EXPORT void virgl_renderer_reset(void);

VIRGL_EXPORT int virgl_renderer_get_poll_fd(void);

VIRGL_EXPORT int virgl_renderer_execute(void *execute_args, uint32_t execute_size);

/*
 * These are unstable APIs intended for development and testing only; do not use
 * them in production.
 */
#ifdef VIRGL_RENDERER_UNSTABLE_APIS

#define VIRGL_RENDERER_BLOB_MEM_GUEST 0x0001
#define VIRGL_RENDERER_BLOB_MEM_HOST3D 0x0002
#define VIRGL_RENDERER_BLOB_MEM_HOST3D_GUEST 0x0003

#define VIRGL_RENDERER_BLOB_FLAG_USE_MAPPABLE 0x0001
#define VIRGL_RENDERER_BLOB_FLAG_USE_SHAREABLE 0x0002
#define VIRGL_RENDERER_BLOB_FLAG_USE_CROSS_DEVICE 0x0004

struct virgl_renderer_resource_create_blob_args
{
   uint32_t res_handle;
   uint32_t ctx_id;
   uint32_t blob_mem;
   uint32_t blob_flags;
   uint64_t blob_id;
   uint64_t size;
   const struct iovec *iovecs;
   uint32_t num_iovs;
};

VIRGL_EXPORT int
virgl_renderer_resource_create_blob(const struct virgl_renderer_resource_create_blob_args *args);
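
/*
 * Example (illustrative sketch): turning a context-created host allocation into
 * a blob resource and mapping it on the host. "host_blob_id" is hypothetical:
 * for VIRGL_RENDERER_BLOB_MEM_HOST3D it is assumed to name an allocation the
 * guest context has already created through its command stream. Whether the
 * blob is mappable depends on blob_flags and the renderer, so both return codes
 * need checking.
 *
 *   struct virgl_renderer_resource_create_blob_args blob_args = {
 *      .res_handle = 10,
 *      .ctx_id = 1,
 *      .blob_mem = VIRGL_RENDERER_BLOB_MEM_HOST3D,
 *      .blob_flags = VIRGL_RENDERER_BLOB_FLAG_USE_MAPPABLE,
 *      .blob_id = host_blob_id,
 *      .size = 65536,
 *      .iovecs = NULL,   // no guest backing for a HOST3D blob in this sketch
 *      .num_iovs = 0,
 *   };
 *
 *   if (virgl_renderer_resource_create_blob(&blob_args) == 0) {
 *      void *map = NULL;
 *      uint64_t map_size = 0;
 *      if (virgl_renderer_resource_map(blob_args.res_handle, &map, &map_size) == 0) {
 *         // ... access map[0 .. map_size) ...
 *         virgl_renderer_resource_unmap(blob_args.res_handle);
 *      }
 *   }
 */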

VIRGL_EXPORT int virgl_renderer_resource_map(uint32_t res_handle, void **map, uint64_t *out_size);

VIRGL_EXPORT int virgl_renderer_resource_unmap(uint32_t res_handle);

#define VIRGL_RENDERER_MAP_CACHE_MASK 0x0f
#define VIRGL_RENDERER_MAP_CACHE_NONE 0x00
#define VIRGL_RENDERER_MAP_CACHE_CACHED 0x01
#define VIRGL_RENDERER_MAP_CACHE_UNCACHED 0x02
#define VIRGL_RENDERER_MAP_CACHE_WC 0x03

VIRGL_EXPORT int virgl_renderer_resource_get_map_info(uint32_t res_handle, uint32_t *map_info);
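
/*
 * Example (illustrative sketch): deciding how to expose a mapped blob to the
 * guest based on its caching attributes. The low bits of map_info encode the
 * cache type; mask with VIRGL_RENDERER_MAP_CACHE_MASK before comparing.
 * "res_handle" is hypothetical.
 *
 *   uint32_t map_info = 0;
 *   if (virgl_renderer_resource_get_map_info(res_handle, &map_info) == 0) {
 *      switch (map_info & VIRGL_RENDERER_MAP_CACHE_MASK) {
 *      case VIRGL_RENDERER_MAP_CACHE_CACHED:
 *         // normal cacheable mapping
 *         break;
 *      case VIRGL_RENDERER_MAP_CACHE_WC:
 *         // write-combined mapping; avoid CPU read-back
 *         break;
 *      default:
 *         // uncached or unspecified
 *         break;
 *      }
 *   }
 */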

#endif /* VIRGL_RENDERER_UNSTABLE_APIS */

#endif