virgl: add support for EGL device selection

Add an init flag for preferring a discrete GPU. Modify GBM device selection
to prefer an integrated GPU for display. Report "different GPU" back to the
client so it can enable drawable shadowing. Allocate linear GBM buffers so
they can be shared between the different devices.

Reviewed-by: Gert Wollny <gert.wollny@collabora.com>
Author: Dominik Behr
parent 4acd9f0e34
commit 1c9a7a52e0
8 files changed:

 src/virgl_hw.h         |   1
 src/vrend_renderer.c   |   8
 src/vrend_winsys.c     |  13
 src/vrend_winsys.h     |   1
 src/vrend_winsys_egl.c | 161
 src/vrend_winsys_egl.h |   1
 src/vrend_winsys_gbm.c |   5
 src/vrend_winsys_gbm.h |  37
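Not part of the patch, just context on the new init flag: the renderer reads the
VIRGL_PREFER_DGPU environment variable during EGL initialization (see
virgl_egl_get_device below), so an embedding process that wants the discrete GPU
can set it before bringing virglrenderer up. A minimal sketch, assuming a POSIX
environment; the helper name is made up:

#include <stdlib.h>

/* Hypothetical embedder-side toggle: must run before virgl EGL init so that
 * getenv("VIRGL_PREFER_DGPU") in virgl_egl_get_device() sees the value. */
static void prefer_discrete_gpu(void)
{
   setenv("VIRGL_PREFER_DGPU", "1", 1);
}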

src/virgl_hw.h
@@ -440,6 +440,7 @@ enum virgl_formats {
#define VIRGL_CAP_V2_VIDEO_MEMORY (1 << 2)
#define VIRGL_CAP_V2_MEMINFO (1 << 3)
#define VIRGL_CAP_V2_STRING_MARKER (1 << 4)
#define VIRGL_CAP_V2_DIFFERENT_GPU (1 << 5)
/* virgl bind flags - these are compatible with mesa 10.5 gallium.
* but are fixed, no other should be passed to virgl either.

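Not part of the patch: a minimal sketch of how a client that already parses caps
v2 might honour the new bit; the helper name is made up, and what "drawable
shadowing" concretely means is left to the client.

/* Hypothetical client-side check: when the renderer reports that display and
 * rendering sit on different DRM devices, shadow drawables instead of
 * rendering into the shared (linear) scanout buffer directly. */
static bool client_wants_drawable_shadowing(const union virgl_caps *caps)
{
   return (caps->v2.capability_bits_v2 & VIRGL_CAP_V2_DIFFERENT_GPU) != 0;
}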
src/vrend_renderer.c
@@ -6860,6 +6860,8 @@ static void vrend_resource_gbm_init(struct vrend_resource *gr, uint32_t format)
uint32_t gbm_format = 0;
if (virgl_gbm_convert_format(&format, &gbm_format))
return;
if (vrend_winsys_different_gpu())
gbm_flags |= GBM_BO_USE_LINEAR;
if (gr->base.depth0 != 1 || gr->base.last_level != 0 || gr->base.nr_samples != 0)
return;
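For background, not part of the patch: GBM_BO_USE_LINEAR forces a vendor-neutral
linear layout, which is what makes a buffer allocated on the render GPU
importable by the display GPU. A standalone allocation with that flag would look
roughly like this; the device, size and format are placeholders.

#include <gbm.h>

/* Illustrative only: allocate a scanout buffer both devices can share. */
static struct gbm_bo *alloc_shareable_scanout(struct gbm_device *gbm_dev)
{
   return gbm_bo_create(gbm_dev, 1280, 720, GBM_FORMAT_XRGB8888,
                        GBM_BO_USE_SCANOUT | GBM_BO_USE_LINEAR);
}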
@@ -10444,7 +10446,8 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
#ifdef ENABLE_MINIGBM_ALLOCATION
if (has_feature(feat_memory_object) && has_feature(feat_memory_object_fd)) {
- if (!strcmp(gbm_device_get_backend_name(gbm->device), "i915"))
+ if (!strcmp(gbm_device_get_backend_name(gbm->device), "i915") &&
+     !vrend_winsys_different_gpu())
caps->v2.capability_bits |= VIRGL_CAP_ARB_BUFFER_STORAGE;
}
#endif
@@ -10469,6 +10472,9 @@ static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_c
if (has_feature(feat_khr_debug))
caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_STRING_MARKER;
if (vrend_winsys_different_gpu())
caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_DIFFERENT_GPU;
}
void vrend_renderer_fill_caps(uint32_t set, uint32_t version,

src/vrend_winsys.c
@@ -207,3 +207,16 @@ uint32_t vrend_winsys_query_video_memory(void)
return 0;
#endif
}
/* different_gpu means that GBM and GL renderer are on two different DRM devices.
* Linear buffers are used for scanouts to make them shareable.
* Advise the client to use drawable shadowing for performance.
*/
bool vrend_winsys_different_gpu(void)
{
#ifdef HAVE_EPOXY_EGL_H
if (egl)
return virgl_egl_different_gpu(egl);
#endif
return false;
}

src/vrend_winsys.h
@@ -59,5 +59,6 @@ int vrend_winsys_get_fd_for_texture(uint32_t tex_id, int *fd);
int vrend_winsys_get_fd_for_texture2(uint32_t tex_id, int *fd, int *stride, int *offset);
uint32_t vrend_winsys_query_video_memory(void);
bool vrend_winsys_different_gpu(void);
#endif /* VREND_WINSYS_H */

src/vrend_winsys_egl.c
@@ -76,12 +76,16 @@ struct virgl_egl {
EGLContext egl_ctx;
uint32_t extension_bits;
EGLSyncKHR signaled_fence;
bool different_gpu;
};
static bool virgl_egl_has_extension_in_string(const char *haystack, const char *needle)
{
const unsigned needle_len = strlen(needle);
if (!haystack)
return false;
if (needle_len == 0)
return false;
@@ -122,6 +126,148 @@ static int virgl_egl_init_extensions(struct virgl_egl *egl, const char *extensio
return 0;
}
#ifdef ENABLE_MINIGBM_ALLOCATION
struct egl_funcs {
PFNEGLGETPLATFORMDISPLAYEXTPROC eglGetPlatformDisplay;
PFNEGLQUERYDEVICESEXTPROC eglQueryDevices;
PFNEGLQUERYDEVICESTRINGEXTPROC eglQueryDeviceString;
};
static bool virgl_egl_get_interface(struct egl_funcs *funcs)
{
const char *client_extensions = eglQueryString (NULL, EGL_EXTENSIONS);
assert(funcs);
if (virgl_egl_has_extension_in_string(client_extensions, "EGL_KHR_platform_base")) {
funcs->eglGetPlatformDisplay =
(PFNEGLGETPLATFORMDISPLAYEXTPROC) eglGetProcAddress ("eglGetPlatformDisplay");
} else if (virgl_egl_has_extension_in_string(client_extensions, "EGL_EXT_platform_base")) {
funcs->eglGetPlatformDisplay =
(PFNEGLGETPLATFORMDISPLAYEXTPROC) eglGetProcAddress ("eglGetPlatformDisplayEXT");
}
if (!funcs->eglGetPlatformDisplay)
return false;
if (!virgl_egl_has_extension_in_string(client_extensions, "EGL_EXT_platform_device"))
return false;
if (!virgl_egl_has_extension_in_string(client_extensions, "EGL_EXT_device_enumeration"))
return false;
funcs->eglQueryDevices = (PFNEGLQUERYDEVICESEXTPROC)eglGetProcAddress ("eglQueryDevicesEXT");
if (!funcs->eglQueryDevices)
return false;
if (!virgl_egl_has_extension_in_string(client_extensions, "EGL_EXT_device_query"))
return false;
funcs->eglQueryDeviceString = (PFNEGLQUERYDEVICESTRINGEXTPROC)eglGetProcAddress("eglQueryDeviceStringEXT");
if (!funcs->eglQueryDeviceString)
return false;
return true;
}
static EGLint virgl_egl_find_3d_device(struct gbm_device_info *dev_infos, EGLint num_devices, uint32_t flags)
{
EGLint d;
for (d = 0; d < num_devices; d++) {
if ((dev_infos[d].dev_type_flags & flags) == flags
&& dev_infos[d].dev_type_flags & GBM_DEV_TYPE_FLAG_3D)
return d;
}
return -1;
}
static EGLint virgl_egl_find_matching_device(struct gbm_device_info *dev_infos, EGLint num_devices, int dri_node_num)
{
EGLint d;
for (d = 0; d < num_devices; d++) {
if (dev_infos[d].dri_node_num == dri_node_num)
return d;
}
return -1;
}
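/* Device selection below, in priority order:
 *   1. with VIRGL_PREFER_DGPU set, a discrete 3D-capable EGL device;
 *   2. otherwise, if the GBM device is on an ARM SoC, any 3D-capable device
 *      (render node and display node are commonly separate there);
 *   3. otherwise, the EGL device whose DRI node matches the GBM device.
 * different_gpu is recorded only when the discrete GPU picked in (1) is a
 * different DRM device than the GBM (display) device. */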
static EGLDeviceEXT virgl_egl_get_device(struct virgl_egl *egl, struct egl_funcs *funcs) {
EGLint num_devices = 0;
EGLint max_devices = 64;
EGLDeviceEXT devices[64];
struct gbm_device_info dev_infos[64];
struct gbm_device_info gbm_dev_info;
EGLint device_num = -1;
EGLint d;
if (gbm_detect_device_info(0, gbm_device_get_fd(egl->gbm->device), &gbm_dev_info) < 0)
return EGL_NO_DEVICE_EXT;
if (!funcs->eglQueryDevices(max_devices, devices, &num_devices))
return EGL_NO_DEVICE_EXT;
/* We query EGL_DRM_DEVICE_FILE_EXT without checking for the EGL_EXT_device_drm
* extension; we simply get NULL when it is not available. Checking the extension
* properly would require initializing a display for every device first.
*/
for (d = 0; d < num_devices; d++) {
const char *dev_node = funcs->eglQueryDeviceString(devices[d], EGL_DRM_DEVICE_FILE_EXT);
memset(&dev_infos[d], 0, sizeof(dev_infos[d]));
if (dev_node) {
if (gbm_detect_device_info_path(0, dev_node, dev_infos + d) < 0)
return EGL_NO_DEVICE_EXT;
} else {
dev_infos[d].dri_node_num = -1;
}
}
if (getenv("VIRGL_PREFER_DGPU"))
/* Find a discrete GPU. */
device_num = virgl_egl_find_3d_device(dev_infos, num_devices, GBM_DEV_TYPE_FLAG_DISCRETE);
if (device_num >= 0) {
egl->different_gpu = dev_infos[device_num].dri_node_num != gbm_dev_info.dri_node_num;
} else if (gbm_dev_info.dev_type_flags & GBM_DEV_TYPE_FLAG_ARMSOC) {
/* Find 3D device on ARM SOC. */
device_num = virgl_egl_find_3d_device(dev_infos, num_devices, GBM_DEV_TYPE_FLAG_ARMSOC);
}
if (device_num < 0) {
/* Try to match GBM device. */
device_num = virgl_egl_find_matching_device(dev_infos, num_devices, gbm_dev_info.dri_node_num);
}
if (device_num < 0)
return EGL_NO_DEVICE_EXT;
return devices[device_num];
}
static bool virgl_egl_get_display(struct virgl_egl *egl)
{
struct egl_funcs funcs = { 0 };
EGLDeviceEXT device;
if (!egl->gbm)
return false;
if (!virgl_egl_get_interface(&funcs))
return false;
device = virgl_egl_get_device(egl, &funcs);
if (device == EGL_NO_DEVICE_EXT)
return false;
egl->egl_display = funcs.eglGetPlatformDisplay(EGL_PLATFORM_DEVICE_EXT, device, NULL);
return true;
}
#endif /* ENABLE_MINIGBM_ALLOCATION */
struct virgl_egl *virgl_egl_init(struct virgl_gbm *gbm, bool surfaceless, bool gles)
{
static EGLint conf_att[] = {
@@ -156,9 +302,15 @@ struct virgl_egl *virgl_egl_init(struct virgl_gbm *gbm, bool surfaceless, bool g
goto fail;
egl->gbm = gbm;
+ egl->different_gpu = false;
const char *client_extensions = eglQueryString (NULL, EGL_EXTENSIONS);
- if (client_extensions && strstr(client_extensions, "EGL_KHR_platform_base")) {
+ #ifdef ENABLE_MINIGBM_ALLOCATION
+ if (virgl_egl_get_display(egl)) {
+    /* Make -Wdangling-else happy. */
+ } else /* Fallback to surfaceless. */
+ #endif
+ if (virgl_egl_has_extension_in_string(client_extensions, "EGL_KHR_platform_base")) {
PFNEGLGETPLATFORMDISPLAYEXTPROC get_platform_display =
(PFNEGLGETPLATFORMDISPLAYEXTPROC) eglGetProcAddress ("eglGetPlatformDisplay");
@@ -171,7 +323,7 @@ struct virgl_egl *virgl_egl_init(struct virgl_gbm *gbm, bool surfaceless, bool g
} else
egl->egl_display = get_platform_display (EGL_PLATFORM_GBM_KHR,
(EGLNativeDisplayType)egl->gbm->device, NULL);
- } else if (client_extensions && strstr(client_extensions, "EGL_EXT_platform_base")) {
+ } else if (virgl_egl_has_extension_in_string(client_extensions, "EGL_EXT_platform_base")) {
PFNEGLGETPLATFORMDISPLAYEXTPROC get_platform_display =
(PFNEGLGETPLATFORMDISPLAYEXTPROC) eglGetProcAddress ("eglGetPlatformDisplayEXT");
@@ -584,3 +736,8 @@ bool virgl_egl_export_fence(struct virgl_egl *egl, EGLSyncKHR fence, int *out_fd
*out_fd = eglDupNativeFenceFDANDROID(egl->egl_display, fence);
return *out_fd != EGL_NO_NATIVE_FENCE_FD_ANDROID;
}
bool virgl_egl_different_gpu(struct virgl_egl *egl)
{
return egl->different_gpu;
}

src/vrend_winsys_egl.h
@@ -76,4 +76,5 @@ void virgl_egl_fence_destroy(struct virgl_egl *egl, EGLSyncKHR fence);
bool virgl_egl_client_wait_fence(struct virgl_egl *egl, EGLSyncKHR fence, uint64_t timeout);
bool virgl_egl_export_signaled_fence(struct virgl_egl *egl, int *out_fd);
bool virgl_egl_export_fence(struct virgl_egl *egl, EGLSyncKHR fence, int *out_fd);
bool virgl_egl_different_gpu(struct virgl_egl *egl);
#endif

src/vrend_winsys_gbm.c
@@ -268,6 +268,10 @@ struct virgl_gbm *virgl_gbm_init(int fd)
gbm->fd = -1;
if (fd < 0) {
+ #ifdef ENABLE_MINIGBM_ALLOCATION
+ gbm->fd = gbm_get_default_device_fd();
+ if (gbm->fd < 0)
+ #endif
gbm->fd = rendernode_open();
if (gbm->fd < 0)
goto out_error;
@@ -281,6 +285,7 @@ struct virgl_gbm *virgl_gbm_init(int fd)
gbm->device = gbm_create_device(fd);
if (!gbm->device)
goto out_error;
gbm->fd = fd;
}
return gbm;

src/vrend_winsys_gbm.h
@@ -42,7 +42,7 @@
#ifndef MINIGBM
- #define GBM_BO_USE_TEXTURING (1 << 5),
+ #define GBM_BO_USE_TEXTURING (1 << 5)
#define GBM_BO_USE_CAMERA_WRITE (1 << 6)
#define GBM_BO_USE_CAMERA_READ (1 << 7)
#define GBM_BO_USE_PROTECTED (1 << 8)
@@ -56,6 +56,41 @@
#endif
#ifdef ENABLE_MINIGBM_ALLOCATION
#define GBM_DEV_TYPE_FLAG_DISCRETE (1u << 0) /* Discrete GPU. Separate chip, dedicated VRAM. */
#define GBM_DEV_TYPE_FLAG_DISPLAY (1u << 1) /* Device capable of display. */
#define GBM_DEV_TYPE_FLAG_3D (1u << 2) /* Device capable of 3D rendering. */
#define GBM_DEV_TYPE_FLAG_ARMSOC (1u << 3) /* Device on ARM SOC. */
#define GBM_DEV_TYPE_FLAG_USB (1u << 4) /* USB device, udl, evdi. */
#define GBM_DEV_TYPE_FLAG_BLOCKED (1u << 5) /* Unsuitable device e.g. vgem, udl, evdi. */
#define GBM_DEV_TYPE_FLAG_INTERNAL_LCD (1u << 6) /* Device is driving internal LCD. */
struct gbm_device_info {
uint32_t dev_type_flags;
int dri_node_num; /* DRI node number (0..63), for easy matching of devices. */
unsigned int connectors;
unsigned int connected;
};
#define GBM_DETECT_FLAG_CONNECTED (1u << 0) /* Check if any connectors are connected. SLOW! */
#ifdef MINIGBM
int gbm_detect_device_info(unsigned int detect_flags, int fd, struct gbm_device_info *info);
int gbm_detect_device_info_path(unsigned int detect_flags, const char *dev_node,
struct gbm_device_info *info);
/*
* Select the "default" device to use for the graphics memory allocator.
*/
int gbm_get_default_device_fd(void);
#else
#define gbm_detect_device_info(detect_flags, fd, info) -1
#define gbm_detect_device_info_path(detect_flags, dev_node, info) -1
#define gbm_get_default_device_fd() -1
#endif /* MINIGBM */
#endif /* ENABLE_MINIGBM_ALLOCATION */
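Not part of the patch: a sketch of how these helpers can be combined to identify
an integrated, display-capable device, which is what the commit message means by
"prefer integrated GPU for display". The fd is assumed to be an already-open DRM
device, the helper name is made up, and error handling is minimal.

#include <stdbool.h>

/* Illustrative only: classify an open DRM fd with the minigbm helpers above. */
static bool is_integrated_display_device(int fd)
{
   struct gbm_device_info info;

   if (gbm_detect_device_info(0, fd, &info) < 0)
      return false;

   return (info.dev_type_flags & GBM_DEV_TYPE_FLAG_DISPLAY) &&
          !(info.dev_type_flags & GBM_DEV_TYPE_FLAG_DISCRETE) &&
          !(info.dev_type_flags & GBM_DEV_TYPE_FLAG_BLOCKED);
}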
/*
* If fd >= 0, virglrenderer owns the fd since it was opened via a rendernode
* query. If fd < 0, the gbm device was opened with the fd provided by the
