kwin/glxbackend.cpp

/********************************************************************
KWin - the KDE window manager
This file is part of the KDE project.
Copyright (C) 2006 Lubos Lunak <l.lunak@kde.org>
Copyright (C) 2012 Martin Gräßlin <mgraesslin@kde.org>
Based on glcompmgr code by Felix Bellaby.
Using code from Compiz and Beryl.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*********************************************************************/
// TODO: cmake magic
#ifndef KWIN_HAVE_OPENGLES
// own
#include "glxbackend.h"
// kwin
#include "options.h"
#include "utils.h"
#include "overlaywindow.h"
// kwin libs
#include <kwinglplatform.h>
// Qt
#include <QDebug>
#include <QOpenGLContext>
// system
#include <unistd.h>
namespace KWin
{
GlxBackend::GlxBackend()
: OpenGLBackend()
, m_overlayWindow(new OverlayWindow())
, window(None)
, fbconfig(NULL)
, glxWindow(None)
, ctx(nullptr)
, m_bufferAge(0)
, haveSwapInterval(false)
{
init();
}
GlxBackend::~GlxBackend()
{
if (isFailed()) {
m_overlayWindow->destroy();
}
// TODO: cleanup in error case
// do cleanup after initBuffer()
cleanupGL();
checkGLError("Cleanup");
doneCurrent();
if (ctx)
glXDestroyContext(display(), ctx);
if (glxWindow)
glXDestroyWindow(display(), glxWindow);
if (window)
XDestroyWindow(display(), window);
overlayWindow()->destroy();
delete m_overlayWindow;
}
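// Flags shared with the swap-behaviour (triple buffering) detection that is
// carried out with m_swapProfiler during the first buffer swaps.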
static bool gs_tripleBufferUndetected = true;
static bool gs_tripleBufferNeedsDetection = false;
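// Sets up GLX: checks the GLX version, chooses the framebuffer configurations,
// creates the rendering context and configures v-sync (swap interval or glXWaitVideoSync).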
void GlxBackend::init()
{
initGLX();
// require at least GLX 1.3
if (!hasGLXVersion(1, 3)) {
setFailed(QStringLiteral("Requires at least GLX 1.3"));
return;
}
if (!initDrawableConfigs()) {
setFailed(QStringLiteral("Could not initialize the drawable configs"));
return;
}
if (!initBuffer()) {
setFailed(QStringLiteral("Could not initialize the buffer"));
return;
}
if (!initRenderingContext()) {
setFailed(QStringLiteral("Could not initialize rendering context"));
return;
}
// Initialize OpenGL
GLPlatform *glPlatform = GLPlatform::instance();
glPlatform->detect(GlxPlatformInterface);
if (GLPlatform::instance()->driver() == Driver_Intel)
options->setUnredirectFullscreen(false); // bug #252817
options->setGlPreferBufferSwap(options->glPreferBufferSwap()); // resolve autosetting
if (options->glPreferBufferSwap() == Options::AutoSwapStrategy)
options->setGlPreferBufferSwap('e'); // for unknown drivers - should not happen
glPlatform->printResults();
initGL(GlxPlatformInterface);
// Check whether certain features are supported
haveSwapInterval = glXSwapIntervalMESA || glXSwapIntervalEXT || glXSwapIntervalSGI;
setSupportsBufferAge(false);
if (hasGLExtension("GLX_EXT_buffer_age")) {
const QByteArray useBufferAge = qgetenv("KWIN_USE_BUFFER_AGE");
if (useBufferAge != "0")
setSupportsBufferAge(true);
}
setSyncsToVBlank(false);
setBlocksForRetrace(false);
haveWaitSync = false;
gs_tripleBufferNeedsDetection = false;
m_swapProfiler.init();
const bool wantSync = options->glPreferBufferSwap() != Options::NoSwapEncourage;
if (wantSync && glXIsDirect(display(), ctx)) {
if (haveSwapInterval) { // glXSwapInterval is preferred being more reliable
setSwapInterval(1);
setSyncsToVBlank(true);
const QByteArray tripleBuffer = qgetenv("KWIN_TRIPLE_BUFFER");
if (!tripleBuffer.isEmpty()) {
setBlocksForRetrace(qstrcmp(tripleBuffer, "0") == 0);
gs_tripleBufferUndetected = false;
}
gs_tripleBufferNeedsDetection = gs_tripleBufferUndetected;
} else if (glXGetVideoSync) {
unsigned int sync;
if (glXGetVideoSync(&sync) == 0 && glXWaitVideoSync(1, 0, &sync) == 0) {
setSyncsToVBlank(true);
setBlocksForRetrace(true);
haveWaitSync = true;
} else
qWarning() << "NO VSYNC! glXSwapInterval is not supported, glXWaitVideoSync is supported but broken";
} else
qWarning() << "NO VSYNC! neither glXSwapInterval nor glXWaitVideoSync is supported";
} else {
// disable v-sync (if possible)
setSwapInterval(0);
}
if (glPlatform->isVirtualBox()) {
// VirtualBox does not support glXQueryDrawable.
// This should actually be in kwinglutils_funcs, but glXQueryDrawable does not seem to be provided by an extension
// and the GLPlatform has not been initialized yet when initGLX() is called.
glXQueryDrawable = NULL;
}
setIsDirectRendering(bool(glXIsDirect(display(), ctx)));
qDebug() << "Direct rendering:" << isDirectRendering();
}
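// Creates the OpenGL context, preferring glXCreateContextAttribsARB() with a 3.1 core
// and/or robust profile where requested and available, falling back to a legacy context,
// and makes it current.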
bool GlxBackend::initRenderingContext()
{
const bool direct = true;
// Use glXCreateContextAttribsARB() when it's available
if (glXCreateContextAttribsARB) {
const int attribs_31_core_robustness[] = {
GLX_CONTEXT_MAJOR_VERSION_ARB, 3,
GLX_CONTEXT_MINOR_VERSION_ARB, 1,
GLX_CONTEXT_FLAGS_ARB, GLX_CONTEXT_ROBUST_ACCESS_BIT_ARB,
GLX_CONTEXT_RESET_NOTIFICATION_STRATEGY_ARB, GLX_LOSE_CONTEXT_ON_RESET_ARB,
0
};
const int attribs_31_core[] = {
GLX_CONTEXT_MAJOR_VERSION_ARB, 3,
GLX_CONTEXT_MINOR_VERSION_ARB, 1,
0
};
const int attribs_legacy_robustness[] = {
GLX_CONTEXT_FLAGS_ARB, GLX_CONTEXT_ROBUST_ACCESS_BIT_ARB,
GLX_CONTEXT_RESET_NOTIFICATION_STRATEGY_ARB, GLX_LOSE_CONTEXT_ON_RESET_ARB,
0
};
const int attribs_legacy[] = {
GLX_CONTEXT_MAJOR_VERSION_ARB, 1,
GLX_CONTEXT_MINOR_VERSION_ARB, 2,
0
};
const bool have_robustness = hasGLExtension(QStringLiteral("GLX_ARB_create_context_robustness"));
// Try to create a 3.1 context first
if (options->glCoreProfile()) {
if (have_robustness)
ctx = glXCreateContextAttribsARB(display(), fbconfig, 0, direct, attribs_31_core_robustness);
if (!ctx)
ctx = glXCreateContextAttribsARB(display(), fbconfig, 0, direct, attribs_31_core);
}
if (!ctx && have_robustness)
ctx = glXCreateContextAttribsARB(display(), fbconfig, 0, direct, attribs_legacy_robustness);
if (!ctx)
ctx = glXCreateContextAttribsARB(display(), fbconfig, 0, direct, attribs_legacy);
}
if (!ctx)
ctx = glXCreateNewContext(display(), fbconfig, GLX_RGBA_TYPE, NULL, direct);
if (!ctx) {
qDebug() << "Failed to create an OpenGL context.";
return false;
}
if (!glXMakeCurrent(display(), glxWindow, ctx)) {
qDebug() << "Failed to make the OpenGL context current.";
glXDestroyContext(display(), ctx);
ctx = 0;
return false;
}
return true;
}
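// Creates the rendering surface: a full-screen child window of the overlay window
// using the chosen fbconfig's visual, plus the GLX window for it.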
bool GlxBackend::initBuffer()
{
if (!initFbConfig())
return false;
if (overlayWindow()->create()) {
// Try to create double-buffered window in the overlay
XVisualInfo* visual = glXGetVisualFromFBConfig(display(), fbconfig);
if (!visual) {
qCritical() << "Failed to get visual from fbconfig";
return false;
}
XSetWindowAttributes attrs;
attrs.colormap = XCreateColormap(display(), rootWindow(), visual->visual, AllocNone);
window = XCreateWindow(display(), overlayWindow()->window(), 0, 0, displayWidth(), displayHeight(),
0, visual->depth, InputOutput, visual->visual, CWColormap, &attrs);
glxWindow = glXCreateWindow(display(), fbconfig, window, NULL);
overlayWindow()->setup(window);
XFree(visual);
} else {
qCritical() << "Failed to create overlay window";
return false;
}
int vis_buffer;
glXGetFBConfigAttrib(display(), fbconfig, GLX_VISUAL_ID, &vis_buffer);
XVisualInfo* visinfo_buffer = glXGetVisualFromFBConfig(display(), fbconfig);
qDebug() << "Buffer visual (depth " << visinfo_buffer->depth << "): 0x" << QString::number(vis_buffer, 16);
XFree(visinfo_buffer);
return true;
}
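// Picks the double-buffered GLXFBConfig used for the back buffer.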
bool GlxBackend::initFbConfig()
{
const int attribs[] = {
GLX_RENDER_TYPE, GLX_RGBA_BIT,
GLX_RED_SIZE, 1,
GLX_GREEN_SIZE, 1,
GLX_BLUE_SIZE, 1,
GLX_ALPHA_SIZE, 0,
GLX_DEPTH_SIZE, 0,
GLX_STENCIL_SIZE, 0,
GLX_CONFIG_CAVEAT, GLX_NONE,
GLX_DOUBLEBUFFER, true,
0
};
// Try to find a double buffered configuration
int count = 0;
GLXFBConfig *configs = glXChooseFBConfig(display(), DefaultScreen(display()), attribs, &count);
if (count > 0) {
fbconfig = configs[0];
XFree(configs);
}
if (fbconfig == NULL) {
qCritical() << "Failed to find a usable framebuffer configuration";
return false;
}
return true;
}
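// Finds, for each supported depth, an fbconfig that can be bound to a texture with
// GLX_EXT_texture_from_pixmap and caches its properties in fbcdrawableinfo.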
bool GlxBackend::initDrawableConfigs()
{
const int attribs[] = {
GLX_RENDER_TYPE, GLX_RGBA_BIT,
GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT | GLX_PIXMAP_BIT,
GLX_X_VISUAL_TYPE, GLX_TRUE_COLOR,
GLX_X_RENDERABLE, True,
GLX_CONFIG_CAVEAT, int(GLX_DONT_CARE), // The ARGB32 visual is marked non-conformant in Catalyst
GLX_RED_SIZE, 5,
GLX_GREEN_SIZE, 5,
GLX_BLUE_SIZE, 5,
GLX_ALPHA_SIZE, 0,
GLX_STENCIL_SIZE, 0,
GLX_DEPTH_SIZE, 0,
0
};
int count = 0;
GLXFBConfig *configs = glXChooseFBConfig(display(), DefaultScreen(display()), attribs, &count);
if (count < 1) {
qCritical() << "Could not find any usable framebuffer configurations.";
return false;
}
for (int i = 0; i <= 32; i++) {
fbcdrawableinfo[i].fbconfig = NULL;
fbcdrawableinfo[i].bind_texture_format = 0;
fbcdrawableinfo[i].texture_targets = 0;
fbcdrawableinfo[i].y_inverted = 0;
fbcdrawableinfo[i].mipmap = 0;
}
// Find the first usable framebuffer configuration for each depth.
// Single-buffered ones will appear first in the list.
const int depths[] = { 15, 16, 24, 30, 32 };
for (unsigned int i = 0; i < sizeof(depths) / sizeof(depths[0]); i++) {
const int depth = depths[i];
for (int j = 0; j < count; j++) {
int alpha_size, buffer_size;
glXGetFBConfigAttrib(display(), configs[j], GLX_ALPHA_SIZE, &alpha_size);
glXGetFBConfigAttrib(display(), configs[j], GLX_BUFFER_SIZE, &buffer_size);
if (buffer_size != depth && (buffer_size - alpha_size) != depth)
continue;
if (depth == 32 && alpha_size != 8)
continue;
XVisualInfo *vi = glXGetVisualFromFBConfig(display(), configs[j]);
if (vi == NULL)
continue;
int visual_depth = vi->depth;
XFree(vi);
if (visual_depth != depth)
continue;
int bind_rgb, bind_rgba;
glXGetFBConfigAttrib(display(), configs[j], GLX_BIND_TO_TEXTURE_RGBA_EXT, &bind_rgba);
glXGetFBConfigAttrib(display(), configs[j], GLX_BIND_TO_TEXTURE_RGB_EXT, &bind_rgb);
// Skip this config if it cannot be bound to a texture
if (!bind_rgb && !bind_rgba)
continue;
int texture_format;
if (depth == 32)
texture_format = bind_rgba ? GLX_TEXTURE_FORMAT_RGBA_EXT : GLX_TEXTURE_FORMAT_RGB_EXT;
else
texture_format = bind_rgb ? GLX_TEXTURE_FORMAT_RGB_EXT : GLX_TEXTURE_FORMAT_RGBA_EXT;
int y_inverted, texture_targets;
glXGetFBConfigAttrib(display(), configs[j], GLX_BIND_TO_TEXTURE_TARGETS_EXT, &texture_targets);
glXGetFBConfigAttrib(display(), configs[j], GLX_Y_INVERTED_EXT, &y_inverted);
fbcdrawableinfo[depth].fbconfig = configs[j];
fbcdrawableinfo[depth].bind_texture_format = texture_format;
fbcdrawableinfo[depth].texture_targets = texture_targets;
fbcdrawableinfo[depth].y_inverted = y_inverted;
fbcdrawableinfo[depth].mipmap = 0;
break;
}
}
if (count)
XFree(configs);
if (fbcdrawableinfo[DefaultDepth(display(), DefaultScreen(display()))].fbconfig == NULL) {
qCritical() << "Could not find a framebuffer configuration for the default depth.";
return false;
}
if (fbcdrawableinfo[32].fbconfig == NULL) {
qCritical() << "Could not find a framebuffer configuration for depth 32.";
return false;
}
for (int i = 0; i <= 32; i++) {
if (fbcdrawableinfo[i].fbconfig == NULL)
continue;
int vis_drawable = 0;
glXGetFBConfigAttrib(display(), fbcdrawableinfo[i].fbconfig, GLX_VISUAL_ID, &vis_drawable);
qDebug() << "Drawable visual (depth " << i << "): 0x" << QString::number(vis_drawable, 16);
}
return true;
}
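// Sets the swap interval through whichever swap-control extension is available.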
void GlxBackend::setSwapInterval(int interval)
{
if (glXSwapIntervalEXT)
glXSwapIntervalEXT(display(), glxWindow, interval);
else if (glXSwapIntervalMESA)
glXSwapIntervalMESA(interval);
else if (glXSwapIntervalSGI)
glXSwapIntervalSGI(interval);
}
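// Waits for the vertical retrace using GLX_SGI_video_sync; only used when no
// swap interval extension is available.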
void GlxBackend::waitSync()
{
// NOTE that vsync has no effect with indirect rendering
if (haveWaitSync) {
uint sync;
#if 0
// TODO: why precisely is this important?
// the sync counter /can/ perform multiple steps during glXGetVideoSync & glXWaitVideoSync
// but this only leads to waiting for two frames??!?
glXGetVideoSync(&sync);
glXWaitVideoSync(2, (sync + 1) % 2, &sync);
#else
glXWaitVideoSync(1, 0, &sync);
#endif
}
}
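// Posts the back buffer. Full repaints go through glXSwapBuffers (with optional
// swap-behaviour profiling), partial repaints through glXCopySubBuffer or, as a
// fallback, a front buffer copy.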
void GlxBackend::present()
{
if (lastDamage().isEmpty())
return;
const QRegion displayRegion(0, 0, displayWidth(), displayHeight());
const bool fullRepaint = supportsBufferAge() || (lastDamage() == displayRegion);
if (fullRepaint) {
if (haveSwapInterval) {
if (gs_tripleBufferNeedsDetection) {
glXWaitGL();
m_swapProfiler.begin();
}
glXSwapBuffers(display(), glxWindow);
if (gs_tripleBufferNeedsDetection) {
glXWaitGL();
if (char result = m_swapProfiler.end()) {
gs_tripleBufferUndetected = gs_tripleBufferNeedsDetection = false;
if (result == 'd' && GLPlatform::instance()->driver() == Driver_NVidia) {
// TODO this is a workaround, we should get __GL_YIELD set before libGL checks it
if (qstrcmp(qgetenv("__GL_YIELD"), "USLEEP")) {
options->setGlPreferBufferSwap(0);
setSwapInterval(0);
qWarning() << "\nIt seems you are using the nvidia driver without triple buffering\n"
"You must export __GL_YIELD=\"USLEEP\" to prevent large CPU overhead on synced swaps\n"
"Preferably, enable the TripleBuffer Option in the xorg.conf Device\n"
"For this reason, the tearing prevention has been disabled.\n"
"See https://bugs.kde.org/show_bug.cgi?id=322060\n";
}
}
setBlocksForRetrace(result == 'd');
}
}
} else {
waitSync();
glXSwapBuffers(display(), glxWindow);
}
if (supportsBufferAge()) {
glXQueryDrawable(display(), glxWindow, GLX_BACK_BUFFER_AGE_EXT, (GLuint *) &m_bufferAge);
}
} else if (glXCopySubBuffer) {
foreach (const QRect & r, lastDamage().rects()) {
// convert to OpenGL coordinates
int y = displayHeight() - r.y() - r.height();
glXCopySubBuffer(display(), glxWindow, r.x(), y, r.width(), r.height());
}
} else { // Copy Pixels (horribly slow on Mesa)
glDrawBuffer(GL_FRONT);
SceneOpenGL::copyPixels(lastDamage());
glDrawBuffer(GL_BACK);
}
setLastDamage(QRegion());
if (!supportsBufferAge()) {
glXWaitGL();
XFlush(display());
}
}
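// Resizes the output window and viewport after a screen geometry change; the back
// buffer contents are undefined afterwards.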
void GlxBackend::screenGeometryChanged(const QSize &size)
{
doneCurrent();
XMoveResizeWindow(display(), window, 0, 0, size.width(), size.height());
overlayWindow()->setup(window);
Xcb::sync();
makeCurrent();
glViewport(0, 0, size.width(), size.height());
// The back buffer contents are now undefined
m_bufferAge = 0;
}
SceneOpenGL::TexturePrivate *GlxBackend::createBackendTexture(SceneOpenGL::Texture *texture)
{
return new GlxTexture(texture, this);
}
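// Begins a frame: posts the previous frame if it is still pending and, when buffer
// age is supported, returns the additional region that has to be repainted.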
QRegion GlxBackend::prepareRenderingFrame()
{
QRegion repaint;
if (gs_tripleBufferNeedsDetection) {
// The composite timer floors the repaint frequency. This can pollute our triple buffering
// detection because the glXSwapBuffers call for the new frame has to wait until the pending
// one is scanned out.
// So we compensate for that by waiting an extra millisecond to give the driver the chance to
// flush the buffer queue.
usleep(1000);
}
present();
if (supportsBufferAge())
repaint = accumulatedDamageHistory(m_bufferAge);
startRenderTimer();
glXWaitX();
return repaint;
}
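// Ends a frame: remembers the damage and either presents immediately or, if the swap
// blocks for the retrace, defers presenting to the next prepareRenderingFrame().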
void GlxBackend::endRenderingFrame(const QRegion &renderedRegion, const QRegion &damagedRegion)
{
if (damagedRegion.isEmpty()) {
setLastDamage(QRegion());
// If the damaged region of a window is fully occluded, the only
// rendering done, if any, will have been to repair a reused back
// buffer, making it identical to the front buffer.
//
// In this case we won't post the back buffer. Instead we'll just
// set the buffer age to 1, so the repaired regions won't be
// rendered again in the next frame.
if (!renderedRegion.isEmpty())
glFlush();
m_bufferAge = 1;
return;
}
setLastDamage(renderedRegion);
if (!blocksForRetrace()) {
// This also sets lastDamage to empty which prevents the frame from
// being posted again when prepareRenderingFrame() is called.
present();
} else {
// Make sure that the GPU begins processing the command stream
// now and not the next time prepareRenderingFrame() is called.
glFlush();
}
if (overlayWindow()->window()) // show the window only after the first pass,
overlayWindow()->show(); // since that pass may take long
// Save the damaged region to history
if (supportsBufferAge())
addToDamageHistory(damagedRegion);
}
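// Makes the compositing GLX context current; any current QOpenGLContext is released
// first so that Qt does not keep a stale notion of the current context.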
bool GlxBackend::makeCurrent()
{
if (QOpenGLContext *context = QOpenGLContext::currentContext()) {
// Workaround to tell Qt that no QOpenGLContext is current
context->doneCurrent();
}
const bool current = glXMakeCurrent(display(), glxWindow, ctx);
return current;
}
void GlxBackend::doneCurrent()
{
glXMakeCurrent(display(), None, nullptr);
}
OverlayWindow* GlxBackend::overlayWindow()
{
return m_overlayWindow;
}
bool GlxBackend::usesOverlayWindow() const
{
return true;
}
/********************************************************
* GlxTexture
*******************************************************/
GlxTexture::GlxTexture(SceneOpenGL::Texture *texture, GlxBackend *backend)
: SceneOpenGL::TexturePrivate()
, q(texture)
, m_backend(backend)
, m_glxpixmap(None)
{
}
GlxTexture::~GlxTexture()
{
if (m_glxpixmap != None) {
if (!options->isGlStrictBinding()) {
glXReleaseTexImageEXT(display(), m_glxpixmap, GLX_FRONT_LEFT_EXT);
}
glXDestroyPixmap(display(), m_glxpixmap);
m_glxpixmap = None;
}
}
void GlxTexture::onDamage()
{
if (options->isGlStrictBinding() && m_glxpixmap) {
glXReleaseTexImageEXT(display(), m_glxpixmap, GLX_FRONT_LEFT_EXT);
glXBindTexImageEXT(display(), m_glxpixmap, GLX_FRONT_LEFT_EXT, NULL);
}
GLTexturePrivate::onDamage();
}
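// Determines whether the GLX pixmap should be sampled as GL_TEXTURE_2D or
// GL_TEXTURE_RECTANGLE and sets the texture coordinate scale accordingly.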
void GlxTexture::findTarget()
2011-01-30 14:34:42 +00:00
{
unsigned int new_target = 0;
if (glXQueryDrawable && m_glxpixmap != None)
glXQueryDrawable(display(), m_glxpixmap, GLX_TEXTURE_TARGET_EXT, &new_target);
// HACK: this used to be a hack for Xgl.
// without this hack the NVIDIA blob aborts when trying to bind a texture from
// a pixmap icon
if (new_target == 0) {
if (GLTexture::NPOTTextureSupported() ||
(isPowerOfTwo(m_size.width()) && isPowerOfTwo(m_size.height()))) {
new_target = GLX_TEXTURE_2D_EXT;
} else {
new_target = GLX_TEXTURE_RECTANGLE_EXT;
}
}
switch(new_target) {
case GLX_TEXTURE_2D_EXT:
m_target = GL_TEXTURE_2D;
m_scale.setWidth(1.0f / m_size.width());
m_scale.setHeight(1.0f / m_size.height());
break;
case GLX_TEXTURE_RECTANGLE_EXT:
m_target = GL_TEXTURE_RECTANGLE_ARB;
m_scale.setWidth(1.0f);
m_scale.setHeight(1.0f);
break;
default:
abort();
}
}
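// Wraps the window pixmap in a GLX pixmap and binds it to a texture via
// GLX_EXT_texture_from_pixmap, using the cached fbconfig for the pixmap's depth.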
bool GlxTexture::loadTexture(const Pixmap& pix, const QSize& size, int depth)
{
#ifdef CHECK_GL_ERROR
checkGLError("TextureLoad1");
#endif
if (pix == None || size.isEmpty() || depth < 1)
return false;
if (m_backend->fbcdrawableinfo[ depth ].fbconfig == NULL) {
qDebug() << "No framebuffer configuration for depth " << depth
2011-01-30 14:34:42 +00:00
<< "; not binding pixmap" << endl;
return false;
}
m_size = size;
// new texture, or texture contents changed; mipmaps now invalid
q->setDirty();
#ifdef CHECK_GL_ERROR
checkGLError("TextureLoad2");
#endif
// tfp mode, simply bind the pixmap to texture
glGenTextures(1, &m_texture);
// The GLX pixmap references the contents of the original pixmap, so it doesn't
// need to be recreated when the contents change.
// The texture may or may not use the same storage depending on the EXT_tfp
// implementation. When options->glStrictBinding is true, the texture uses
// a different storage and needs to be updated with a call to
// glXBindTexImageEXT() when the contents of the pixmap has changed.
int attrs[] = {
GLX_TEXTURE_FORMAT_EXT, m_backend->fbcdrawableinfo[ depth ].bind_texture_format,
GLX_MIPMAP_TEXTURE_EXT, m_backend->fbcdrawableinfo[ depth ].mipmap > 0,
None, None, None
};
// Specifying the texture target explicitly is reported to cause a performance
// regression with R300G (see bug #256654).
if (GLPlatform::instance()->driver() != Driver_R300G) {
if ((m_backend->fbcdrawableinfo[ depth ].texture_targets & GLX_TEXTURE_2D_BIT_EXT) &&
(GLTexture::NPOTTextureSupported() ||
(isPowerOfTwo(size.width()) && isPowerOfTwo(size.height())))) {
attrs[ 4 ] = GLX_TEXTURE_TARGET_EXT;
attrs[ 5 ] = GLX_TEXTURE_2D_EXT;
} else if (m_backend->fbcdrawableinfo[ depth ].texture_targets & GLX_TEXTURE_RECTANGLE_BIT_EXT) {
attrs[ 4 ] = GLX_TEXTURE_TARGET_EXT;
attrs[ 5 ] = GLX_TEXTURE_RECTANGLE_EXT;
}
}
m_glxpixmap = glXCreatePixmap(display(), m_backend->fbcdrawableinfo[ depth ].fbconfig, pix, attrs);
#ifdef CHECK_GL_ERROR
checkGLError("TextureLoadTFP1");
#endif
findTarget();
m_yInverted = m_backend->fbcdrawableinfo[ depth ].y_inverted ? true : false;
m_canUseMipmaps = m_backend->fbcdrawableinfo[ depth ].mipmap > 0;
q->setFilter(m_backend->fbcdrawableinfo[ depth ].mipmap > 0 ? GL_NEAREST_MIPMAP_LINEAR : GL_NEAREST);
glBindTexture(m_target, m_texture);
#ifdef CHECK_GL_ERROR
checkGLError("TextureLoadTFP2");
#endif
glXBindTexImageEXT(display(), m_glxpixmap, GLX_FRONT_LEFT_EXT, NULL);
#ifdef CHECK_GL_ERROR
checkGLError("TextureLoad0");
#endif
updateMatrix();
unbind();
return true;
}
OpenGLBackend *GlxTexture::backend()
{
return m_backend;
}
} // namespace
#endif