/*
    KWin - the KDE window manager
    This file is part of the KDE project.

    SPDX-FileCopyrightText: 2006 Lubos Lunak <l.lunak@kde.org>
    SPDX-FileCopyrightText: 2010 Sebastian Sauer <sebsauer@kdab.com>

    SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "zoom.h"
// KConfigSkeleton
#include "zoomconfig.h"
#if HAVE_ACCESSIBILITY
#include "accessibilityintegration.h"
#endif
#include <QAction>
#include <QApplication>
#include <QStyle>
#include <QVector2D>
#include <kstandardaction.h>
#include <KConfigGroup>
#include <KGlobalAccel>
#include <KLocalizedString>
#include <kwinglutils.h>
#ifdef KWIN_HAVE_XRENDER_COMPOSITING
#include <kwinxrenderutils.h>
#include <xcb/render.h>
#endif
namespace KWin
{

ZoomEffect::ZoomEffect()
    : Effect()
    , zoom(1)
    , target_zoom(1)
    , polling(false)
    , zoomFactor(1.25)
    , mouseTracking(MouseTrackingProportional)
    , mousePointer(MousePointerScale)
    , focusDelay(350) // in milliseconds
    , imageWidth(0)
    , imageHeight(0)
    , isMouseHidden(false)
    , xMove(0)
    , yMove(0)
    , moveFactor(20.0)
    , lastPresentTime(std::chrono::milliseconds::zero())
{
    initConfig<ZoomConfig>();
    QAction* a = nullptr;
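    // Global shortcuts: Meta+= zooms in, Meta+- zooms out, Meta+0 restores the
    // actual size; Ctrl+Meta plus a vertical axis (wheel) event zooms as well.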
    a = KStandardAction::zoomIn(this, SLOT(zoomIn()), this);
    KGlobalAccel::self()->setDefaultShortcut(a, QList<QKeySequence>() << Qt::META + Qt::Key_Equal);
    KGlobalAccel::self()->setShortcut(a, QList<QKeySequence>() << Qt::META + Qt::Key_Equal);
    effects->registerGlobalShortcut(Qt::META + Qt::Key_Equal, a);
    effects->registerAxisShortcut(Qt::ControlModifier | Qt::MetaModifier, PointerAxisDown, a);

    a = KStandardAction::zoomOut(this, SLOT(zoomOut()), this);
    KGlobalAccel::self()->setDefaultShortcut(a, QList<QKeySequence>() << Qt::META + Qt::Key_Minus);
    KGlobalAccel::self()->setShortcut(a, QList<QKeySequence>() << Qt::META + Qt::Key_Minus);
    effects->registerGlobalShortcut(Qt::META + Qt::Key_Minus, a);
    effects->registerAxisShortcut(Qt::ControlModifier | Qt::MetaModifier, PointerAxisUp, a);

    a = KStandardAction::actualSize(this, SLOT(actualSize()), this);
    KGlobalAccel::self()->setDefaultShortcut(a, QList<QKeySequence>() << Qt::META + Qt::Key_0);
    KGlobalAccel::self()->setShortcut(a, QList<QKeySequence>() << Qt::META + Qt::Key_0);
    effects->registerGlobalShortcut(Qt::META + Qt::Key_0, a);

    a = new QAction(this);
    a->setObjectName(QStringLiteral("MoveZoomLeft"));
    a->setText(i18n("Move Zoomed Area to Left"));
    KGlobalAccel::self()->setDefaultShortcut(a, QList<QKeySequence>());
    KGlobalAccel::self()->setShortcut(a, QList<QKeySequence>());
    effects->registerGlobalShortcut(QKeySequence(), a);
    connect(a, &QAction::triggered, this, &ZoomEffect::moveZoomLeft);

    a = new QAction(this);
    a->setObjectName(QStringLiteral("MoveZoomRight"));
    a->setText(i18n("Move Zoomed Area to Right"));
    KGlobalAccel::self()->setDefaultShortcut(a, QList<QKeySequence>());
    KGlobalAccel::self()->setShortcut(a, QList<QKeySequence>());
    effects->registerGlobalShortcut(QKeySequence(), a);
    connect(a, &QAction::triggered, this, &ZoomEffect::moveZoomRight);

    a = new QAction(this);
    a->setObjectName(QStringLiteral("MoveZoomUp"));
    a->setText(i18n("Move Zoomed Area Upwards"));
    KGlobalAccel::self()->setDefaultShortcut(a, QList<QKeySequence>());
    KGlobalAccel::self()->setShortcut(a, QList<QKeySequence>());
    effects->registerGlobalShortcut(QKeySequence(), a);
    connect(a, &QAction::triggered, this, &ZoomEffect::moveZoomUp);

    a = new QAction(this);
    a->setObjectName(QStringLiteral("MoveZoomDown"));
    a->setText(i18n("Move Zoomed Area Downwards"));
    KGlobalAccel::self()->setDefaultShortcut(a, QList<QKeySequence>());
    KGlobalAccel::self()->setShortcut(a, QList<QKeySequence>());
    effects->registerGlobalShortcut(QKeySequence(), a);
    connect(a, &QAction::triggered, this, &ZoomEffect::moveZoomDown);
    // TODO: these two actions don't belong in the effect. They need to be moved into KWin core.
    a = new QAction(this);
    a->setObjectName(QStringLiteral("MoveMouseToFocus"));
    a->setText(i18n("Move Mouse to Focus"));
    KGlobalAccel::self()->setDefaultShortcut(a, QList<QKeySequence>() << Qt::META + Qt::Key_F5);
    KGlobalAccel::self()->setShortcut(a, QList<QKeySequence>() << Qt::META + Qt::Key_F5);
    effects->registerGlobalShortcut(Qt::META + Qt::Key_F5, a);
    connect(a, &QAction::triggered, this, &ZoomEffect::moveMouseToFocus);

    a = new QAction(this);
    a->setObjectName(QStringLiteral("MoveMouseToCenter"));
    a->setText(i18n("Move Mouse to Center"));
    KGlobalAccel::self()->setDefaultShortcut(a, QList<QKeySequence>() << Qt::META + Qt::Key_F6);
    KGlobalAccel::self()->setShortcut(a, QList<QKeySequence>() << Qt::META + Qt::Key_F6);
    effects->registerGlobalShortcut(Qt::META + Qt::Key_F6, a);
    connect(a, &QAction::triggered, this, &ZoomEffect::moveMouseToCenter);
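
    // The timeline drives continuous panning of the zoom area: 350 ms mapped onto
    // frames 0..100, with each frameChanged tick nudging prevPoint by the current
    // (xMove, yMove) step (see moveZoom() and timelineFrameChanged() below).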
    timeline.setDuration(350);
    timeline.setFrameRange(0, 100);
    connect(&timeline, &QTimeLine::frameChanged, this, &ZoomEffect::timelineFrameChanged);
    connect(effects, &EffectsHandler::mouseChanged, this, &ZoomEffect::slotMouseChanged);
    connect(effects, &EffectsHandler::windowDamaged, this, &ZoomEffect::slotWindowDamaged);
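
    // When built with accessibility support, the integration below reports focus-
    // and caret-tracking updates as focusPointChanged, which drives moveFocus()
    // and therefore where the zoomed area scrolls to.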
#if HAVE_ACCESSIBILITY
    m_accessibilityIntegration = new ZoomAccessibilityIntegration(this);
    connect(m_accessibilityIntegration, &ZoomAccessibilityIntegration::focusPointChanged, this, &ZoomEffect::moveFocus);
#endif
    source_zoom = -1; // used to trigger initialZoom reading
    reconfigure(ReconfigureAll);
}

ZoomEffect::~ZoomEffect()
{
    // switch off and free resources
    showCursor();
    // Save the zoom value.
    ZoomConfig::setInitialZoom(target_zoom);
    ZoomConfig::self()->save();
}

bool ZoomEffect::isFocusTrackingEnabled() const
{
#if HAVE_ACCESSIBILITY
    return m_accessibilityIntegration->isFocusTrackingEnabled();
#else
    return false;
#endif
}

bool ZoomEffect::isTextCaretTrackingEnabled() const
{
#if HAVE_ACCESSIBILITY
    return m_accessibilityIntegration->isTextCaretTrackingEnabled();
#else
    return false;
#endif
}

void ZoomEffect::showCursor()
{
    if (isMouseHidden) {
        disconnect(effects, &EffectsHandler::cursorShapeChanged, this, &ZoomEffect::recreateTexture);
        // show the previously hidden mouse-pointer again and free the loaded texture/picture.
        effects->showCursor();
        texture.reset();
#ifdef KWIN_HAVE_XRENDER_COMPOSITING
        xrenderPicture.reset();
#endif
        isMouseHidden = false;
    }
}

void ZoomEffect::hideCursor()
{
    if (mouseTracking == MouseTrackingProportional && mousePointer == MousePointerKeep)
        return; // don't replace the actual cursor by a static image for no reason.
    if (!isMouseHidden) {
        // try to load the cursor-theme into an OpenGL texture and, if successful, hide the mouse-pointer
        recreateTexture();
        bool shouldHide = false;
        if (effects->isOpenGLCompositing()) {
            shouldHide = !texture.isNull();
        } else if (effects->compositingType() == XRenderCompositing) {
#ifdef KWIN_HAVE_XRENDER_COMPOSITING
            shouldHide = !xrenderPicture.isNull();
#endif
        }
        if (shouldHide) {
            effects->hideCursor();
            connect(effects, &EffectsHandler::cursorShapeChanged, this, &ZoomEffect::recreateTexture);
            isMouseHidden = true;
        }
    }
}

void ZoomEffect::recreateTexture()
{
    effects->makeOpenGLContextCurrent();
    const auto cursor = effects->cursorImage();
    if (!cursor.image().isNull()) {
        imageWidth = cursor.image().width();
        imageHeight = cursor.image().height();
        cursorHotSpot = cursor.hotSpot();
        if (effects->isOpenGLCompositing()) {
            texture.reset(new GLTexture(cursor.image()));
            texture->setWrapMode(GL_CLAMP_TO_EDGE);
        }
#ifdef KWIN_HAVE_XRENDER_COMPOSITING
        if (effects->compositingType() == XRenderCompositing)
            xrenderPicture.reset(new XRenderPicture(cursor.image()));
#endif
    } else {
        qCDebug(KWINEFFECTS) << "Falling back to proportional mouse tracking!";
        mouseTracking = MouseTrackingProportional;
    }
}

void ZoomEffect::reconfigure(ReconfigureFlags)
{
    ZoomConfig::self()->read();
    // On zoom-in and zoom-out change the zoom by the defined zoom-factor.
    zoomFactor = qMax(0.1, ZoomConfig::zoomFactor());
    // Visibility of the mouse-pointer.
    mousePointer = MousePointerType(ZoomConfig::mousePointer());
    // Track moving of the mouse.
    mouseTracking = MouseTrackingType(ZoomConfig::mouseTracking());
#if HAVE_ACCESSIBILITY
    // Enable tracking of the focused location.
    m_accessibilityIntegration->setFocusTrackingEnabled(ZoomConfig::enableFocusTracking());
    // Enable tracking of the text caret.
    m_accessibilityIntegration->setTextCaretTrackingEnabled(ZoomConfig::enableTextCaretTracking());
#endif
    // The time in milliseconds to wait before a focus-event takes away a mouse-move.
    focusDelay = qMax(uint(0), ZoomConfig::focusDelay());
    // The factor the zoom-area will be moved on touching an edge in push-mode or using the navigation KActions.
    moveFactor = qMax(0.1, ZoomConfig::moveFactor());
    if (source_zoom < 0) {
        // Load the saved zoom value.
        source_zoom = 1.0;
        target_zoom = ZoomConfig::initialZoom();
        if (target_zoom > 1.0)
            zoomIn(target_zoom);
    } else {
        source_zoom = 1.0;
    }
}

void ZoomEffect::prePaintScreen(ScreenPrePaintData& data, std::chrono::milliseconds presentTime)
{
    if (zoom != target_zoom) {
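        // Effects are handed the expected presentation timestamp rather than a
        // frame interval, so each effect derives its own delta: diff the current
        // timestamp against the one remembered from the previous prePaintScreen()
        // call. A lastPresentTime of zero marks the first frame of an animation.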
        int time = 0;
        if (lastPresentTime.count())
            time = (presentTime - lastPresentTime).count();
        lastPresentTime = presentTime;
        const float zoomDist = qAbs(target_zoom - source_zoom);
        if (target_zoom > zoom)
            zoom = qMin(zoom + ((zoomDist * time) / animationTime(150 * zoomFactor)), target_zoom);
        else
            zoom = qMax(zoom - ((zoomDist * time) / animationTime(150 * zoomFactor)), target_zoom);
    }
    if (zoom == 1.0) {
        showCursor();
    } else {
        hideCursor();
        data.mask |= PAINT_SCREEN_TRANSFORMED;
    }
    effects->prePaintScreen(data, presentTime);
}

void ZoomEffect::paintScreen(int mask, const QRegion &region, ScreenPaintData& data)
{
    if (zoom != 1.0) {
        data *= QVector2D(zoom, zoom);
        const QSize screenSize = effects->virtualScreenSize();
        // mouse-tracking allows navigation of the zoom-area using the mouse.
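        // Proportional tracking translates by -p * (zoom - 1) so the point under
        // the cursor stays fixed while the rest of the screen scales around it.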
        switch(mouseTracking) {
        case MouseTrackingProportional:
            data.setXTranslation(- int(cursorPoint.x() * (zoom - 1.0)));
            data.setYTranslation(- int(cursorPoint.y() * (zoom - 1.0)));
            prevPoint = cursorPoint;
            break;
        case MouseTrackingCentred:
            prevPoint = cursorPoint;
            // fall through
        case MouseTrackingDisabled:
            data.setXTranslation(qMin(0, qMax(int(screenSize.width() - screenSize.width() * zoom), int(screenSize.width() / 2 - prevPoint.x() * zoom))));
            data.setYTranslation(qMin(0, qMax(int(screenSize.height() - screenSize.height() * zoom), int(screenSize.height() / 2 - prevPoint.y() * zoom))));
            break;
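        // Push mode: once the cursor (in zoomed coordinates) comes within
        // `threshold` pixels of a screen edge, pan towards that edge; the step
        // grows with how far past the threshold the cursor has travelled.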
        case MouseTrackingPush: {
            // touching an edge of the screen moves the zoom-area in that direction.
            int x = cursorPoint.x() * zoom - prevPoint.x() * (zoom - 1.0);
            int y = cursorPoint.y() * zoom - prevPoint.y() * (zoom - 1.0);
            int threshold = 4;
            xMove = yMove = 0;
            if (x < threshold)
                xMove = (x - threshold) / zoom;
            else if (x + threshold > screenSize.width())
                xMove = (x + threshold - screenSize.width()) / zoom;
            if (y < threshold)
                yMove = (y - threshold) / zoom;
            else if (y + threshold > screenSize.height())
                yMove = (y + threshold - screenSize.height()) / zoom;
            if (xMove)
                prevPoint.setX(qMax(0, qMin(screenSize.width(), prevPoint.x() + xMove)));
            if (yMove)
                prevPoint.setY(qMax(0, qMin(screenSize.height(), prevPoint.y() + yMove)));
            data.setXTranslation(- int(prevPoint.x() * (zoom - 1.0)));
            data.setYTranslation(- int(prevPoint.y() * (zoom - 1.0)));
            break;
        }
        }

        // use the focusPoint if focus tracking is enabled
        if (isFocusTrackingEnabled() || isTextCaretTrackingEnabled()) {
            bool acceptFocus = true;
            if (mouseTracking != MouseTrackingDisabled && focusDelay > 0) {
                // Wait some time for the mouse before doing the switch. This serves as threshold
                // to prevent the focus from jumping around too much while working with the mouse.
                const int msecs = lastMouseEvent.msecsTo(lastFocusEvent);
                acceptFocus = msecs > focusDelay;
            }
            if (acceptFocus) {
                data.setXTranslation(- int(focusPoint.x() * (zoom - 1.0)));
                data.setYTranslation(- int(focusPoint.y() * (zoom - 1.0)));
                prevPoint = focusPoint;
            }
        }
    }

    effects->paintScreen(mask, region, data);

    if (zoom != 1.0 && mousePointer != MousePointerHide) {
        // Draw the mouse-texture at the position matching the zoomed-in image of the desktop.
        // Hiding the real mouse-cursor and drawing our own fake one is needed to be able to
        // scale the mouse-cursor up and to re-position it to match the chosen zoom-level.
        int w = imageWidth;
        int h = imageHeight;
        if (mousePointer == MousePointerScale) {
            w *= zoom;
            h *= zoom;
        }
        const QPoint p = effects->cursorPos() - cursorHotSpot;
        QRect rect(p.x() * zoom + data.xTranslation(), p.y() * zoom + data.yTranslation(), w, h);
        if (texture) {
            texture->bind();
            glEnable(GL_BLEND);
            glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
            auto s = ShaderManager::instance()->pushShader(ShaderTrait::MapTexture);
            QMatrix4x4 mvp = data.projectionMatrix();
            mvp.translate(rect.x(), rect.y());
            s->setUniform(GLShader::ModelViewProjectionMatrix, mvp);
            texture->render(region, rect);
            ShaderManager::instance()->popShader();
            texture->unbind();
            glDisable(GL_BLEND);
        }
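
        // XRender has no shader pipeline: to scale the fake cursor, attach a
        // 1/zoom picture transform (16.16 fixed point via DOUBLE_TO_FIXED) before
        // compositing, then restore the identity transform afterwards.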
#ifdef KWIN_HAVE_XRENDER_COMPOSITING
        if (xrenderPicture) {
#define DOUBLE_TO_FIXED(d) ((xcb_render_fixed_t) ((d) * 65536))
            static const xcb_render_transform_t xrenderIdentity = {
                DOUBLE_TO_FIXED(1), DOUBLE_TO_FIXED(0), DOUBLE_TO_FIXED(0),
                DOUBLE_TO_FIXED(0), DOUBLE_TO_FIXED(1), DOUBLE_TO_FIXED(0),
                DOUBLE_TO_FIXED(0), DOUBLE_TO_FIXED(0), DOUBLE_TO_FIXED(1)
            };
            if (mousePointer == MousePointerScale) {
                xcb_render_set_picture_filter(xcbConnection(), *xrenderPicture, 4, const_cast<char*>("good"), 0, nullptr);
                const xcb_render_transform_t xform = {
                    DOUBLE_TO_FIXED(1.0 / zoom), DOUBLE_TO_FIXED(0), DOUBLE_TO_FIXED(0),
                    DOUBLE_TO_FIXED(0), DOUBLE_TO_FIXED(1.0 / zoom), DOUBLE_TO_FIXED(0),
                    DOUBLE_TO_FIXED(0), DOUBLE_TO_FIXED(0), DOUBLE_TO_FIXED(1)
                };
                xcb_render_set_picture_transform(xcbConnection(), *xrenderPicture, xform);
            }
            xcb_render_composite(xcbConnection(), XCB_RENDER_PICT_OP_OVER, *xrenderPicture, XCB_RENDER_PICTURE_NONE,
                                 effects->xrenderBufferPicture(), 0, 0, 0, 0, rect.x(), rect.y(), rect.width(), rect.height());
            if (mousePointer == MousePointerScale)
                xcb_render_set_picture_transform(xcbConnection(), *xrenderPicture, xrenderIdentity);
#undef DOUBLE_TO_FIXED
        }
#endif
    }
}

void ZoomEffect::postPaintScreen()
{
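    // While animating, keep scheduling repaints; once settled, clear
    // lastPresentTime so the next zoom change measures its first frame delta
    // from a fresh presentation timestamp rather than the distant past.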
    if (zoom != target_zoom)
        effects->addRepaintFull();
    else
        lastPresentTime = std::chrono::milliseconds::zero();
    effects->postPaintScreen();
}

void ZoomEffect::zoomIn(double to)
{
    source_zoom = zoom;
    if (to < 0.0)
        target_zoom *= zoomFactor;
    else
        target_zoom = to;
    if (!polling) {
        polling = true;
        effects->startMousePolling();
    }
    cursorPoint = effects->cursorPos();
    if (mouseTracking == MouseTrackingDisabled)
        prevPoint = cursorPoint;
    effects->addRepaintFull();
}

void ZoomEffect::zoomOut()
{
    source_zoom = zoom;
    target_zoom /= zoomFactor;
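    // Snap to exactly 1 once the divided target lands within 1% of it, so
    // repeated zoomFactor steps can't leave a residual zoom from floating-point
    // drift (the two branches handle zoomFactor above or below 1).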
    if ((zoomFactor > 1 && target_zoom < 1.01) || (zoomFactor < 1 && target_zoom > 0.99)) {
        target_zoom = 1;
        if (polling) {
            polling = false;
            effects->stopMousePolling();
        }
    }
    if (mouseTracking == MouseTrackingDisabled)
        prevPoint = effects->cursorPos();
    effects->addRepaintFull();
}

void ZoomEffect::actualSize()
{
    source_zoom = zoom;
    target_zoom = 1;
    if (polling) {
        polling = false;
        effects->stopMousePolling();
    }
    effects->addRepaintFull();
}

void ZoomEffect::timelineFrameChanged(int /* frame */)
{
    const QSize screenSize = effects->virtualScreenSize();
    prevPoint.setX(qMax(0, qMin(screenSize.width(), prevPoint.x() + xMove)));
    prevPoint.setY(qMax(0, qMin(screenSize.height(), prevPoint.y() + yMove)));
    cursorPoint = prevPoint;
    effects->addRepaintFull();
}

void ZoomEffect::moveZoom(int x, int y)
{
    if (timeline.state() == QTimeLine::Running)
        timeline.stop();
    const QSize screenSize = effects->virtualScreenSize();
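    // Pan step per timeline frame: a moveFactor-th of the currently visible
    // screen span (screenSize / zoom), clamped to at least one pixel.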
    if (x < 0)
        xMove = - qMax(1.0, screenSize.width() / zoom / moveFactor);
    else if (x > 0)
        xMove = qMax(1.0, screenSize.width() / zoom / moveFactor);
    else
        xMove = 0;
    if (y < 0)
        yMove = - qMax(1.0, screenSize.height() / zoom / moveFactor);
    else if (y > 0)
        yMove = qMax(1.0, screenSize.height() / zoom / moveFactor);
    else
        yMove = 0;
    timeline.start();
}

void ZoomEffect::moveZoomLeft()
{
    moveZoom(-1, 0);
}

void ZoomEffect::moveZoomRight()
{
    moveZoom(1, 0);
}

void ZoomEffect::moveZoomUp()
{
    moveZoom(0, -1);
}

void ZoomEffect::moveZoomDown()
{
    moveZoom(0, 1);
}

void ZoomEffect::moveMouseToFocus()
{
    QCursor::setPos(focusPoint.x(), focusPoint.y());
}

void ZoomEffect::moveMouseToCenter()
{
    const QRect r = effects->virtualScreenGeometry();
    QCursor::setPos(r.x() + r.width() / 2, r.y() + r.height() / 2);
}

void ZoomEffect::slotMouseChanged(const QPoint& pos, const QPoint& old, Qt::MouseButtons,
                                  Qt::MouseButtons, Qt::KeyboardModifiers, Qt::KeyboardModifiers)
{
    if (zoom == 1.0)
        return;
    cursorPoint = pos;
    if (pos != old) {
        lastMouseEvent = QTime::currentTime();
        effects->addRepaintFull();
    }
}

void ZoomEffect::slotWindowDamaged()
{
    if (zoom != 1.0) {
        effects->addRepaintFull();
    }
}

void ZoomEffect::moveFocus(const QPoint &point)
{
    if (zoom == 1.0)
        return;
    focusPoint = point;
    lastFocusEvent = QTime::currentTime();
    effects->addRepaintFull();
}

bool ZoomEffect::isActive() const
{
    return zoom != 1.0 || zoom != target_zoom;
}

} // namespace