opengl: Harden GLRenderTimeQuery against OpenGL providing bad timestamps
The end-of-render timestamp can end up slightly before the start-of-render timestamp. This results in negative render times, which can make KWin wait much longer than one vblank interval before starting the next frame.

There appears to be no way to detect whether the GPU has performed a disjoint operation in OpenGL; that is only available in GLES. As a workaround, this change makes GLRenderTimeQuery insert two probes: one queries GL timestamps when rendering starts and ends, the other queries std::steady_clock before and after painting. This hardens GLRenderTimeQuery against OpenGL occasionally providing nonsensical results.

BUG: 481721
This commit is contained in:
parent 79b7545840
commit 0dc3f4906f

2 changed files with 32 additions and 26 deletions
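For context: the GLES facility the commit message alludes to is presumably the EXT_disjoint_timer_query extension, which lets a client ask whether the GPU performed a disjoint operation that invalidates timer query results. A minimal sketch of that check follows; it is illustrative only and not part of this diff, and the helper name gpuTimingWasDisjoint() is made up here.

#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>

// GLES-only sketch (EXT_disjoint_timer_query): GL_GPU_DISJOINT_EXT reports
// whether the GPU hit a disjoint event (e.g. a power-state change) that
// makes timer query results meaningless. Desktop OpenGL has no equivalent,
// hence the CPU-side fallback probe introduced by this commit.
bool gpuTimingWasDisjoint()
{
    GLint disjointOccurred = 0;
    glGetIntegerv(GL_GPU_DISJOINT_EXT, &disjointOccurred);
    return disjointOccurred != 0;
}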
@@ -15,36 +15,33 @@ namespace KWin
 GLRenderTimeQuery::GLRenderTimeQuery()
 {
     if (GLPlatform::instance()->supports(GLFeature::TimerQuery)) {
-        glGenQueries(1, &m_query);
+        glGenQueries(1, &m_gpuProbe.query);
     }
 }
 
 GLRenderTimeQuery::~GLRenderTimeQuery()
 {
-    if (m_query) {
-        glDeleteQueries(1, &m_query);
+    if (m_gpuProbe.query) {
+        glDeleteQueries(1, &m_gpuProbe.query);
     }
 }
 
 void GLRenderTimeQuery::begin()
 {
-    if (m_query) {
-        GLint64 nanos = 0;
-        glGetInteger64v(GL_TIMESTAMP, &nanos);
-        m_cpuStart = std::chrono::nanoseconds(nanos);
-    } else {
-        m_cpuStart = std::chrono::steady_clock::now().time_since_epoch();
+    if (m_gpuProbe.query) {
+        glGetInteger64v(GL_TIMESTAMP, &m_gpuProbe.start);
     }
+    m_cpuProbe.start = std::chrono::steady_clock::now().time_since_epoch();
 }
 
 void GLRenderTimeQuery::end()
 {
-    if (m_query) {
-        glQueryCounter(m_query, GL_TIMESTAMP);
-    } else {
-        m_cpuEnd = std::chrono::steady_clock::now().time_since_epoch();
-    }
     m_hasResult = true;
+
+    if (m_gpuProbe.query) {
+        glQueryCounter(m_gpuProbe.query, GL_TIMESTAMP);
+    }
+    m_cpuProbe.end = std::chrono::steady_clock::now().time_since_epoch();
 }
 
 std::chrono::nanoseconds GLRenderTimeQuery::result()
@@ -53,16 +50,15 @@ std::chrono::nanoseconds GLRenderTimeQuery::result()
         return std::chrono::nanoseconds::zero();
     }
     m_hasResult = false;
-    if (m_query) {
-        uint64_t nanos = 0;
-        glGetQueryObjectui64v(m_query, GL_QUERY_RESULT, &nanos);
-        if (nanos == 0) {
-            return std::chrono::nanoseconds::zero();
-        }
-        return std::chrono::nanoseconds(nanos) - m_cpuStart;
-    } else {
-        return m_cpuEnd - m_cpuStart;
+    if (m_gpuProbe.query) {
+        glGetQueryObjecti64v(m_gpuProbe.query, GL_QUERY_RESULT, &m_gpuProbe.end);
     }
+
+    const std::chrono::nanoseconds gpuTime(m_gpuProbe.end - m_gpuProbe.start);
+    const std::chrono::nanoseconds cpuTime = m_cpuProbe.end - m_cpuProbe.start;
+
+    return std::max(gpuTime, cpuTime);
 }
 
 }
@@ -31,10 +31,20 @@ public:
     std::chrono::nanoseconds result();
 
 private:
-    GLuint m_query = 0;
     bool m_hasResult = false;
-    std::chrono::nanoseconds m_cpuStart = std::chrono::nanoseconds::zero();
-    std::chrono::nanoseconds m_cpuEnd = std::chrono::nanoseconds::zero();
+
+    struct
+    {
+        std::chrono::nanoseconds start = std::chrono::nanoseconds::zero();
+        std::chrono::nanoseconds end = std::chrono::nanoseconds::zero();
+    } m_cpuProbe;
+
+    struct
+    {
+        GLuint query = 0;
+        GLint64 start = 0;
+        GLint64 end = 0;
+    } m_gpuProbe;
 };
 
 }
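For illustration, a hypothetical caller is sketched below; it is not part of this diff, and measureFrame() and renderScene are made-up stand-ins for KWin's actual rendering and frame-scheduling code.

#include <chrono>
#include <functional>

// Sketch of how the query brackets one frame. With this change, result()
// returns std::max(gpuTime, cpuTime), so a nonsensical GPU delta (e.g.
// negative because the end timestamp landed before the start one) can no
// longer push the reported render time below the CPU-measured duration.
std::chrono::nanoseconds measureFrame(GLRenderTimeQuery &query, const std::function<void()> &renderScene)
{
    query.begin();   // GPU probe: GL_TIMESTAMP (if available); CPU probe: steady_clock
    renderScene();
    query.end();     // GPU probe: glQueryCounter end timestamp; CPU probe: steady_clock

    // Reading GL_QUERY_RESULT blocks until the GPU reaches the query, so
    // real callers fetch the result once the frame has been presented.
    return query.result();
}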