wayland: Allow mapping more than one shm buffer at the same time

The shared memory buffer may need to install a SIGBUS handler because the
compositor _may_ access data outside the bounds of the buffer.

However, signal handling requires great care. For simplicity's sake,
only one shared memory buffer is currently allowed to be accessed at a
time. But such a restrictive access policy is inconvenient and forces us
to make copies of the client buffer contents.

This change relaxes the restrictions in the map() and unmap() functions so
that several buffers can be accessed simultaneously.

Note that atomic pointers are used because a signal handler can be
invoked at any point in the main thread's execution. Even though
"a = b" looks like a single operation from the developer's point of view,
the CPU can take several steps to store the new value in the variable. The
atomic pointers ensure that assignments to the next pointers and to
s_accessedBuffers happen atomically.
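
As a rough illustration of that reasoning, here is a minimal, standalone
sketch (not the KWin code; the Node and s_head names are made up) of an
intrusive list whose head and links are std::atomic, so a signal handler
that interrupts the main thread mid-update never observes a torn pointer:

#include <atomic>

struct Node
{
    int id = 0;
    std::atomic<Node *> next = nullptr;
};

static std::atomic<Node *> s_head = nullptr;

// Main thread: link the node first, then publish it. Both stores are atomic,
// so a handler that fires between them sees either the old head or a fully
// linked new node.
void push(Node *node)
{
    node->next.store(s_head.load());
    s_head.store(node);
}

// Signal-handler side: only atomic loads, no allocation, no locking.
Node *find(int id)
{
    for (Node *n = s_head.load(); n; n = n->next.load()) {
        if (n->id == id) {
            return n;
        }
    }
    return nullptr;
}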

Also keep in mind that we cannot start removing copy() calls yet
because ShmClientBuffer lifetime management requires further changes
which, I believe, are out of the scope of this patch.
Author: Vlad Zahorodnii
Date:   2024-08-17 15:00:10 +03:00
Commit: 76eb1d20b9
Parent: 0162aea100

2 changed files with 43 additions and 26 deletions


@@ -38,14 +38,7 @@ static constexpr uint32_t s_formats[] = {
     WL_SHM_FORMAT_RGB888,
 };
 
-class ShmSigbusData
-{
-public:
-    ShmPool *pool = nullptr;
-    int accessCount = 0;
-};
-
-static thread_local ShmSigbusData sigbusData;
+static std::atomic<ShmAccess *> s_accessedBuffers = nullptr;
 static struct sigaction prevSigbusAction;
 
 static uint32_t shmFormatToDrmFormat(uint32_t shmFormat)
@@ -228,21 +221,23 @@ static void sigbusHandler(int signum, siginfo_t *info, void *context)
         }
     };
 
-    const ShmPool *pool = sigbusData.pool;
-    if (!pool) {
-        reraise();
-        return;
+    MemoryMap *mapping = nullptr;
+    for (auto access = s_accessedBuffers.load(); access; access = access->next) {
+        const uchar *addr = static_cast<uchar *>(info->si_addr);
+        const uchar *mappingStart = static_cast<uchar *>(access->mapping->data());
+        if (addr >= mappingStart && addr < mappingStart + access->mapping->size()) {
+            mapping = access->mapping.get();
+            break;
+        }
     }
 
-    const uchar *addr = static_cast<uchar *>(info->si_addr);
-    const uchar *mappingStart = static_cast<uchar *>(pool->mapping->data());
-    if (addr < mappingStart || addr >= mappingStart + pool->mapping->size()) {
+    if (!mapping) {
         reraise();
         return;
     }
 
     // Replace the faulty mapping with a new one that's filled with zeros.
-    if (mmap(pool->mapping->data(), pool->mapping->size(), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED) {
+    if (mmap(mapping->data(), mapping->size(), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED) {
         reraise();
         return;
     }
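
For readers unfamiliar with the recovery trick used in this handler, here is a
standalone, heavily reduced sketch of the same technique. It is not the KWin
implementation: it guards a single fixed mapping through the made-up
s_guardedData/s_guardedSize globals and simply aborts instead of walking the
list of accessed buffers and re-raising to the previous handler:

#include <signal.h>
#include <stdlib.h>
#include <sys/mman.h>

static void *s_guardedData = nullptr; // start of the guarded shm mapping
static size_t s_guardedSize = 0;      // its size in bytes

static void handleSigbus(int, siginfo_t *info, void *)
{
    char *addr = static_cast<char *>(info->si_addr);
    char *start = static_cast<char *>(s_guardedData);
    if (!start || addr < start || addr >= start + s_guardedSize) {
        abort(); // the fault is not ours; the real handler re-raises instead
    }
    // Overlay zero-filled anonymous pages on top of the faulty range; the
    // interrupted instruction is restarted and now reads zeros instead of crashing.
    if (mmap(s_guardedData, s_guardedSize, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED) {
        abort();
    }
}

static void installSigbusHandler()
{
    struct sigaction action = {};
    action.sa_sigaction = handleSigbus;
    action.sa_flags = SA_SIGINFO;
    sigaction(SIGBUS, &action, nullptr);
}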
@@ -264,29 +259,43 @@ GraphicsBuffer::Map ShmClientBuffer::map(MapFlags flags)
             action.sa_flags = SA_SIGINFO | SA_NODEFER;
             sigaction(SIGBUS, &action, &prevSigbusAction);
         });
-
-        Q_ASSERT(!sigbusData.pool || sigbusData.pool == m_shmPool);
-        sigbusData.pool = m_shmPool;
-        ++sigbusData.accessCount;
     }
 
+    if (!m_shmAccess.has_value()) {
+        ShmAccess &access = m_shmAccess.emplace(m_shmPool->mapping, 0, s_accessedBuffers.load());
+        s_accessedBuffers = &access;
+    }
+    m_shmAccess->count++;
+
     return Map{
-        .data = reinterpret_cast<uchar *>(m_shmPool->mapping->data()) + m_shmAttributes.offset,
+        .data = reinterpret_cast<uchar *>(m_shmAccess->mapping->data()) + m_shmAttributes.offset,
         .stride = uint32_t(m_shmAttributes.stride),
     };
 }
 
 void ShmClientBuffer::unmap()
 {
-    if (m_shmPool->sigbusImpossible) {
+    if (!m_shmAccess.has_value()) {
         return;
     }
 
-    Q_ASSERT(sigbusData.accessCount > 0);
-    --sigbusData.accessCount;
-    if (sigbusData.accessCount == 0) {
-        sigbusData.pool = nullptr;
+    m_shmAccess->count--;
+    if (m_shmAccess->count != 0) {
+        return;
     }
+
+    if (s_accessedBuffers == &m_shmAccess.value()) {
+        s_accessedBuffers = m_shmAccess->next.load();
+    } else {
+        for (auto access = s_accessedBuffers.load(); access; access = access->next) {
+            if (access->next == &m_shmAccess.value()) {
+                access->next = m_shmAccess->next.load();
+                break;
+            }
+        }
+    }
+
+    m_shmAccess.reset();
 }
 
 ShmClientBufferIntegrationPrivate::ShmClientBufferIntegrationPrivate(Display *display, ShmClientBufferIntegration *q)
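
The unlink code in unmap() above is just a removal from the same kind of
singly-linked list shown in the earlier push()/find() sketch: special-case
the head, otherwise scan for the predecessor and splice the node out. A
self-contained illustration of the pattern, with made-up Access/s_head names
rather than the real ones:

#include <atomic>

struct Access
{
    std::atomic<Access *> next = nullptr;
};

static std::atomic<Access *> s_head = nullptr;

void remove(Access *node)
{
    if (s_head.load() == node) {
        // The node is the list head; publish its successor as the new head.
        s_head.store(node->next.load());
        return;
    }
    for (Access *it = s_head.load(); it; it = it->next.load()) {
        if (it->next.load() == node) {
            // Point the predecessor past the node being removed.
            it->next.store(node->next.load());
            break;
        }
    }
}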


@@ -49,6 +49,13 @@ protected:
     void shm_pool_resize(Resource *resource, int32_t size) override;
 };
 
+struct ShmAccess
+{
+    std::shared_ptr<MemoryMap> mapping;
+    int count = 0;
+    std::atomic<ShmAccess *> next = nullptr;
+};
+
 class KWIN_EXPORT ShmClientBuffer : public GraphicsBuffer
 {
     Q_OBJECT
@@ -74,6 +81,7 @@ private:
     wl_resource *m_resource = nullptr;
     ShmPool *m_shmPool;
     ShmAttributes m_shmAttributes;
+    std::optional<ShmAccess> m_shmAccess;
 };
 
 } // namespace KWin
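
Finally, a hypothetical caller-side sketch of what the relaxed policy permits:
two shm client buffers mapped at the same time, e.g. to copy pixels directly
between them without an intermediate buffer. The includes, the copyRow helper,
and the GraphicsBuffer::MapFlag names are assumptions and are not part of this
diff:

// #include "shmclientbuffer.h" // assumed KWin header providing ShmClientBuffer
#include <cstddef>
#include <cstring>

using namespace KWin;

void copyRow(ShmClientBuffer *source, ShmClientBuffer *destination, std::size_t rowBytes)
{
    // Both buffers stay mapped at the same time, which the old
    // one-buffer-at-a-time policy did not allow without copying.
    const GraphicsBuffer::Map src = source->map(GraphicsBuffer::MapFlag::Read);
    const GraphicsBuffer::Map dst = destination->map(GraphicsBuffer::MapFlag::Write);
    if (src.data && dst.data) {
        std::memcpy(dst.data, src.data, rowBytes);
    }
    destination->unmap();
    source->unmap();
}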