#include "shm.h"

#include <stdio.h>

shm: new function, shm_scroll()

This function "scrolls" the buffer by the specified number of (pixel)
rows.

The idea is to move the image offset by re-sizing the underlying memfd
object. I.e. to scroll forward, increase the size of the memfd file,
and move the pixman image offset forward (and the Wayland SHM buffer
as well).

Only increasing the file size would, obviously, cause the memfd file
to grow indefinitely. To deal with this, we "punch" a hole from the
beginning of the file to the new offset. This frees the associated
memory.

Thus, while we have a memfd file whose size (as seen by e.g. fstat())
is ever growing, the amount of memory actually backing it stays at the
original buffer size.

Some notes:

* FALLOC_FL_PUNCH_HOLE can be quite slow when the number of used pages
  to drop is large.
* All normal fallocate() usages have been replaced with ftruncate(),
  as this is *much* faster. fallocate() guarantees that subsequent
  writes won't fail, i.e. it actually reserves (disk) space. While it
  doesn't allocate any on-disk blocks for a memfd, it *does*
  zero-initialize the in-memory blocks, and this is slow. ftruncate()
  doesn't do this.

TODO: implement reverse scrolling (i.e. a negative row count).
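
As a rough illustration of the grow-then-punch-hole idea described
above, here is a minimal, self-contained sketch. It is not foot's
actual shm_scroll() implementation; the helper name, parameters and
error handling are assumptions made for the example.

/* Sketch only: scroll a memfd-backed buffer forward by 'rows' pixel
 * rows, given 'stride' bytes per row. The current buffer starts at
 * 'old_offset' and the memfd is currently 'file_size' bytes long. */
#define _GNU_SOURCE
#include <fcntl.h>       /* fallocate(), FALLOC_FL_* */
#include <unistd.h>      /* ftruncate() */
#include <sys/types.h>

static off_t
scroll_forward_sketch(int memfd, off_t old_offset, off_t file_size,
                      int stride, int rows)
{
    const off_t delta = (off_t)stride * rows;
    const off_t new_offset = old_offset + delta;

    /* Grow the file so the buffer can simply slide forward */
    if (ftruncate(memfd, file_size + delta) < 0)
        return -1;

#if defined(FALLOC_FL_PUNCH_HOLE)
    /* Release the pages that have scrolled out, i.e. everything
     * before the new offset. The file size stays the same; only the
     * backing memory is freed. */
    if (fallocate(memfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                  0, new_offset) < 0)
        return -1;
#endif

    /* The caller would now re-create the pixman image and the
     * wl_buffer at new_offset (cf. instantiate_offset() below). */
    return new_offset;
}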

#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <limits.h>
#include <time.h>

#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>

#include <pixman.h>
#include <fcft/stride.h>
#include <tllist.h>

#define LOG_MODULE "shm"
#define LOG_ENABLE_DBG 0
#include "log.h"
#include "debug.h"
#include "macros.h"
#include "xmalloc.h"

#if !defined(MAP_UNINITIALIZED)
#define MAP_UNINITIALIZED 0
#endif

#if !defined(MFD_NOEXEC_SEAL)
#define MFD_NOEXEC_SEAL 0
#endif
#define TIME_SCROLL 0
#define FORCED_DOUBLE_BUFFERING 0

/*
 * Maximum memfd size allowed.
 *
 * On 64-bit, we could in theory use up to 2GB (wl_shm_create_pool()
 * is limited to int32_t), since we never mmap() the entire region.
 *
 * The compositor is a different matter - it needs to mmap() the
 * entire range, and *keep* the mapping for as long as it has buffers
 * referencing it (thus - always). And if we open multiple terminals,
 * the required address space multiplies...
 *
 * That said, 128TB (the total amount of available user address space
 * on 64-bit) is *a lot*; we can fit 67108864 2GB memfds into
 * that. But let's be conservative for now.
 *
 * On 32-bit the available address space is too small, and SHM
 * scrolling is disabled.
 *
 * Note: this is the _default_ size. It can be overridden by calling
 * shm_set_max_pool_size().
 */
static off_t max_pool_size = 512 * 1024 * 1024;

static bool can_punch_hole = false;
static bool can_punch_hole_initialized = false;

struct buffer_pool {
    int fd;                      /* memfd */
    struct wl_shm_pool *wl_pool;

    void *real_mmapped;          /* Address returned from mmap */
    size_t mmap_size;            /* Size of mmap (>= size) */

    size_t ref_count;
};

shm: refactor: move away from a single, global, buffer list

Up until now, *all* buffers have been tracked in a single, global
buffer list. We've used 'cookies' to separate buffers from different
contexts (so that shm_get_buffer() doesn't try to re-use e.g. a
search-box buffer for the main grid).

This patch refactors this, and completely removes the global list.

Instead of cookies, we now use 'chains'. A chain tracks both the
properties to apply to newly created buffers (scrollable, number of
pixman instances to instantiate, etc.), as well as the instantiated
buffers themselves.

This means there is, strictly speaking, not much use for shm_fini()
anymore, since it's up to the chain owner to call shm_chain_free(),
which will also purge all buffers.

However, since purging a buffer may be deferred - if the buffer is
owned by the compositor at the time of the call to shm_purge() or
shm_chain_free() - we still keep a global 'deferred' list, onto which
deferred buffers are pushed. shm_fini() iterates this list and
destroys the buffers _even_ if they are still owned by the
compositor. This only happens at program termination, and not when
destroying a terminal instance; i.e. closing a window in a “foot
--server” does *not* trigger this.

Each terminal instantiates a number of chains, and these chains are
destroyed when the terminal instance is destroyed. Note that some
buffers may be put on the deferred list, as mentioned above.
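
As a usage-level illustration of the ownership model described above,
the sketch below shows how a chain owner might drive the API. The
exact prototypes of shm_chain_new(), shm_get_buffer() and
shm_chain_free() are assumptions for the sketch, not necessarily
foot's exact signatures.

/* Hypothetical chain-owner lifecycle; prototypes are assumed. */
struct term_sketch {
    struct buffer_chain *grid_chain;
};

static void
term_init_sketch(struct term_sketch *term, struct wl_shm *shm)
{
    /* One chain per rendering context; the chain remembers whether
     * its buffers are scrollable and how many pixman instances each
     * new buffer should carry. */
    term->grid_chain = shm_chain_new(shm, /* scrollable */ true,
                                     /* pix_instances */ 1);
}

static void
term_render_sketch(struct term_sketch *term, int width, int height)
{
    /* The chain hands back an idle buffer, or creates a new one. */
    struct buffer *buf = shm_get_buffer(term->grid_chain, width, height);

    /* ... draw via buf->pix[0], then attach buf->wl_buf to a surface ... */
    (void)buf;
}

static void
term_destroy_sketch(struct term_sketch *term)
{
    /* Frees the chain and purges its buffers; anything still held by
     * the compositor is parked on the global 'deferred' list until
     * buffer_release() fires, or until shm_fini() at program exit. */
    shm_chain_free(term->grid_chain);
    term->grid_chain = NULL;
}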

struct buffer_chain;

struct buffer_private {
    struct buffer public;
    struct buffer_chain *chain;

    size_t ref_count;
    bool busy;                /* Owned by compositor */

    struct buffer_pool *pool;
    off_t offset;             /* Offset into memfd where data begins */
    size_t size;
    bool with_alpha;

    bool scrollable;
};

struct buffer_chain {
    tll(struct buffer_private *) bufs;
    struct wl_shm *shm;
    size_t pix_instances;
    bool scrollable;
};

static tll(struct buffer_private *) deferred;

#undef MEASURE_SHM_ALLOCS
#if defined(MEASURE_SHM_ALLOCS)
static size_t max_alloced = 0;
#endif

void
shm_set_max_pool_size(off_t _max_pool_size)
{
    max_pool_size = _max_pool_size;
}

static void
buffer_destroy_dont_close(struct buffer *buf)
{
    if (buf->pix != NULL) {
        for (size_t i = 0; i < buf->pix_instances; i++)
            if (buf->pix[i] != NULL)
                pixman_image_unref(buf->pix[i]);
    }

    if (buf->wl_buf != NULL)
        wl_buffer_destroy(buf->wl_buf);

    free(buf->pix);
    buf->pix = NULL;
    buf->wl_buf = NULL;
    buf->data = NULL;
}

static void
pool_unref(struct buffer_pool *pool)
{
    if (pool == NULL)
        return;

    xassert(pool->ref_count > 0);
    pool->ref_count--;

    if (pool->ref_count > 0)
        return;

    if (pool->real_mmapped != MAP_FAILED)
        munmap(pool->real_mmapped, pool->mmap_size);
    if (pool->wl_pool != NULL)
        wl_shm_pool_destroy(pool->wl_pool);
    if (pool->fd >= 0)
        close(pool->fd);

    pool->real_mmapped = MAP_FAILED;
    pool->wl_pool = NULL;
    pool->fd = -1;
    free(pool);
}

static void
buffer_destroy(struct buffer_private *buf)
{
    buffer_destroy_dont_close(&buf->public);
    pool_unref(buf->pool);
    buf->pool = NULL;

    for (size_t i = 0; i < buf->public.pix_instances; i++)
        pixman_region32_fini(&buf->public.dirty[i]);
    free(buf->public.dirty);
    free(buf);
}

static bool
buffer_unref_no_remove_from_chain(struct buffer_private *buf)
{
    xassert(buf->ref_count > 0);
    buf->ref_count--;

    if (buf->ref_count > 0)
        return false;

    if (buf->busy)
        tll_push_back(deferred, buf);
    else
        buffer_destroy(buf);
    return true;
}

void
shm_fini(void)
{
    LOG_DBG("deferred buffers: %zu", tll_length(deferred));

    tll_foreach(deferred, it) {
        buffer_destroy(it->item);
        tll_remove(deferred, it);
    }

#if defined(MEASURE_SHM_ALLOCS) && MEASURE_SHM_ALLOCS
    LOG_INFO("max total allocations was: %zu MB", max_alloced / 1024 / 1024);
#endif
}

static void
buffer_release(void *data, struct wl_buffer *wl_buffer)
{
    struct buffer_private *buffer = data;

    xassert(buffer->public.wl_buf == wl_buffer);
    xassert(buffer->busy);
    buffer->busy = false;

    if (buffer->ref_count == 0) {
        bool found = false;
        tll_foreach(deferred, it) {
            if (it->item == buffer) {
                found = true;
                tll_remove(deferred, it);
                break;
            }
        }

        buffer_destroy(buffer);

        xassert(found);
        if (!found)
            LOG_WARN("deferred delete: buffer not on the 'deferred' list");
    }
}

static const struct wl_buffer_listener buffer_listener = {
    .release = &buffer_release,
};

#if __SIZEOF_POINTER__ == 8
static size_t
page_size(void)
{
    static size_t size = 0;
    if (size == 0) {
        long n = sysconf(_SC_PAGE_SIZE);
        if (n <= 0) {
            LOG_ERRNO("failed to get page size");
            size = 4096;
        } else {
            size = (size_t)n;
        }
    }
    xassert(size > 0);
    return size;
}
#endif

static bool
instantiate_offset(struct buffer_private *buf, off_t new_offset)
{
    xassert(buf->public.data == NULL);
    xassert(buf->public.pix == NULL);
    xassert(buf->public.wl_buf == NULL);
    xassert(buf->pool != NULL);

    const struct buffer_pool *pool = buf->pool;

    void *mmapped = MAP_FAILED;
    struct wl_buffer *wl_buf = NULL;
    pixman_image_t **pix = xcalloc(buf->public.pix_instances, sizeof(pix[0]));

    mmapped = (uint8_t *)pool->real_mmapped + new_offset;

    wl_buf = wl_shm_pool_create_buffer(
        pool->wl_pool, new_offset,
        buf->public.width, buf->public.height, buf->public.stride,
        buf->with_alpha ? WL_SHM_FORMAT_ARGB8888 : WL_SHM_FORMAT_XRGB8888);

    if (wl_buf == NULL) {
        LOG_ERR("failed to create SHM buffer");
        goto err;
    }

    /* One pixman image for each worker thread (do we really need multiple?) */
    for (size_t i = 0; i < buf->public.pix_instances; i++) {
        pix[i] = pixman_image_create_bits_no_clear(
            buf->with_alpha ? PIXMAN_a8r8g8b8 : PIXMAN_x8r8g8b8,
            buf->public.width, buf->public.height,
            (uint32_t *)mmapped, buf->public.stride);
        if (pix[i] == NULL) {
            LOG_ERR("failed to create pixman image");
            goto err;
        }
    }

    buf->public.data = mmapped;
    buf->public.wl_buf = wl_buf;
    buf->public.pix = pix;
    buf->offset = new_offset;

    wl_buffer_add_listener(wl_buf, &buffer_listener, buf);
    return true;

err:
    if (pix != NULL) {
        for (size_t i = 0; i < buf->public.pix_instances; i++)
            if (pix[i] != NULL)
                pixman_image_unref(pix[i]);
    }
    free(pix);
    if (wl_buf != NULL)
        wl_buffer_destroy(wl_buf);

    abort();
    return false;
}
static void NOINLINE
|
shm: refactor: move away from a single, global, buffer list
Up until now, *all* buffers have been tracked in a single, global
buffer list. We've used 'cookies' to separate buffers from different
contexts (so that shm_get_buffer() doesn't try to re-use e.g. a
search-box buffer for the main grid).
This patch refactors this, and completely removes the global
list.
Instead of cookies, we now use 'chains'. A chain tracks both the
properties to apply to newly created buffers (scrollable, number of
pixman instances to instantiate etc), as well as the instantiated
buffers themselves.
This means there's strictly speaking not much use for shm_fini()
anymore, since its up to the chain owner to call shm_chain_free(),
which will also purge all buffers.
However, since purging a buffer may be deferred, if the buffer is
owned by the compositor at the time of the call to shm_purge() or
shm_chain_free(), we still keep a global 'deferred' list, on to which
deferred buffers are pushed. shm_fini() iterates this list and
destroys the buffers _even_ if they are still owned by the
compositor. This only happens at program termination, and not when
destroying a terminal instance. I.e. closing a window in a “foot
--server” does *not* trigger this.
Each terminal instatiates a number of chains, and these chains are
destroyed when the terminal instance is destroyed. Note that some
buffers may be put on the deferred list, as mentioned above.
2021-07-16 16:48:49 +02:00
|
|
|
get_new_buffers(struct buffer_chain *chain, size_t count,
|
|
|
|
|
int widths[static count], int heights[static count],
|
2024-02-21 16:29:10 +01:00
|
|
|
struct buffer *bufs[static count], bool with_alpha,
|
|
|
|
|
bool immediate_purge)
|
2021-07-15 18:32:19 +02:00
|
|
|
{
|
shm: refactor: move away from a single, global, buffer list
Up until now, *all* buffers have been tracked in a single, global
buffer list. We've used 'cookies' to separate buffers from different
contexts (so that shm_get_buffer() doesn't try to re-use e.g. a
search-box buffer for the main grid).
This patch refactors this, and completely removes the global
list.
Instead of cookies, we now use 'chains'. A chain tracks both the
properties to apply to newly created buffers (scrollable, number of
pixman instances to instantiate etc), as well as the instantiated
buffers themselves.
This means there's strictly speaking not much use for shm_fini()
anymore, since its up to the chain owner to call shm_chain_free(),
which will also purge all buffers.
However, since purging a buffer may be deferred, if the buffer is
owned by the compositor at the time of the call to shm_purge() or
shm_chain_free(), we still keep a global 'deferred' list, on to which
deferred buffers are pushed. shm_fini() iterates this list and
destroys the buffers _even_ if they are still owned by the
compositor. This only happens at program termination, and not when
destroying a terminal instance. I.e. closing a window in a “foot
--server” does *not* trigger this.
Each terminal instatiates a number of chains, and these chains are
destroyed when the terminal instance is destroyed. Note that some
buffers may be put on the deferred list, as mentioned above.
2021-07-16 16:48:49 +02:00
|
|
|
xassert(count == 1 || !chain->scrollable);
|
2019-06-12 20:08:54 +02:00
|
|
|
/*
|
|
|
|
|
* No existing buffer available. Create a new one by:
|
|
|
|
|
*
|
|
|
|
|
* 1. open a memory backed "file" with memfd_create()
|
2019-08-16 22:11:22 +02:00
|
|
|
* 2. mmap() the memory file, to be used by the pixman image
|
2019-06-12 20:08:54 +02:00
|
|
|
* 3. create a wayland shm buffer for the same memory file
|
|
|
|
|
*
|
2019-08-16 22:11:22 +02:00
|
|
|
* The pixman image and the wayland buffer are now sharing memory.
|
2019-06-12 20:08:54 +02:00
|
|
|
*/
|
|
|
|
|
|
2021-07-15 18:32:19 +02:00
|
|
|
int stride[count];
|
|
|
|
|
int sizes[count];
|
|
|
|
|
|
|
|
|
|
size_t total_size = 0;
|
|
|
|
|
for (size_t i = 0; i < count; i++) {
|
2024-02-21 16:29:10 +01:00
|
|
|
stride[i] = stride_for_format_and_width(
|
|
|
|
|
with_alpha ? PIXMAN_a8r8g8b8 : PIXMAN_x8r8g8b8, widths[i]);
|
shm: refactor: move away from a single, global, buffer list
Up until now, *all* buffers have been tracked in a single, global
buffer list. We've used 'cookies' to separate buffers from different
contexts (so that shm_get_buffer() doesn't try to re-use e.g. a
search-box buffer for the main grid).
This patch refactors this, and completely removes the global
list.
Instead of cookies, we now use 'chains'. A chain tracks both the
properties to apply to newly created buffers (scrollable, number of
pixman instances to instantiate etc), as well as the instantiated
buffers themselves.
This means there's strictly speaking not much use for shm_fini()
anymore, since its up to the chain owner to call shm_chain_free(),
which will also purge all buffers.
However, since purging a buffer may be deferred, if the buffer is
owned by the compositor at the time of the call to shm_purge() or
shm_chain_free(), we still keep a global 'deferred' list, on to which
deferred buffers are pushed. shm_fini() iterates this list and
destroys the buffers _even_ if they are still owned by the
compositor. This only happens at program termination, and not when
destroying a terminal instance. I.e. closing a window in a “foot
--server” does *not* trigger this.
Each terminal instatiates a number of chains, and these chains are
destroyed when the terminal instance is destroyed. Note that some
buffers may be put on the deferred list, as mentioned above.
2021-07-16 16:48:49 +02:00
|
|
|
sizes[i] = stride[i] * heights[i];
|
2021-07-15 18:32:19 +02:00
|
|
|
total_size += sizes[i];
|
|
|
|
|
}
|
2021-09-23 22:17:43 +00:00
|
|
|
if (total_size == 0)
|
|
|
|
|
return;
|
2021-07-15 18:32:19 +02:00
|
|
|
|
2019-06-12 20:08:54 +02:00
|
|
|
int pool_fd = -1;
|
|
|
|
|
|
2020-03-25 18:26:58 +01:00
|
|
|
void *real_mmapped = MAP_FAILED;
|
2021-07-15 18:32:19 +02:00
|
|
|
struct wl_shm_pool *wl_pool = NULL;
|
|
|
|
|
struct buffer_pool *pool = NULL;
|
2019-06-12 20:08:54 +02:00
|
|
|
|
|
|
|
|
/* Backing memory for SHM */
|
2021-01-19 14:20:55 +00:00
|
|
|
#if defined(MEMFD_CREATE)
|
2023-10-12 16:16:11 +02:00
|
|
|
/*
|
|
|
|
|
* Older kernels reject MFD_NOEXEC_SEAL with EINVAL. Try first
|
|
|
|
|
* *with* it, and if that fails, try again *without* it.
|
|
|
|
|
*/
|
|
|
|
|
errno = 0;
|
|
|
|
|
pool_fd = memfd_create(
|
|
|
|
|
"foot-wayland-shm-buffer-pool",
|
|
|
|
|
MFD_CLOEXEC | MFD_ALLOW_SEALING | MFD_NOEXEC_SEAL);
|
|
|
|
|
|
2024-05-22 14:06:15 +02:00
|
|
|
if (pool_fd < 0 && errno == EINVAL && MFD_NOEXEC_SEAL != 0) {
|
2023-10-12 16:16:11 +02:00
|
|
|
pool_fd = memfd_create(
|
|
|
|
|
"foot-wayland-shm-buffer-pool", MFD_CLOEXEC | MFD_ALLOW_SEALING);
|
|
|
|
|
}
|
|
|
|
|
|
2021-01-19 14:20:55 +00:00
|
|
|
#elif defined(__FreeBSD__)
|
|
|
|
|
// memfd_create on FreeBSD 13 is SHM_ANON without sealing support
|
|
|
|
|
pool_fd = shm_open(SHM_ANON, O_RDWR | O_CLOEXEC, 0600);
|
|
|
|
|
#else
|
|
|
|
|
char name[] = "/tmp/foot-wayland-shm-buffer-pool-XXXXXX";
|
|
|
|
|
pool_fd = mkostemp(name, O_CLOEXEC);
|
|
|
|
|
unlink(name);
|
|
|
|
|
#endif
|
2019-06-12 20:08:54 +02:00
|
|
|
if (pool_fd == -1) {
|
|
|
|
|
LOG_ERRNO("failed to create SHM backing memory file");
|
|
|
|
|
goto err;
|
|
|
|
|
}
|
|
|
|
|
|
2021-01-21 12:39:51 +01:00
|
|
|
#if __SIZEOF_POINTER__ == 8
|
shm: refactor: move away from a single, global, buffer list
Up until now, *all* buffers have been tracked in a single, global
buffer list. We've used 'cookies' to separate buffers from different
contexts (so that shm_get_buffer() doesn't try to re-use e.g. a
search-box buffer for the main grid).
This patch refactors this, and completely removes the global
list.
Instead of cookies, we now use 'chains'. A chain tracks both the
properties to apply to newly created buffers (scrollable, number of
pixman instances to instantiate etc), as well as the instantiated
buffers themselves.
This means there's strictly speaking not much use for shm_fini()
anymore, since its up to the chain owner to call shm_chain_free(),
which will also purge all buffers.
However, since purging a buffer may be deferred, if the buffer is
owned by the compositor at the time of the call to shm_purge() or
shm_chain_free(), we still keep a global 'deferred' list, on to which
deferred buffers are pushed. shm_fini() iterates this list and
destroys the buffers _even_ if they are still owned by the
compositor. This only happens at program termination, and not when
destroying a terminal instance. I.e. closing a window in a “foot
--server” does *not* trigger this.
Each terminal instatiates a number of chains, and these chains are
destroyed when the terminal instance is destroyed. Note that some
buffers may be put on the deferred list, as mentioned above.
2021-07-16 16:48:49 +02:00
|
|
|
off_t offset = chain->scrollable && max_pool_size > 0
|
|
|
|
|
? (max_pool_size / 4) & ~(page_size() - 1)
|
|
|
|
|
: 0;
|
|
|
|
|
off_t memfd_size = chain->scrollable && max_pool_size > 0
|
|
|
|
|
? max_pool_size
|
|
|
|
|
: total_size;
|
2021-01-21 12:39:51 +01:00
|
|
|
#else
|
2021-07-15 18:32:19 +02:00
|
|
|
off_t offset = 0;
|
|
|
|
|
off_t memfd_size = total_size;
|
2020-03-25 18:26:58 +01:00
|
|
|
#endif
|
2020-03-24 17:46:48 +01:00
|
|
|
|
shm: refactor: move away from a single, global, buffer list
Up until now, *all* buffers have been tracked in a single, global
buffer list. We've used 'cookies' to separate buffers from different
contexts (so that shm_get_buffer() doesn't try to re-use e.g. a
search-box buffer for the main grid).
This patch refactors this, and completely removes the global
list.
Instead of cookies, we now use 'chains'. A chain tracks both the
properties to apply to newly created buffers (scrollable, number of
pixman instances to instantiate etc), as well as the instantiated
buffers themselves.
This means there's strictly speaking not much use for shm_fini()
anymore, since its up to the chain owner to call shm_chain_free(),
which will also purge all buffers.
However, since purging a buffer may be deferred, if the buffer is
owned by the compositor at the time of the call to shm_purge() or
shm_chain_free(), we still keep a global 'deferred' list, on to which
deferred buffers are pushed. shm_fini() iterates this list and
destroys the buffers _even_ if they are still owned by the
compositor. This only happens at program termination, and not when
destroying a terminal instance. I.e. closing a window in a “foot
--server” does *not* trigger this.
Each terminal instatiates a number of chains, and these chains are
destroyed when the terminal instance is destroyed. Note that some
buffers may be put on the deferred list, as mentioned above.
2021-07-16 16:48:49 +02:00
|
|
|
xassert(chain->scrollable || (offset == 0 && memfd_size == total_size));
|
2021-07-15 18:32:19 +02:00
|
|
|
|
|
|
|
|
LOG_DBG("memfd-size: %lu, initial offset: %lu", memfd_size, offset);
|
2020-03-25 20:48:02 +01:00
|
|
|
|
2020-03-24 17:46:48 +01:00
|
|
|
if (ftruncate(pool_fd, memfd_size) == -1) {
|
2020-03-25 18:26:58 +01:00
|
|
|
LOG_ERRNO("failed to set size of SHM backing memory file");
|
2019-06-12 20:08:54 +02:00
|
|
|
goto err;
|
|
|
|
|
}
|
|
|
|
|
|
2020-03-22 20:36:15 +01:00
|
|
|
if (!can_punch_hole_initialized) {
|
|
|
|
|
can_punch_hole_initialized = true;
|
2021-01-21 12:39:51 +01:00
|
|
|
#if __SIZEOF_POINTER__ == 8 && defined(FALLOC_FL_PUNCH_HOLE)
|
2020-03-22 20:36:15 +01:00
|
|
|
can_punch_hole = fallocate(
|
|
|
|
|
pool_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 1) == 0;
|
2020-03-22 21:04:00 +01:00
|
|
|
|
|
|
|
|
if (!can_punch_hole) {
|
|
|
|
|
LOG_WARN(
|
|
|
|
|
"fallocate(FALLOC_FL_PUNCH_HOLE) not "
|
|
|
|
|
"supported (%s): expect lower performance", strerror(errno));
|
|
|
|
|
}
|
2020-08-11 17:22:12 +02:00
|
|
|
#else
|
|
|
|
|
/* This is mostly to make sure we skip the warning issued
|
|
|
|
|
* above */
|
|
|
|
|
can_punch_hole = false;
|
|
|
|
|
#endif
|
2020-03-22 20:36:15 +01:00
|
|
|
}
|
|
|
|
|
|
shm: refactor: move away from a single, global, buffer list
Up until now, *all* buffers have been tracked in a single, global
buffer list. We've used 'cookies' to separate buffers from different
contexts (so that shm_get_buffer() doesn't try to re-use e.g. a
search-box buffer for the main grid).
This patch refactors this, and completely removes the global
list.
Instead of cookies, we now use 'chains'. A chain tracks both the
properties to apply to newly created buffers (scrollable, number of
pixman instances to instantiate etc), as well as the instantiated
buffers themselves.
This means there's strictly speaking not much use for shm_fini()
anymore, since its up to the chain owner to call shm_chain_free(),
which will also purge all buffers.
However, since purging a buffer may be deferred, if the buffer is
owned by the compositor at the time of the call to shm_purge() or
shm_chain_free(), we still keep a global 'deferred' list, on to which
deferred buffers are pushed. shm_fini() iterates this list and
destroys the buffers _even_ if they are still owned by the
compositor. This only happens at program termination, and not when
destroying a terminal instance. I.e. closing a window in a “foot
--server” does *not* trigger this.
Each terminal instatiates a number of chains, and these chains are
destroyed when the terminal instance is destroyed. Note that some
buffers may be put on the deferred list, as mentioned above.
2021-07-16 16:48:49 +02:00
|
|
|
if (chain->scrollable && !can_punch_hole) {
|
2021-07-15 18:32:19 +02:00
|
|
|
offset = 0;
|
|
|
|
|
memfd_size = total_size;
|
shm: refactor: move away from a single, global, buffer list
Up until now, *all* buffers have been tracked in a single, global
buffer list. We've used 'cookies' to separate buffers from different
contexts (so that shm_get_buffer() doesn't try to re-use e.g. a
search-box buffer for the main grid).
This patch refactors this, and completely removes the global
list.
Instead of cookies, we now use 'chains'. A chain tracks both the
properties to apply to newly created buffers (scrollable, number of
pixman instances to instantiate etc), as well as the instantiated
buffers themselves.
This means there's strictly speaking not much use for shm_fini()
anymore, since its up to the chain owner to call shm_chain_free(),
which will also purge all buffers.
However, since purging a buffer may be deferred, if the buffer is
owned by the compositor at the time of the call to shm_purge() or
shm_chain_free(), we still keep a global 'deferred' list, on to which
deferred buffers are pushed. shm_fini() iterates this list and
destroys the buffers _even_ if they are still owned by the
compositor. This only happens at program termination, and not when
destroying a terminal instance. I.e. closing a window in a “foot
--server” does *not* trigger this.
Each terminal instatiates a number of chains, and these chains are
destroyed when the terminal instance is destroyed. Note that some
buffers may be put on the deferred list, as mentioned above.
2021-07-16 16:48:49 +02:00
|
|
|
chain->scrollable = false;
|
2020-03-25 18:32:41 +01:00
|
|
|
|
|
|
|
|
if (ftruncate(pool_fd, memfd_size) < 0) {
|
|
|
|
|
LOG_ERRNO("failed to set size of SHM backing memory file");
|
|
|
|
|
goto err;
|
|
|
|
|
}
|
2020-03-25 18:26:58 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
real_mmapped = mmap(
|
|
|
|
|
NULL, memfd_size, PROT_READ | PROT_WRITE,
|
|
|
|
|
MAP_SHARED | MAP_UNINITIALIZED, pool_fd, 0);
|
|
|
|
|
|
|
|
|
|
if (real_mmapped == MAP_FAILED) {
|
|
|
|
|
LOG_ERRNO("failed to mmap SHM backing memory file");
|
|
|
|
|
goto err;
|
|
|
|
|
}
|
|
|
|
|
|
2021-01-19 14:20:55 +00:00
|
|
|
#if defined(MEMFD_CREATE)
|
2020-03-25 18:30:21 +01:00
|
|
|
/* Seal file - we no longer allow any kind of resizing */
|
|
|
|
|
/* TODO: Wayland mmap()s with PROT_WRITE for some unknown reason, hence we cannot use F_SEAL_FUTURE_WRITE */
|
|
|
|
|
if (fcntl(pool_fd, F_ADD_SEALS,
|
|
|
|
|
F_SEAL_GROW | F_SEAL_SHRINK | /*F_SEAL_FUTURE_WRITE |*/ F_SEAL_SEAL) < 0)
|
|
|
|
|
{
|
|
|
|
|
LOG_ERRNO("failed to seal SHM backing memory file");
|
2020-04-03 20:14:53 +02:00
|
|
|
/* This is not a fatal error */
|
2020-03-25 18:30:21 +01:00
|
|
|
}
|
2021-01-19 14:20:55 +00:00
|
|
|
#endif
|
2020-03-25 18:30:21 +01:00
|
|
|
|
2021-07-16 16:48:49 +02:00
|
|
|
wl_pool = wl_shm_create_pool(chain->shm, pool_fd, memfd_size);
|
2021-07-15 18:32:19 +02:00
|
|
|
if (wl_pool == NULL) {
|
2020-03-25 18:26:58 +01:00
|
|
|
LOG_ERR("failed to create SHM pool");
|
|
|
|
|
goto err;
|
|
|
|
|
}
|
|
|
|
|
|
2021-07-16 16:48:49 +02:00
|
|
|
pool = xmalloc(sizeof(*pool));
|
2021-07-15 18:32:19 +02:00
|
|
|
if (pool == NULL) {
|
|
|
|
|
LOG_ERRNO("failed to allocate buffer pool");
|
2020-03-22 20:06:44 +01:00
|
|
|
goto err;
|
2021-07-15 18:32:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
*pool = (struct buffer_pool){
|
|
|
|
|
.fd = pool_fd,
|
|
|
|
|
.wl_pool = wl_pool,
|
|
|
|
|
.real_mmapped = real_mmapped,
|
|
|
|
|
.mmap_size = memfd_size,
|
|
|
|
|
.ref_count = 0,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
for (size_t i = 0; i < count; i++) {
|
2021-07-18 16:44:49 +02:00
|
|
|
if (sizes[i] == 0) {
|
|
|
|
|
bufs[i] = NULL;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2021-07-15 18:32:19 +02:00
|
|
|
|
|
|
|
|
/* Push to list of available buffers, but marked as 'busy' */
|
2021-07-16 16:48:18 +02:00
|
|
|
struct buffer_private *buf = xmalloc(sizeof(*buf));
|
|
|
|
|
*buf = (struct buffer_private){
|
|
|
|
|
.public = {
|
2021-07-16 16:48:49 +02:00
|
|
|
.width = widths[i],
|
|
|
|
|
.height = heights[i],
|
2021-07-16 16:48:18 +02:00
|
|
|
.stride = stride[i],
|
2021-07-16 16:48:49 +02:00
|
|
|
.pix_instances = chain->pix_instances,
|
2021-07-16 16:48:18 +02:00
|
|
|
.age = 1234, /* Force a full repaint */
|
|
|
|
|
},
|
2021-07-16 16:48:49 +02:00
|
|
|
.chain = chain,
|
2021-07-16 16:48:18 +02:00
|
|
|
.ref_count = immediate_purge ? 0 : 1,
|
|
|
|
|
.busy = true,
|
2024-02-21 16:29:10 +01:00
|
|
|
.with_alpha = with_alpha,
|
2021-07-16 16:48:18 +02:00
|
|
|
.pool = pool,
|
|
|
|
|
.offset = 0,
|
2021-07-16 16:48:49 +02:00
|
|
|
.size = sizes[i],
|
|
|
|
|
.scrollable = chain->scrollable,
|
2021-07-16 16:48:18 +02:00
|
|
|
};
|
|
|
|
|
|
2021-07-16 16:48:49 +02:00
|
|
|
if (!instantiate_offset(buf, offset)) {
|
2021-07-16 16:48:18 +02:00
|
|
|
free(buf);
|
2021-07-15 18:32:19 +02:00
|
|
|
goto err;
|
2021-07-16 16:48:18 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (immediate_purge)
|
|
|
|
|
tll_push_front(deferred, buf);
|
|
|
|
|
else
|
2021-07-16 16:48:49 +02:00
|
|
|
tll_push_front(chain->bufs, buf);
|
2020-04-18 12:24:40 +02:00
|
|
|
|
2024-01-25 07:03:50 +00:00
|
|
|
buf->public.dirty = xmalloc(
|
2023-10-07 16:23:09 +02:00
|
|
|
chain->pix_instances * sizeof(buf->public.dirty[0]));
|
|
|
|
|
|
|
|
|
|
for (size_t j = 0; j < chain->pix_instances; j++)
|
|
|
|
|
pixman_region32_init(&buf->public.dirty[j]);
|
|
|
|
|
|
2021-07-15 18:32:19 +02:00
|
|
|
pool->ref_count++;
|
2021-07-15 22:30:08 +02:00
|
|
|
offset += buf->size;
|
2021-07-15 22:17:12 +02:00
|
|
|
bufs[i] = &buf->public;
|
2021-07-15 18:32:19 +02:00
|
|
|
}
|
2021-05-07 20:20:47 +02:00
|
|
|
|
2020-04-18 12:24:40 +02:00
|
|
|
#if defined(MEASURE_SHM_ALLOCS) && MEASURE_SHM_ALLOCS
|
|
|
|
|
{
|
|
|
|
|
size_t currently_alloced = 0;
|
|
|
|
|
tll_foreach(buffers, it)
|
|
|
|
|
currently_alloced += it->item.size;
|
|
|
|
|
if (currently_alloced > max_alloced)
|
|
|
|
|
max_alloced = currently_alloced;
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2021-09-23 22:17:43 +00:00
|
|
|
if (!(bufs[0] && shm_can_scroll(bufs[0]))) {
|
2024-02-06 12:36:45 +01:00
|
|
|
/* We only need to keep the pool FD open if we're going to SHM
|
2021-07-11 10:22:47 +02:00
|
|
|
* scroll it */
|
|
|
|
|
close(pool_fd);
|
2021-07-15 18:32:19 +02:00
|
|
|
pool->fd = -1;
|
2021-07-11 10:22:47 +02:00
|
|
|
}
|
|
|
|
|
|
2021-07-15 18:32:19 +02:00
|
|
|
return;
|
2019-06-12 20:08:54 +02:00
|
|
|
|
|
|
|
|
err:
|
2021-07-15 18:32:19 +02:00
|
|
|
pool_unref(pool);
|
|
|
|
|
if (wl_pool != NULL)
|
|
|
|
|
wl_shm_pool_destroy(wl_pool);
|
2020-03-25 18:26:58 +01:00
|
|
|
if (real_mmapped != MAP_FAILED)
|
|
|
|
|
munmap(real_mmapped, memfd_size);
|
2019-06-12 20:08:54 +02:00
|
|
|
if (pool_fd != -1)
|
|
|
|
|
close(pool_fd);
|
|
|
|
|
|
2020-03-10 18:02:10 +01:00
|
|
|
/* We don't handle this */
|
|
|
|
|
abort();
|
2021-07-15 18:32:19 +02:00
|
|
|
}
|
|
|
|
|
|
2022-04-16 17:47:56 +02:00
|
|
|
void
|
|
|
|
|
shm_did_not_use_buf(struct buffer *_buf)
|
|
|
|
|
{
|
|
|
|
|
struct buffer_private *buf = (struct buffer_private *)_buf;
|
|
|
|
|
buf->busy = false;
|
|
|
|
|
}
|
|
|
|
|
|
2021-07-15 18:32:19 +02:00
|
|
|
void
|
2021-07-16 16:48:49 +02:00
|
|
|
shm_get_many(struct buffer_chain *chain, size_t count,
|
|
|
|
|
int widths[static count], int heights[static count],
|
2024-02-21 16:29:10 +01:00
|
|
|
struct buffer *bufs[static count], bool with_alpha)
|
2021-07-15 18:32:19 +02:00
|
|
|
{
|
2024-02-21 16:29:10 +01:00
|
|
|
get_new_buffers(chain, count, widths, heights, bufs, with_alpha, true);
|
2021-07-15 18:32:19 +02:00
|
|
|
}
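As a usage illustration only: a hedged sketch of how a caller might request
buffers through these entry points, assuming a struct buffer_chain created
elsewhere; the dimensions are made up and all rendering and Wayland surface
plumbing is omitted.

/* Hedged sketch; 'chain' is assumed to have been created elsewhere */
struct buffer *single = shm_get_buffer(chain, 800, 600, false);
/* draw into single->data, then attach single->wl_buf to a surface */

int widths[2] = {800, 600};
int heights[2] = {600, 32};
struct buffer *many[2];
shm_get_many(chain, 2, widths, heights, many, true);
/* entries whose computed size is 0 come back as NULL
 * (see the sizes[i] == 0 check in get_new_buffers()) */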
|
|
|
|
|
|
|
|
|
|
struct buffer *
|
2024-02-21 16:29:10 +01:00
|
|
|
shm_get_buffer(struct buffer_chain *chain, int width, int height, bool with_alpha)
|
2021-07-15 18:32:19 +02:00
|
|
|
{
|
2021-07-16 16:49:52 +02:00
|
|
|
LOG_DBG(
|
2023-10-03 14:12:58 +02:00
|
|
|
"chain=%p: looking for a reusable %dx%d buffer "
|
2021-07-16 16:49:52 +02:00
|
|
|
"among %zu potential buffers",
|
|
|
|
|
(void *)chain, width, height, tll_length(chain->bufs));
|
|
|
|
|
|
2021-07-15 22:17:12 +02:00
|
|
|
struct buffer_private *cached = NULL;
|
2021-07-16 16:48:49 +02:00
|
|
|
tll_foreach(chain->bufs, it) {
|
2021-07-16 16:48:18 +02:00
|
|
|
struct buffer_private *buf = it->item;
|
|
|
|
|
|
2024-06-24 17:55:07 +02:00
|
|
|
if (buf->public.width != width || buf->public.height != height ||
|
|
|
|
|
with_alpha != buf->with_alpha)
|
|
|
|
|
{
|
2021-07-16 16:49:27 +02:00
|
|
|
LOG_DBG("purging mismatching buffer %p", (void *)buf);
|
|
|
|
|
if (buffer_unref_no_remove_from_chain(buf))
|
|
|
|
|
tll_remove(chain->bufs, it);
|
2021-07-15 18:32:19 +02:00
|
|
|
continue;
|
2021-07-16 16:49:27 +02:00
|
|
|
}
|
2021-07-15 18:32:19 +02:00
|
|
|
|
2021-07-16 16:48:18 +02:00
|
|
|
if (buf->busy)
|
|
|
|
|
buf->public.age++;
|
2021-07-15 18:32:19 +02:00
|
|
|
else
|
|
|
|
|
#if FORCED_DOUBLE_BUFFERING
|
2024-09-20 17:06:47 +02:00
|
|
|
if (buf->public.age == 0)
|
|
|
|
|
buf->public.age++;
|
2021-07-15 18:32:19 +02:00
|
|
|
else
|
|
|
|
|
#endif
|
|
|
|
|
{
|
shm: get_buffer(): make sure buffer->busy is set
When going through the cached buffers, we only set buffer->busy on
the *first* re-usable buffer we find.
In some cases, we will find more than one re-usable buffer. We then
select the “youngest” one (i.e. the one most recently used), in the
hope that we can use damage tracking instead of re-rendering the
entire buffer.
If the “current” buffer was younger than the previously detected
re-usable buffer, we unref'd the previously selected buffer and
replaced it with the current one.
But we did not sanitize it. That is, we did not:
* set buffer->busy
* clear its dirty region
* clear its scroll damage
That buffer would eventually get rendered to, and committed to the
compositor. Later, the compositor would release it, and there, in our
buffer_release() callback, we'd assert that buffer->busy was
set. And fail.
Closes #844
2021-12-15 12:37:21 +01:00
|
|
|
if (cached == NULL)
|
|
|
|
|
cached = buf;
|
|
|
|
|
else {
|
2021-07-15 19:40:15 +02:00
|
|
|
/* We have multiple buffers eligible for
|
2024-02-06 12:36:45 +01:00
|
|
|
* reuse. Pick the "youngest" one, and mark the
|
shm: auto-purge when we have multiple buffers eligible for re-use
It may happen that we end up with multiple non-busy, same-sized
buffers for the same cookie (context), all eligible for re-use.
Before this patch, we would keep all those buffers around. This is
completely unnecessary. Under normal circumstances, we'll either be
re-using a single buffer, or swapping between two. In the second case,
the “other” buffer is always busy, and thus not eligible for re-use.
So, if we _do_ detect multiple re-usable buffers, pick the one with
the lowest “age” (increasing the chance of applying damage tracking
instead of re-drawing everything), and mark the other one for purging.
2021-07-15 19:19:31 +02:00
|
|
|
* other one for purging */
|
2021-07-16 16:48:18 +02:00
|
|
|
if (buf->public.age < cached->public.age) {
|
2021-07-16 16:47:57 +02:00
|
|
|
shm_unref(&cached->public);
|
2021-07-16 16:48:18 +02:00
|
|
|
cached = buf;
|
2021-07-16 16:47:57 +02:00
|
|
|
} else {
|
2021-12-15 12:37:21 +01:00
|
|
|
/*
|
|
|
|
|
* TODO: I think we _can_ use shm_unref()
|
|
|
|
|
* here...
|
|
|
|
|
*
|
2024-02-06 12:36:45 +01:00
|
|
|
* shm_unref() may remove 'it', but that
|
|
|
|
|
* should be safe; "our" tll_foreach() already
|
2021-12-15 12:37:21 +01:00
|
|
|
* holds the next pointer.
|
|
|
|
|
*/
|
2021-07-16 16:48:49 +02:00
|
|
|
if (buffer_unref_no_remove_from_chain(buf))
|
|
|
|
|
tll_remove(chain->bufs, it);
|
2021-07-16 16:47:57 +02:00
|
|
|
}
|
2021-07-15 18:32:19 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-12-15 12:37:21 +01:00
|
|
|
if (cached != NULL) {
|
2025-01-22 07:50:49 +01:00
|
|
|
LOG_DBG("reusing buffer %p from cache", (void *)cached);
|
2021-12-15 12:37:21 +01:00
|
|
|
cached->busy = true;
|
2023-10-07 16:23:09 +02:00
|
|
|
for (size_t i = 0; i < cached->public.pix_instances; i++)
|
|
|
|
|
pixman_region32_clear(&cached->public.dirty[i]);
|
2021-12-15 12:37:21 +01:00
|
|
|
xassert(cached->public.pix_instances == chain->pix_instances);
|
2021-07-15 22:17:12 +02:00
|
|
|
return &cached->public;
|
2021-12-15 12:37:21 +01:00
|
|
|
}
|
2021-07-15 18:32:19 +02:00
|
|
|
|
|
|
|
|
struct buffer *ret;
|
2024-02-21 16:29:10 +01:00
|
|
|
get_new_buffers(chain, 1, &width, &height, &ret, with_alpha, false);
|
2021-07-15 18:32:19 +02:00
|
|
|
return ret;
|
2019-06-12 20:08:54 +02:00
|
|
|
}
|
|
|
|
|
|
2020-03-23 19:31:05 +01:00
|
|
|
bool
|
2021-07-15 22:17:12 +02:00
|
|
|
shm_can_scroll(const struct buffer *_buf)
|
2020-03-23 19:31:05 +01:00
|
|
|
{
|
2021-01-21 12:39:51 +01:00
|
|
|
#if __SIZEOF_POINTER__ == 8
|
2021-07-15 22:17:12 +02:00
|
|
|
const struct buffer_private *buf = (const struct buffer_private *)_buf;
|
2021-01-21 12:39:51 +01:00
|
|
|
return can_punch_hole && max_pool_size > 0 && buf->scrollable;
|
|
|
|
|
#else
|
2020-03-24 17:46:48 +01:00
|
|
|
/* Not enough virtual address space in 32-bit */
|
|
|
|
|
return false;
|
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
|
2021-01-21 12:39:51 +01:00
|
|
|
#if __SIZEOF_POINTER__ == 8 && defined(FALLOC_FL_PUNCH_HOLE)
|
2020-03-24 17:46:48 +01:00
|
|
|
static bool
|
2021-07-16 16:48:49 +02:00
|
|
|
wrap_buffer(struct buffer_private *buf, off_t new_offset)
|
2020-03-24 17:46:48 +01:00
|
|
|
{
|
2021-07-15 18:32:19 +02:00
|
|
|
struct buffer_pool *pool = buf->pool;
|
|
|
|
|
xassert(pool->ref_count == 1);
|
|
|
|
|
|
2020-03-25 18:26:58 +01:00
|
|
|
/* We don't allow overlapping offsets */
|
2021-07-15 22:17:12 +02:00
|
|
|
off_t UNUSED diff = new_offset < buf->offset
|
|
|
|
|
? buf->offset - new_offset
|
|
|
|
|
: new_offset - buf->offset;
|
2021-07-15 22:30:08 +02:00
|
|
|
xassert(diff > buf->size);
|
2020-03-25 18:26:58 +01:00
|
|
|
|
2021-07-15 22:17:12 +02:00
|
|
|
memcpy((uint8_t *)pool->real_mmapped + new_offset,
|
2021-07-15 22:18:09 +02:00
|
|
|
buf->public.data,
|
2021-07-15 22:30:08 +02:00
|
|
|
buf->size);
|
2020-03-25 18:26:58 +01:00
|
|
|
|
|
|
|
|
off_t trim_ofs, trim_len;
|
|
|
|
|
if (new_offset > buf->offset) {
|
|
|
|
|
/* Trim everything *before* the new offset */
|
|
|
|
|
trim_ofs = 0;
|
|
|
|
|
trim_len = new_offset;
|
|
|
|
|
} else {
|
|
|
|
|
/* Trim everything *after* the new buffer location */
|
2021-07-15 22:30:08 +02:00
|
|
|
trim_ofs = new_offset + buf->size;
|
2021-07-15 18:32:19 +02:00
|
|
|
trim_len = pool->mmap_size - trim_ofs;
|
2020-03-25 18:26:58 +01:00
|
|
|
}
|
2020-03-24 17:46:48 +01:00
|
|
|
|
2020-03-25 18:26:58 +01:00
|
|
|
if (fallocate(
|
2021-07-15 18:32:19 +02:00
|
|
|
pool->fd,
|
2020-03-25 18:26:58 +01:00
|
|
|
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
|
|
|
|
|
trim_ofs, trim_len) < 0)
|
|
|
|
|
{
|
|
|
|
|
LOG_ERRNO("failed to trim SHM backing memory file");
|
2020-03-24 17:46:48 +01:00
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Re-instantiate pixman+wl_buffer+raw pointers */
|
2021-07-15 22:17:12 +02:00
|
|
|
buffer_destroy_dont_close(&buf->public);
|
2021-07-16 16:48:49 +02:00
|
|
|
return instantiate_offset(buf, new_offset);
|
2020-03-23 19:31:05 +01:00
|
|
|
}
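To make the offset arithmetic in the scroll-forward path below concrete, here
is a small worked example with made-up numbers (a 640x480 ARGB32 buffer, i.e.
a 2560-byte stride); it only restates what the code computes.

/* Hypothetical numbers, for illustration only */
int rows = 16;
int stride = 640 * 4;                 /* 2560 bytes per pixel row           */
off_t size = (off_t)stride * 480;     /* 1228800-byte buffer                */
off_t offset = 0;                     /* buffer's current start in the pool */

off_t diff = (off_t)rows * stride;    /* 40960 bytes to scroll forward      */
off_t new_offset = offset + diff;     /* buffer now starts at byte 40960    */
/* Everything in [0, new_offset) is then released with
 * fallocate(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE), keeping resident
 * memory at roughly 'size'. When offset + diff + size would exceed
 * max_pool_size, wrap_buffer() first copies the buffer back to offset 0. */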
|
|
|
|
|
|
2020-03-23 21:14:51 +01:00
|
|
|
static bool
|
2021-07-16 16:48:49 +02:00
|
|
|
shm_scroll_forward(struct buffer_private *buf, int rows,
|
2020-03-23 21:14:51 +01:00
|
|
|
int top_margin, int top_keep_rows,
|
|
|
|
|
int bottom_margin, int bottom_keep_rows)
|
2020-03-22 20:06:44 +01:00
|
|
|
{
|
2021-07-15 18:32:19 +02:00
|
|
|
struct buffer_pool *pool = buf->pool;
|
|
|
|
|
|
2021-01-16 20:16:00 +00:00
|
|
|
xassert(can_punch_hole);
|
|
|
|
|
xassert(buf->busy);
|
2021-07-15 22:17:12 +02:00
|
|
|
xassert(buf->public.pix != NULL);
|
|
|
|
|
xassert(buf->public.wl_buf != NULL);
|
2021-07-15 18:32:19 +02:00
|
|
|
xassert(pool != NULL);
|
|
|
|
|
xassert(pool->ref_count == 1);
|
|
|
|
|
xassert(pool->fd >= 0);
|
2020-03-22 20:06:44 +01:00
|
|
|
|
2021-07-16 16:47:15 +02:00
|
|
|
LOG_DBG("scrolling %d rows (%d bytes)", rows, rows * buf->public.stride);
|
2020-03-22 20:06:44 +01:00
|
|
|
|
2021-07-15 22:17:12 +02:00
|
|
|
const off_t diff = rows * buf->public.stride;
|
2021-01-16 20:16:00 +00:00
|
|
|
xassert(rows > 0);
|
2021-07-15 22:30:08 +02:00
|
|
|
xassert(diff < buf->size);
|
2020-03-24 17:46:48 +01:00
|
|
|
|
2021-07-15 22:30:08 +02:00
|
|
|
if (buf->offset + diff + buf->size > max_pool_size) {
|
2020-03-25 18:26:58 +01:00
|
|
|
LOG_DBG("memfd offset wrap around");
|
2021-07-16 16:48:49 +02:00
|
|
|
if (!wrap_buffer(buf, 0))
|
2020-03-24 17:46:48 +01:00
|
|
|
goto err;
|
|
|
|
|
}
|
|
|
|
|
|
2020-03-25 18:26:58 +01:00
|
|
|
off_t new_offset = buf->offset + diff;
|
2021-01-16 20:16:00 +00:00
|
|
|
xassert(new_offset > buf->offset);
|
2021-07-15 22:30:08 +02:00
|
|
|
xassert(new_offset + buf->size <= max_pool_size);
|
2020-03-22 20:06:44 +01:00
|
|
|
|
|
|
|
|
#if TIME_SCROLL
|
2022-01-15 14:56:13 +05:30
|
|
|
struct timespec tot;
|
|
|
|
|
struct timespec time1;
|
|
|
|
|
clock_gettime(CLOCK_MONOTONIC, &time1);
|
2020-03-22 20:06:44 +01:00
|
|
|
|
2022-01-15 14:56:13 +05:30
|
|
|
struct timespec time2 = time1;
|
2020-03-23 20:45:27 +01:00
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
if (top_keep_rows > 0) {
|
|
|
|
|
/* Copy current 'top' region to its new location */
|
2021-07-15 22:17:12 +02:00
|
|
|
const int stride = buf->public.stride;
|
2021-07-15 22:18:09 +02:00
|
|
|
uint8_t *base = buf->public.data;
|
2021-07-15 22:17:12 +02:00
|
|
|
|
2020-03-23 20:45:27 +01:00
|
|
|
memmove(
|
2021-07-15 22:17:12 +02:00
|
|
|
base + (top_margin + rows) * stride,
|
|
|
|
|
base + (top_margin + 0) * stride,
|
|
|
|
|
top_keep_rows * stride);
|
2020-03-23 20:45:27 +01:00
|
|
|
|
|
|
|
|
#if TIME_SCROLL
|
2022-01-15 14:56:13 +05:30
|
|
|
clock_gettime(CLOCK_MONOTONIC, &time2);
|
|
|
|
|
timespec_sub(&time2, &time1, &tot);
|
|
|
|
|
LOG_INFO("memmove (top region): %lds %ldns",
|
|
|
|
|
(long)tot.tv_sec, tot.tv_nsec);
|
2020-03-22 20:06:44 +01:00
|
|
|
#endif
|
2020-03-23 20:45:27 +01:00
|
|
|
}
|
2020-03-22 20:06:44 +01:00
|
|
|
|
|
|
|
|
/* Destroy old objects (they point to the old offset) */
|
2021-07-15 22:17:12 +02:00
|
|
|
buffer_destroy_dont_close(&buf->public);
|
2020-03-22 20:06:44 +01:00
|
|
|
|
2020-03-25 18:26:58 +01:00
|
|
|
/* Free unused memory - everything up until the new offset */
|
|
|
|
|
const off_t trim_ofs = 0;
|
|
|
|
|
const off_t trim_len = new_offset;
|
|
|
|
|
|
|
|
|
|
if (fallocate(
|
2021-07-15 18:32:19 +02:00
|
|
|
pool->fd,
|
2020-03-25 18:26:58 +01:00
|
|
|
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
|
|
|
|
|
trim_ofs, trim_len) < 0)
|
2020-03-24 17:46:48 +01:00
|
|
|
{
|
2020-03-25 18:26:58 +01:00
|
|
|
LOG_ERRNO("failed to trim SHM backing memory file");
|
2020-03-24 17:46:48 +01:00
|
|
|
goto err;
|
2020-03-22 20:06:44 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#if TIME_SCROLL
|
2022-01-15 14:56:13 +05:30
|
|
|
struct timespec time3;
|
|
|
|
|
clock_gettime(CLOCK_MONOTONIC, &time3);
|
|
|
|
|
timespec_sub(&time3, &time2, &tot);
|
|
|
|
|
LOG_INFO("PUNCH HOLE: %lds %ldns", (long)tot.tv_sec, tot.tv_nsec);
|
2020-03-22 20:06:44 +01:00
|
|
|
#endif

    /* Re-instantiate pixman+wl_buffer+raw pointers */
    bool ret = instantiate_offset(buf, new_offset);

#if TIME_SCROLL
    struct timespec time4;
    clock_gettime(CLOCK_MONOTONIC, &time4);
    timespec_sub(&time4, &time3, &tot);
    LOG_INFO("instantiate offset: %lds %ldns", (long)tot.tv_sec, tot.tv_nsec);
#endif

    if (ret && bottom_keep_rows > 0) {
        /* Copy 'bottom' region to its new location */
        const size_t size = buf->size;
        const int stride = buf->public.stride;
        uint8_t *base = buf->public.data;
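
        /*
         * The window just advanced by 'rows' rows, so the preserved bottom
         * region now appears 'rows' rows higher up; copy it back down to its
         * original on-screen position.
         */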
        memmove(
            base + size - (bottom_margin + bottom_keep_rows) * stride,
            base + size - (bottom_margin + rows + bottom_keep_rows) * stride,
            bottom_keep_rows * stride);

#if TIME_SCROLL
        struct timespec time5;
        clock_gettime(CLOCK_MONOTONIC, &time5);
        timespec_sub(&time5, &time4, &tot);
        LOG_INFO("memmove (bottom region): %lds %ldns",
                 (long)tot.tv_sec, tot.tv_nsec);
#endif
    }

    return ret;

err:
    abort();
    return false;
}
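
/*
 * Scroll the buffer in reverse by 'rows' pixel rows: the buffer offset is
 * moved towards the beginning of the memfd. If the offset would become
 * negative, the buffer is first relocated ("wrapped") near the end of the
 * pool, so that reverse scrolling can continue from there.
 */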
static bool
shm_scroll_reverse(struct buffer_private *buf, int rows,
                   int top_margin, int top_keep_rows,
                   int bottom_margin, int bottom_keep_rows)
{
    xassert(rows > 0);

    struct buffer_pool *pool = buf->pool;
    xassert(pool->ref_count == 1);

    const off_t diff = rows * buf->public.stride;
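
    /*
     * A reverse scroll moves the offset backwards; if it would go below zero
     * we cannot simply shrink it. Instead, relocate (wrap) the buffer near
     * the end of the pool and continue scrolling backwards from there.
     */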
    if (diff > buf->offset) {
        LOG_DBG("memfd offset reverse wrap-around");
        if (!wrap_buffer(buf, (max_pool_size - buf->size) & ~(page_size() - 1)))
            goto err;
    }

    off_t new_offset = buf->offset - diff;
    xassert(new_offset < buf->offset);
    xassert(new_offset <= max_pool_size);

#if TIME_SCROLL
    struct timespec time0;
    clock_gettime(CLOCK_MONOTONIC, &time0);
    struct timespec tot;
    struct timespec time1 = time0;
#endif

    if (bottom_keep_rows > 0) {
        /* Copy 'bottom' region to its new location */
        const size_t size = buf->size;
        const int stride = buf->public.stride;
        uint8_t *base = buf->public.data;
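
        /*
         * 'base' still maps the old offset here; move the preserved bottom
         * region up by 'rows' rows now, so that it lands at the same
         * on-screen position once the window has moved backwards.
         */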
        memmove(
            base + size - (bottom_margin + rows + bottom_keep_rows) * stride,
            base + size - (bottom_margin + bottom_keep_rows) * stride,
            bottom_keep_rows * stride);

#if TIME_SCROLL
        clock_gettime(CLOCK_MONOTONIC, &time1);
        timespec_sub(&time1, &time0, &tot);
        LOG_INFO("memmove (bottom region): %lds %ldns",
                 (long)tot.tv_sec, tot.tv_nsec);
#endif
    }

    /* Destroy old objects (they point to the old offset) */
    buffer_destroy_dont_close(&buf->public);

    /* Free unused memory - everything after the relocated buffer */
    const off_t trim_ofs = new_offset + buf->size;
    const off_t trim_len = pool->mmap_size - trim_ofs;
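
    /*
     * When scrolling in reverse the now-unused region is the tail of the
     * mapping, everything from new_offset + buf->size up to pool->mmap_size;
     * punch it out to keep resident memory bounded.
     */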
    if (fallocate(
            pool->fd,
            FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
            trim_ofs, trim_len) < 0)
    {
        LOG_ERRNO("failed to trim SHM backing memory");
        goto err;
    }

#if TIME_SCROLL
    struct timespec time2;
    clock_gettime(CLOCK_MONOTONIC, &time2);
    timespec_sub(&time2, &time1, &tot);
    LOG_INFO("fallocate: %lds %ldns", (long)tot.tv_sec, tot.tv_nsec);
#endif

    /* Re-instantiate pixman+wl_buffer+raw pointers */
    bool ret = instantiate_offset(buf, new_offset);

#if TIME_SCROLL
    struct timespec time3;
    clock_gettime(CLOCK_MONOTONIC, &time3);
    timespec_sub(&time3, &time2, &tot);
    LOG_INFO("instantiate offset: %lds %ldns", (long)tot.tv_sec, tot.tv_nsec);
#endif

    if (ret && top_keep_rows > 0) {
        /* Copy current 'top' region to its new location */
        const int stride = buf->public.stride;
        uint8_t *base = buf->public.data;
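
        /*
         * 'base' now maps the new (smaller) offset, so the preserved top
         * region appears 'rows' rows lower than before; copy it back up to
         * the top margin.
         */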
        memmove(
            base + (top_margin + 0) * stride,
            base + (top_margin + rows) * stride,
            top_keep_rows * stride);

#if TIME_SCROLL
        struct timespec time4;
        clock_gettime(CLOCK_MONOTONIC, &time4);
        timespec_sub(&time4, &time3, &tot);
        LOG_INFO("memmove (top region): %lds %ldns",
                 (long)tot.tv_sec, tot.tv_nsec);
#endif
    }

    return ret;

err:
    abort();
    return false;
}

#endif /* FALLOC_FL_PUNCH_HOLE */
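
/*
 * Scroll the buffer contents by 'rows' pixel rows: a positive count scrolls
 * forward, a negative count scrolls in reverse. Rows inside the top/bottom
 * margins can be preserved via top_keep_rows/bottom_keep_rows. Returns false
 * when offset-based scrolling is unavailable, in which case the caller would
 * presumably fall back to redrawing. A hypothetical call site, roughly:
 *
 *     if (!shm_scroll(buf, scroll_px, top_margin_px, top_keep_px,
 *                     bottom_margin_px, bottom_keep_px))
 *         rerender_the_whole_grid();
 */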
bool
shm_scroll(struct buffer *_buf, int rows,
           int top_margin, int top_keep_rows,
           int bottom_margin, int bottom_keep_rows)
{
#if __SIZEOF_POINTER__ == 8 && defined(FALLOC_FL_PUNCH_HOLE)
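    /*
     * Offset-based scrolling requires FALLOC_FL_PUNCH_HOLE, and is limited to
     * 64-bit builds, presumably because the pool must be able to grow towards
     * max_pool_size within the process' address space. Otherwise we simply
     * report that the buffer cannot be scrolled.
     */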
    if (!shm_can_scroll(_buf))
        return false;

    struct buffer_private *buf = (struct buffer_private *)_buf;

    xassert(rows != 0);
    return rows > 0
        ? shm_scroll_forward(buf, rows, top_margin, top_keep_rows, bottom_margin, bottom_keep_rows)
        : shm_scroll_reverse(buf, -rows, top_margin, top_keep_rows, bottom_margin, bottom_keep_rows);
#else
    return false;
#endif
}

void
shm_purge(struct buffer_chain *chain)
{
    LOG_DBG("chain: %p: purging all buffers", (void *)chain);

    /* Purge old buffers associated with this chain */
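    /*
     * A buffer still held by the compositor cannot be destroyed right away;
     * buffer_unref_no_remove_from_chain() defers it in that case, and only
     * buffers it reports as gone are unlinked from the chain here.
     */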
    tll_foreach(chain->bufs, it) {
        if (buffer_unref_no_remove_from_chain(it->item))
            tll_remove(chain->bufs, it);
    }
}

void
shm_addref(struct buffer *_buf)
{
    struct buffer_private *buf = (struct buffer_private *)_buf;
    buf->ref_count++;
}

void
shm_unref(struct buffer *_buf)
{
    if (_buf == NULL)
        return;

    struct buffer_private *buf = (struct buffer_private *)_buf;
|
shm: refactor: move away from a single, global, buffer list
Up until now, *all* buffers have been tracked in a single, global
buffer list. We've used 'cookies' to separate buffers from different
contexts (so that shm_get_buffer() doesn't try to re-use e.g. a
search-box buffer for the main grid).
This patch refactors this, and completely removes the global
list.
Instead of cookies, we now use 'chains'. A chain tracks both the
properties to apply to newly created buffers (scrollable, number of
pixman instances to instantiate etc), as well as the instantiated
buffers themselves.
This means there's strictly speaking not much use for shm_fini()
anymore, since its up to the chain owner to call shm_chain_free(),
which will also purge all buffers.
However, since purging a buffer may be deferred, if the buffer is
owned by the compositor at the time of the call to shm_purge() or
shm_chain_free(), we still keep a global 'deferred' list, on to which
deferred buffers are pushed. shm_fini() iterates this list and
destroys the buffers _even_ if they are still owned by the
compositor. This only happens at program termination, and not when
destroying a terminal instance. I.e. closing a window in a “foot
--server” does *not* trigger this.
Each terminal instatiates a number of chains, and these chains are
destroyed when the terminal instance is destroyed. Note that some
buffers may be put on the deferred list, as mentioned above.
2021-07-16 16:48:49 +02:00
|
|
|
struct buffer_chain *chain = buf->chain;
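
    /*
     * Walk the owning chain to find this buffer's node, so it can also be
     * unlinked from the chain's list if the unref releases it.
     */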
    tll_foreach(chain->bufs, it) {
        if (it->item != buf)
            continue;

        if (buffer_unref_no_remove_from_chain(buf))
            tll_remove(chain->bufs, it);
        break;
    }
}
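
/*
 * A buffer chain tracks the properties applied to newly created buffers
 * (whether they are scrollable, how many pixman instances to instantiate),
 * as well as the instantiated buffers themselves.
 */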
struct buffer_chain *
shm_chain_new(struct wl_shm *shm, bool scrollable, size_t pix_instances)
{
    struct buffer_chain *chain = xmalloc(sizeof(*chain));
    *chain = (struct buffer_chain){
        .bufs = tll_init(),
        .shm = shm,
        .pix_instances = pix_instances,
        .scrollable = scrollable,
    };
    return chain;
}

void
shm_chain_free(struct buffer_chain *chain)
{
    if (chain == NULL)
        return;

    shm_purge(chain);

    if (tll_length(chain->bufs) > 0) {
        BUG("chain=%p: there are buffers remaining; "
            "is there a missing call to shm_unref()?", (void *)chain);
    }

    free(chain);
}