shm: add shm_get_many() - allows buffers to share a single pool

shm_get_many() always returns new buffers (i.e. never old, cached
ones). The newly allocated buffers are also marked for immediate
purging, meaning they’ll be destroyed on the next call to either
shm_get_buffer(), or shm_get_many().

Furthermore, we add a new attribute, ‘locked’, to the buffer
struct. When auto purging buffers, look at this instead of comparing
cookies.

Buffer consumers are expected to set ‘locked’ while they hold a
reference to a buffer and do not want it destroyed behind their back.
This commit is contained in:
Daniel Eklöf 2021-07-15 18:32:19 +02:00
parent 5e64c67c25
commit 7533684d8f
No known key found for this signature in database
GPG key ID: 5BBD4992C116573F
3 changed files with 274 additions and 129 deletions

View file

@ -2322,6 +2322,7 @@ grid_render(struct terminal *term)
} }
if (term->render.last_buf != NULL) { if (term->render.last_buf != NULL) {
term->render.last_buf->locked = false;
free(term->render.last_buf->scroll_damage); free(term->render.last_buf->scroll_damage);
term->render.last_buf->scroll_damage = NULL; term->render.last_buf->scroll_damage = NULL;
} }
@ -2330,6 +2331,7 @@ grid_render(struct terminal *term)
term->render.was_flashing = term->flash.active; term->render.was_flashing = term->flash.active;
term->render.was_searching = term->is_searching; term->render.was_searching = term->is_searching;
buf->locked = true;
buf->age = 0; buf->age = 0;
xassert(buf->scroll_damage == NULL); xassert(buf->scroll_damage == NULL);
@ -3416,6 +3418,8 @@ damage_view:
tll_free(term->normal.scroll_damage); tll_free(term->normal.scroll_damage);
tll_free(term->alt.scroll_damage); tll_free(term->alt.scroll_damage);
if (term->render.last_buf != NULL)
term->render.last_buf->locked = false;
term->render.last_buf = NULL; term->render.last_buf = NULL;
term_damage_view(term); term_damage_view(term);
render_refresh_csd(term); render_refresh_csd(term);

338
shm.c
View file

@ -88,20 +88,37 @@ buffer_destroy_dont_close(struct buffer *buf)
buf->mmapped = NULL; buf->mmapped = NULL;
} }
static void
pool_unref(struct buffer_pool *pool)
{
if (pool == NULL)
return;
xassert(pool->ref_count > 0);
pool->ref_count--;
if (pool->ref_count > 0)
return;
if (pool->real_mmapped != MAP_FAILED)
munmap(pool->real_mmapped, pool->mmap_size);
if (pool->wl_pool != NULL)
wl_shm_pool_destroy(pool->wl_pool);
if (pool->fd >= 0)
close(pool->fd);
pool->real_mmapped = MAP_FAILED;
pool->wl_pool = NULL;
pool->fd = -1;
free(pool);
}
static void static void
buffer_destroy(struct buffer *buf) buffer_destroy(struct buffer *buf)
{ {
buffer_destroy_dont_close(buf); buffer_destroy_dont_close(buf);
if (buf->real_mmapped != MAP_FAILED) pool_unref(buf->pool);
munmap(buf->real_mmapped, buf->mmap_size);
if (buf->pool != NULL)
wl_shm_pool_destroy(buf->pool);
if (buf->fd >= 0)
close(buf->fd);
buf->real_mmapped = MAP_FAILED;
buf->pool = NULL; buf->pool = NULL;
buf->fd = -1;
free(buf->scroll_damage); free(buf->scroll_damage);
pixman_region32_fini(&buf->dirty); pixman_region32_fini(&buf->dirty);
@ -124,7 +141,10 @@ static void
buffer_release(void *data, struct wl_buffer *wl_buffer) buffer_release(void *data, struct wl_buffer *wl_buffer)
{ {
struct buffer *buffer = data; struct buffer *buffer = data;
LOG_DBG("release: cookie=%lx (buf=%p)", buffer->cookie, (void *)buffer);
LOG_DBG("release: cookie=%lx (buf=%p, total buffer count: %zu)",
buffer->cookie, (void *)buffer, tll_length(buffers));
xassert(buffer->wl_buf == wl_buffer); xassert(buffer->wl_buf == wl_buffer);
xassert(buffer->busy); xassert(buffer->busy);
buffer->busy = false; buffer->busy = false;
@ -159,15 +179,20 @@ instantiate_offset(struct wl_shm *shm, struct buffer *buf, off_t new_offset)
xassert(buf->mmapped == NULL); xassert(buf->mmapped == NULL);
xassert(buf->wl_buf == NULL); xassert(buf->wl_buf == NULL);
xassert(buf->pix == NULL); xassert(buf->pix == NULL);
xassert(buf->pool != NULL);
const struct buffer_pool *pool = buf->pool;
void *mmapped = MAP_FAILED; void *mmapped = MAP_FAILED;
struct wl_buffer *wl_buf = NULL; struct wl_buffer *wl_buf = NULL;
pixman_image_t **pix = xcalloc(buf->pix_instances, sizeof(*pix)); pixman_image_t **pix = xcalloc(buf->pix_instances, sizeof(*pix));
mmapped = (uint8_t *)buf->real_mmapped + new_offset; mmapped = (uint8_t *)pool->real_mmapped + new_offset;
wl_buf = wl_shm_pool_create_buffer( wl_buf = wl_shm_pool_create_buffer(
buf->pool, new_offset, buf->width, buf->height, buf->stride, WL_SHM_FORMAT_ARGB8888); pool->wl_pool, new_offset, buf->width, buf->height, buf->stride,
WL_SHM_FORMAT_ARGB8888);
if (wl_buf == NULL) { if (wl_buf == NULL) {
LOG_ERR("failed to create SHM buffer"); LOG_ERR("failed to create SHM buffer");
goto err; goto err;
@ -183,8 +208,8 @@ instantiate_offset(struct wl_shm *shm, struct buffer *buf, off_t new_offset)
} }
} }
buf->offset = new_offset;
buf->mmapped = mmapped; buf->mmapped = mmapped;
buf->offset = new_offset;
buf->wl_buf = wl_buf; buf->wl_buf = wl_buf;
buf->pix = pix; buf->pix = pix;
@ -205,18 +230,19 @@ err:
return false; return false;
} }
struct buffer * static void NOINLINE
shm_get_buffer(struct wl_shm *shm, int width, int height, unsigned long cookie, bool scrollable, size_t pix_instances) destroy_all_purgeables(void)
{ {
/* Purge buffers marked for purging */ /* Purge buffers marked for purging */
tll_foreach(buffers, it) { tll_foreach(buffers, it) {
if (it->item.cookie != cookie) if (it->item.locked)
continue; continue;
if (!it->item.purge) if (!it->item.purge)
continue; continue;
xassert(!it->item.busy); if (it->item.busy)
continue;
LOG_DBG("cookie=%lx: purging buffer %p (width=%d, height=%d): %zu KB", LOG_DBG("cookie=%lx: purging buffer %p (width=%d, height=%d): %zu KB",
cookie, (void *)&it->item, it->item.width, it->item.height, cookie, (void *)&it->item, it->item.width, it->item.height,
@ -225,56 +251,15 @@ shm_get_buffer(struct wl_shm *shm, int width, int height, unsigned long cookie,
buffer_destroy(&it->item); buffer_destroy(&it->item);
tll_remove(buffers, it); tll_remove(buffers, it);
} }
}
struct buffer *cached = NULL; static void NOINLINE
get_new_buffers(struct wl_shm *shm, size_t count,
tll_foreach(buffers, it) { struct buffer_description info[static count],
if (it->item.width != width) struct buffer *bufs[static count],
continue; size_t pix_instances, bool scrollable, bool immediate_purge)
if (it->item.height != height) {
continue; xassert(count == 1 || !scrollable);
if (it->item.cookie != cookie)
continue;
if (it->item.busy)
it->item.age++;
else
#if FORCED_DOUBLE_BUFFERING
if (it->item.age == 0)
it->item.age++;
else
#endif
{
LOG_DBG("cookie=%lx: re-using buffer from cache (buf=%p)",
cookie, (void *)&it->item);
it->item.busy = true;
it->item.purge = false;
pixman_region32_clear(&it->item.dirty);
free(it->item.scroll_damage);
it->item.scroll_damage = NULL;
xassert(it->item.pix_instances == pix_instances);
cached = &it->item;
}
}
if (cached != NULL)
return cached;
/* Purge old buffers associated with this cookie */
tll_foreach(buffers, it) {
if (it->item.cookie != cookie)
continue;
if (it->item.busy)
continue;
if (it->item.width == width && it->item.height == height)
continue;
LOG_DBG("cookie=%lx: marking buffer %p for purging", cookie, (void *)&it->item);
it->item.purge = true;
}
/* /*
* No existing buffer available. Create a new one by: * No existing buffer available. Create a new one by:
* *
@ -285,14 +270,21 @@ shm_get_buffer(struct wl_shm *shm, int width, int height, unsigned long cookie,
* The pixman image and the wayland buffer are now sharing memory. * The pixman image and the wayland buffer are now sharing memory.
*/ */
int stride[count];
int sizes[count];
size_t total_size = 0;
for (size_t i = 0; i < count; i++) {
stride[i] = stride_for_format_and_width(PIXMAN_a8r8g8b8, info[i].width);
sizes[i] = stride[i] * info[i].height;
total_size += sizes[i];
}
int pool_fd = -1; int pool_fd = -1;
const int stride = stride_for_format_and_width(PIXMAN_a8r8g8b8, width);
const size_t size = stride * height;
void *real_mmapped = MAP_FAILED; void *real_mmapped = MAP_FAILED;
struct wl_shm_pool *pool = NULL; struct wl_shm_pool *wl_pool = NULL;
struct buffer_pool *pool = NULL;
LOG_DBG("cookie=%lx: allocating new buffer: %zu KB", cookie, size / 1024);
/* Backing memory for SHM */ /* Backing memory for SHM */
#if defined(MEMFD_CREATE) #if defined(MEMFD_CREATE)
@ -311,14 +303,16 @@ shm_get_buffer(struct wl_shm *shm, int width, int height, unsigned long cookie,
} }
#if __SIZEOF_POINTER__ == 8 #if __SIZEOF_POINTER__ == 8
off_t initial_offset = scrollable && max_pool_size > 0 ? (max_pool_size / 4) & ~(page_size() - 1) : 0; off_t offset = scrollable && max_pool_size > 0 ? (max_pool_size / 4) & ~(page_size() - 1) : 0;
off_t memfd_size = scrollable && max_pool_size > 0 ? max_pool_size : size; off_t memfd_size = scrollable && max_pool_size > 0 ? max_pool_size : total_size;
#else #else
off_t initial_offset = 0; off_t offset = 0;
off_t memfd_size = size; off_t memfd_size = total_size;
#endif #endif
LOG_DBG("memfd-size: %lu, initial offset: %lu", memfd_size, initial_offset); xassert(scrollable || (offset == 0 && memfd_size == total_size));
LOG_DBG("memfd-size: %lu, initial offset: %lu", memfd_size, offset);
if (ftruncate(pool_fd, memfd_size) == -1) { if (ftruncate(pool_fd, memfd_size) == -1) {
LOG_ERRNO("failed to set size of SHM backing memory file"); LOG_ERRNO("failed to set size of SHM backing memory file");
@ -344,8 +338,8 @@ shm_get_buffer(struct wl_shm *shm, int width, int height, unsigned long cookie,
} }
if (scrollable && !can_punch_hole) { if (scrollable && !can_punch_hole) {
initial_offset = 0; offset = 0;
memfd_size = size; memfd_size = total_size;
scrollable = false; scrollable = false;
if (ftruncate(pool_fd, memfd_size) < 0) { if (ftruncate(pool_fd, memfd_size) < 0) {
@ -374,37 +368,55 @@ shm_get_buffer(struct wl_shm *shm, int width, int height, unsigned long cookie,
} }
#endif #endif
pool = wl_shm_create_pool(shm, pool_fd, memfd_size); wl_pool = wl_shm_create_pool(shm, pool_fd, memfd_size);
if (pool == NULL) { if (wl_pool == NULL) {
LOG_ERR("failed to create SHM pool"); LOG_ERR("failed to create SHM pool");
goto err; goto err;
} }
/* Push to list of available buffers, but marked as 'busy' */ pool = malloc(sizeof(*pool));
tll_push_back( if (pool == NULL) {
buffers, LOG_ERRNO("failed to allocate buffer pool");
((struct buffer){
.cookie = cookie,
.width = width,
.height = height,
.stride = stride,
.busy = true,
.size = size,
.pix_instances = pix_instances,
.fd = pool_fd,
.pool = pool,
.scrollable = scrollable,
.real_mmapped = real_mmapped,
.mmap_size = memfd_size,
.offset = 0,
.age = 1234, /* Force a full repaint */
}));
struct buffer *ret = &tll_back(buffers);
if (!instantiate_offset(shm, ret, initial_offset))
goto err; goto err;
}
pixman_region32_init(&ret->dirty); *pool = (struct buffer_pool){
.fd = pool_fd,
.wl_pool = wl_pool,
.real_mmapped = real_mmapped,
.mmap_size = memfd_size,
.ref_count = 0,
};
for (size_t i = 0; i < count; i++) {
/* Push to list of available buffers, but marked as 'busy' */
tll_push_front(
buffers,
((struct buffer){
.cookie = info[i].cookie,
.width = info[i].width,
.height = info[i].height,
.stride = stride[i],
.busy = true,
.purge = immediate_purge,
.size = sizes[i],
.pix_instances = pix_instances,
.pool = pool,
.scrollable = scrollable,
.offset = 0,
.age = 1234, /* Force a full repaint */
}));
struct buffer *buf = &tll_front(buffers);
if (!instantiate_offset(shm, buf, offset))
goto err;
pixman_region32_init(&buf->dirty);
pool->ref_count++;
offset += buf->size;
bufs[i] = buf;
}
#if defined(MEASURE_SHM_ALLOCS) && MEASURE_SHM_ALLOCS #if defined(MEASURE_SHM_ALLOCS) && MEASURE_SHM_ALLOCS
{ {
@ -416,18 +428,19 @@ shm_get_buffer(struct wl_shm *shm, int width, int height, unsigned long cookie,
} }
#endif #endif
if (!shm_can_scroll(ret)) { if (!shm_can_scroll(bufs[0])) {
/* We only need to keep the pool FD open if were going to SHM /* We only need to keep the pool FD open if were going to SHM
* scroll it */ * scroll it */
close(pool_fd); close(pool_fd);
ret->fd = -1; pool->fd = -1;
} }
return ret; return;
err: err:
if (pool != NULL) pool_unref(pool);
wl_shm_pool_destroy(pool); if (wl_pool != NULL)
wl_shm_pool_destroy(wl_pool);
if (real_mmapped != MAP_FAILED) if (real_mmapped != MAP_FAILED)
munmap(real_mmapped, memfd_size); munmap(real_mmapped, memfd_size);
if (pool_fd != -1) if (pool_fd != -1)
@ -435,7 +448,78 @@ err:
/* We don't handle this */ /* We don't handle this */
abort(); abort();
return NULL; }
void
shm_get_many(struct wl_shm *shm, size_t count,
struct buffer_description info[static count],
struct buffer *bufs[static count],
size_t pix_instances)
{
destroy_all_purgeables();
get_new_buffers(shm, count, info, bufs, pix_instances, false, true);
}
struct buffer *
shm_get_buffer(struct wl_shm *shm, int width, int height, unsigned long cookie,
bool scrollable, size_t pix_instances)
{
destroy_all_purgeables();
struct buffer *cached = NULL;
tll_foreach(buffers, it) {
if (it->item.width != width)
continue;
if (it->item.height != height)
continue;
if (it->item.cookie != cookie)
continue;
if (it->item.busy)
it->item.age++;
else
#if FORCED_DOUBLE_BUFFERING
if (it->item.age == 0)
it->item.age++;
else
#endif
{
if (cached == NULL) {
LOG_DBG("cookie=%lx: re-using buffer from cache (buf=%p)",
cookie, (void *)&it->item);
it->item.busy = true;
it->item.purge = false;
pixman_region32_clear(&it->item.dirty);
free(it->item.scroll_damage);
it->item.scroll_damage = NULL;
xassert(it->item.pix_instances == pix_instances);
cached = &it->item;
}
}
}
if (cached != NULL)
return cached;
/* Mark old buffers associated with this cookie for purging */
tll_foreach(buffers, it) {
if (it->item.cookie != cookie)
continue;
if (it->item.busy)
continue;
if (it->item.width == width && it->item.height == height)
continue;
LOG_DBG("cookie=%lx: marking buffer %p for purging", cookie, (void *)&it->item);
it->item.purge = true;
}
struct buffer *ret;
get_new_buffers(shm, 1, &(struct buffer_description){width, height, cookie},
&ret, pix_instances, scrollable, false);
return ret;
} }
bool bool
@ -453,12 +537,15 @@ shm_can_scroll(const struct buffer *buf)
static bool static bool
wrap_buffer(struct wl_shm *shm, struct buffer *buf, off_t new_offset) wrap_buffer(struct wl_shm *shm, struct buffer *buf, off_t new_offset)
{ {
struct buffer_pool *pool = buf->pool;
xassert(pool->ref_count == 1);
/* We don't allow overlapping offsets */ /* We don't allow overlapping offsets */
off_t UNUSED diff = off_t UNUSED diff =
new_offset < buf->offset ? buf->offset - new_offset : new_offset - buf->offset; new_offset < buf->offset ? buf->offset - new_offset : new_offset - buf->offset;
xassert(diff > buf->size); xassert(diff > buf->size);
memcpy((uint8_t *)buf->real_mmapped + new_offset, buf->mmapped, buf->size); memcpy((uint8_t *)pool->real_mmapped + new_offset, buf->mmapped, buf->size);
off_t trim_ofs, trim_len; off_t trim_ofs, trim_len;
if (new_offset > buf->offset) { if (new_offset > buf->offset) {
@ -468,11 +555,11 @@ wrap_buffer(struct wl_shm *shm, struct buffer *buf, off_t new_offset)
} else { } else {
/* Trim everything *after* the new buffer location */ /* Trim everything *after* the new buffer location */
trim_ofs = new_offset + buf->size; trim_ofs = new_offset + buf->size;
trim_len = buf->mmap_size - trim_ofs; trim_len = pool->mmap_size - trim_ofs;
} }
if (fallocate( if (fallocate(
buf->fd, pool->fd,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
trim_ofs, trim_len) < 0) trim_ofs, trim_len) < 0)
{ {
@ -490,11 +577,15 @@ shm_scroll_forward(struct wl_shm *shm, struct buffer *buf, int rows,
int top_margin, int top_keep_rows, int top_margin, int top_keep_rows,
int bottom_margin, int bottom_keep_rows) int bottom_margin, int bottom_keep_rows)
{ {
struct buffer_pool *pool = buf->pool;
xassert(can_punch_hole); xassert(can_punch_hole);
xassert(buf->busy); xassert(buf->busy);
xassert(buf->pix); xassert(buf->pix);
xassert(buf->wl_buf); xassert(buf->wl_buf);
xassert(buf->fd >= 0); xassert(pool != NULL);
xassert(pool->ref_count == 1);
xassert(pool->fd >= 0);
LOG_DBG("scrolling %d rows (%d bytes)", rows, rows * buf->stride); LOG_DBG("scrolling %d rows (%d bytes)", rows, rows * buf->stride);
@ -541,7 +632,7 @@ shm_scroll_forward(struct wl_shm *shm, struct buffer *buf, int rows,
const off_t trim_len = new_offset; const off_t trim_len = new_offset;
if (fallocate( if (fallocate(
buf->fd, pool->fd,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
trim_ofs, trim_len) < 0) trim_ofs, trim_len) < 0)
{ {
@ -596,6 +687,9 @@ shm_scroll_reverse(struct wl_shm *shm, struct buffer *buf, int rows,
{ {
xassert(rows > 0); xassert(rows > 0);
struct buffer_pool *pool = buf->pool;
xassert(pool->ref_count == 1);
const off_t diff = rows * buf->stride; const off_t diff = rows * buf->stride;
if (diff > buf->offset) { if (diff > buf->offset) {
LOG_DBG("memfd offset reverse wrap-around"); LOG_DBG("memfd offset reverse wrap-around");
@ -634,10 +728,10 @@ shm_scroll_reverse(struct wl_shm *shm, struct buffer *buf, int rows,
/* Free unused memory - everything after the relocated buffer */ /* Free unused memory - everything after the relocated buffer */
const off_t trim_ofs = new_offset + buf->size; const off_t trim_ofs = new_offset + buf->size;
const off_t trim_len = buf->mmap_size - trim_ofs; const off_t trim_len = pool->mmap_size - trim_ofs;
if (fallocate( if (fallocate(
buf->fd, pool->fd,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
trim_ofs, trim_len) < 0) trim_ofs, trim_len) < 0)
{ {
@ -712,9 +806,13 @@ shm_purge(struct wl_shm *shm, unsigned long cookie)
if (it->item.cookie != cookie) if (it->item.cookie != cookie)
continue; continue;
xassert(!it->item.busy); if (it->item.busy) {
LOG_WARN("deferring purge of 'busy' buffer (width=%d, height=%d)",
buffer_destroy(&it->item); it->item.width, it->item.height);
tll_remove(buffers, it); it->item.purge = true;
} else {
buffer_destroy(&it->item);
tll_remove(buffers, it);
}
} }
} }

61
shm.h
View file

@ -9,6 +9,16 @@
#include "terminal.h" #include "terminal.h"
struct buffer_pool {
int fd; /* memfd */
struct wl_shm_pool *wl_pool;
void *real_mmapped; /* Address returned from mmap */
size_t mmap_size; /* Size of mmap (>= size) */
size_t ref_count;
};
struct buffer { struct buffer {
unsigned long cookie; unsigned long cookie;
@ -16,7 +26,8 @@ struct buffer {
int height; int height;
int stride; int stride;
bool busy; bool locked; /* Caller owned, shm won't destroy it */
bool busy; /* Owned by compositor */
size_t size; /* Buffer size */ size_t size; /* Buffer size */
void *mmapped; /* Raw data (TODO: rename) */ void *mmapped; /* Raw data (TODO: rename) */
@ -25,11 +36,7 @@ struct buffer {
size_t pix_instances; size_t pix_instances;
/* Internal */ /* Internal */
int fd; /* memfd */ struct buffer_pool *pool;
struct wl_shm_pool *pool;
void *real_mmapped; /* Address returned from mmap */
size_t mmap_size; /* Size of mmap (>= size) */
off_t offset; /* Offset into memfd where data begins */ off_t offset; /* Offset into memfd where data begins */
bool scrollable; bool scrollable;
@ -41,11 +48,47 @@ struct buffer {
pixman_region32_t dirty; pixman_region32_t dirty;
}; };
struct buffer *shm_get_buffer( struct buffer_description {
struct wl_shm *shm, int width, int height, unsigned long cookie, bool scrollable, size_t pix_instances); int width;
void shm_fini(void); int height;
unsigned long cookie;
};
void shm_fini(void);
void shm_set_max_pool_size(off_t max_pool_size); void shm_set_max_pool_size(off_t max_pool_size);
/*
* Returns a single buffer.
*
* May return a cached buffer. If so, the buffer's age indicates how
* many shm_get_buffer() calls have been made for the same
* width/height/cookie while the buffer was still busy.
*
* A newly allocated buffer has an age of 1234.
*/
struct buffer *shm_get_buffer(
struct wl_shm *shm, int width, int height, unsigned long cookie,
bool scrollable, size_t pix_instances);
/*
* Returns many buffers, described by info, all sharing the same SHM
* buffer pool.
*
* Never returns cached buffers. However, the newly created buffers
* are all inserted into the regular buffer cache, and are treated
* just like buffers created by shm_get_buffer().
*
* This function is useful when allocating many small buffers, with
* (roughly) the same life time.
*
* Buffers are tagged for immediate purging, and will be destroyed as
* soon as the compositor releases them.
*/
void shm_get_many(
struct wl_shm *shm, size_t count,
struct buffer_description info[static count],
struct buffer *bufs[static count], size_t pix_instances);
bool shm_can_scroll(const struct buffer *buf); bool shm_can_scroll(const struct buffer *buf);
bool shm_scroll(struct wl_shm *shm, struct buffer *buf, int rows, bool shm_scroll(struct wl_shm *shm, struct buffer *buf, int rows,
int top_margin, int top_keep_rows, int top_margin, int top_keep_rows,