2019-06-12 20:08:54 +02:00
|
|
|
|
#pragma once

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include <sys/types.h>

#include <pixman.h>
#include <wayland-client.h>

#include "terminal.h"
|
|
|
|
|
|
|
2019-06-12 20:08:54 +02:00
|
|
|
|
struct buffer {
|
2021-07-15 22:17:12 +02:00
|
|
|
|
bool locked; /* Caller owned, shm won’t destroy it */
|
2019-11-02 00:33:37 +01:00
|
|
|
|
|
2019-06-12 20:08:54 +02:00
|
|
|
|
int width;
|
|
|
|
|
|
int height;
|
2019-08-16 20:40:32 +02:00
|
|
|
|
int stride;
|
2019-06-12 20:08:54 +02:00
|
|
|
|
|
shm: new function, shm_scroll()
This function "scrolls" the buffer by the specified number of (pixel)
rows.
The idea is move the image offset by re-sizing the underlying memfd
object. I.e. to scroll forward, increase the size of the memfd file,
and move the pixman image offset forward (and the Wayland SHM buffer
as well).
Only increasing the file size would, obviously, cause the memfd file
to grow indefinitely. To deal with this, we "punch" a whole from the
beginning of the file to the new offset. This frees the associated
memory.
Thus, while we have a memfd file whose size is (as seen by
e.g. fstat()) is ever growing, the actual file size is always the
original buffer size.
Some notes:
* FALLOC_FL_PUNCH_HOLE can be quite slow when the number of used pages
to drop is large.
* all normal fallocate() usages have been replaced with ftruncate(),
as this is *much* faster. fallocate() guarantees subsequent writes
wont fail. I.e. it actually reserves (disk) space. While it doesn't
allocate on-disk blocks for on-disk files, it *does* zero-initialize
the in-memory blocks. And this is slow. ftruncate() doesn't do this.
TODO: implement reverse scrolling (i.e. a negative row count).
2020-03-22 20:06:44 +01:00
|
|
|
|
size_t size; /* Buffer size */
|
2021-07-15 22:18:09 +02:00
|
|
|
|
void *data; /* Raw data (TODO: rename) */
|
2019-06-12 20:08:54 +02:00
|
|
|
|
|
|
|
|
|
|
struct wl_buffer *wl_buf;
|
2020-06-04 15:39:19 +02:00
|
|
|
|
pixman_image_t **pix;
|
|
|
|
|
|
size_t pix_instances;
|
shm: new function, shm_scroll()
This function "scrolls" the buffer by the specified number of (pixel)
rows.
The idea is move the image offset by re-sizing the underlying memfd
object. I.e. to scroll forward, increase the size of the memfd file,
and move the pixman image offset forward (and the Wayland SHM buffer
as well).
Only increasing the file size would, obviously, cause the memfd file
to grow indefinitely. To deal with this, we "punch" a whole from the
beginning of the file to the new offset. This frees the associated
memory.
Thus, while we have a memfd file whose size is (as seen by
e.g. fstat()) is ever growing, the actual file size is always the
original buffer size.
Some notes:
* FALLOC_FL_PUNCH_HOLE can be quite slow when the number of used pages
to drop is large.
* all normal fallocate() usages have been replaced with ftruncate(),
as this is *much* faster. fallocate() guarantees subsequent writes
wont fail. I.e. it actually reserves (disk) space. While it doesn't
allocate on-disk blocks for on-disk files, it *does* zero-initialize
the in-memory blocks. And this is slow. ftruncate() doesn't do this.
TODO: implement reverse scrolling (i.e. a negative row count).
2020-03-22 20:06:44 +01:00
|
|
|
|
|
2021-05-08 10:25:14 +02:00
|
|
|
|
unsigned age;
|
|
|
|
|
|
struct damage *scroll_damage;
|
|
|
|
|
|
size_t scroll_damage_count;
|
2021-05-07 20:20:47 +02:00
|
|
|
|
pixman_region32_t dirty;
|
2019-06-12 20:08:54 +02:00
|
|
|
|
};
|
|
|
|
|
|
|
2021-07-15 18:32:19 +02:00
|
|
|
|
struct buffer_description {
|
|
|
|
|
|
int width;
|
|
|
|
|
|
int height;
|
|
|
|
|
|
unsigned long cookie;
|
|
|
|
|
|
};
|
2019-11-02 00:49:00 +01:00
|
|
|
|
|
2021-07-15 18:32:19 +02:00
|
|
|
|
void shm_fini(void);
|
2020-03-25 20:48:02 +01:00
|
|
|
|
void shm_set_max_pool_size(off_t max_pool_size);
|
2021-07-15 18:32:19 +02:00
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
|
* Returns a single buffer.
|
|
|
|
|
|
*
|
|
|
|
|
|
* May returned a cached buffer. If so, the buffer’s age indicates how
|
|
|
|
|
|
* many shm_get_buffer() calls have been made for the same
|
|
|
|
|
|
* width/height/cookie while the buffer was still busy.
|
|
|
|
|
|
*
|
|
|
|
|
|
* A newly allocated buffer has an age of 1234.
|
|
|
|
|
|
*/
|
|
|
|
|
|
struct buffer *shm_get_buffer(
|
|
|
|
|
|
struct wl_shm *shm, int width, int height, unsigned long cookie,
|
|
|
|
|
|
bool scrollable, size_t pix_instances);
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
|
* Returns many buffers, described by ‘info’, all sharing the same SHM
|
|
|
|
|
|
* buffer pool.
|
|
|
|
|
|
*
|
|
|
|
|
|
* Never returns cached buffers. However, the newly created buffers
|
|
|
|
|
|
* are all inserted into the regular buffer cache, and are treated
|
|
|
|
|
|
* just like buffers created by shm_get_buffer().
|
|
|
|
|
|
*
|
|
|
|
|
|
* This function is useful when allocating many small buffers, with
|
|
|
|
|
|
* (roughly) the same life time.
|
|
|
|
|
|
*
|
|
|
|
|
|
* Buffers are tagged for immediate purging, and will be destroyed as
|
|
|
|
|
|
* soon as the compositor releases them.
|
|
|
|
|
|
*/
|
|
|
|
|
|
void shm_get_many(
|
|
|
|
|
|
struct wl_shm *shm, size_t count,
|
|
|
|
|
|
struct buffer_description info[static count],
|
|
|
|
|
|
struct buffer *bufs[static count], size_t pix_instances);
|
|
|
|
|
|
|
2020-03-25 18:26:58 +01:00
|
|
|
|
bool shm_can_scroll(const struct buffer *buf);
|
2020-03-23 20:45:27 +01:00
|
|
|
|
bool shm_scroll(struct wl_shm *shm, struct buffer *buf, int rows,
|
|
|
|
|
|
int top_margin, int top_keep_rows,
|
|
|
|
|
|
int bottom_margin, int bottom_keep_rows);
|
shm: new function, shm_scroll()
This function "scrolls" the buffer by the specified number of (pixel)
rows.
The idea is move the image offset by re-sizing the underlying memfd
object. I.e. to scroll forward, increase the size of the memfd file,
and move the pixman image offset forward (and the Wayland SHM buffer
as well).
Only increasing the file size would, obviously, cause the memfd file
to grow indefinitely. To deal with this, we "punch" a whole from the
beginning of the file to the new offset. This frees the associated
memory.
Thus, while we have a memfd file whose size is (as seen by
e.g. fstat()) is ever growing, the actual file size is always the
original buffer size.
Some notes:
* FALLOC_FL_PUNCH_HOLE can be quite slow when the number of used pages
to drop is large.
* all normal fallocate() usages have been replaced with ftruncate(),
as this is *much* faster. fallocate() guarantees subsequent writes
wont fail. I.e. it actually reserves (disk) space. While it doesn't
allocate on-disk blocks for on-disk files, it *does* zero-initialize
the in-memory blocks. And this is slow. ftruncate() doesn't do this.
TODO: implement reverse scrolling (i.e. a negative row count).
2020-03-22 20:06:44 +01:00
|
|
|
|
|
2019-11-02 00:49:00 +01:00
|
|
|
|
void shm_purge(struct wl_shm *shm, unsigned long cookie);
|
2020-03-18 16:52:33 +01:00
|
|
|
|
|
|
|
|
|
|
struct terminal;
|
|
|
|
|
|
static inline unsigned long shm_cookie_grid(const struct terminal *term) { return (unsigned long)((uintptr_t)term + 0); }
|
|
|
|
|
|
static inline unsigned long shm_cookie_search(const struct terminal *term) { return (unsigned long)((uintptr_t)term + 1); }
|
2020-07-26 10:00:21 +02:00
|
|
|
|
static inline unsigned long shm_cookie_scrollback_indicator(const struct terminal *term) { return (unsigned long)(uintptr_t)term + 2; }
|
2020-08-13 18:35:17 +02:00
|
|
|
|
static inline unsigned long shm_cookie_render_timer(const struct terminal *term) { return (unsigned long)(uintptr_t)term + 3; }
|
|
|
|
|
|
static inline unsigned long shm_cookie_csd(const struct terminal *term, int n) { return (unsigned long)((uintptr_t)term + 4 + (n)); }
|
2021-01-31 11:12:07 +01:00
|
|
|
|
|
|
|
|
|
|
struct url;
|
|
|
|
|
|
static inline unsigned long shm_cookie_url(const struct url *url) { return (unsigned long)(uintptr_t)url; }
|