2019-06-12 20:08:54 +02:00
|
|
|
#pragma once

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include <sys/types.h>

#include <pixman.h>
#include <wayland-client.h>
|
|
|
|
|
|
|
|
|
|
struct buffer {
|
2019-11-02 00:33:37 +01:00
|
|
|
unsigned long cookie;
|
|
|
|
|
|
2019-06-12 20:08:54 +02:00
|
|
|
int width;
|
|
|
|
|
int height;
|
2019-08-16 20:40:32 +02:00
|
|
|
int stride;
|
2019-06-12 20:08:54 +02:00
|
|
|
|
|
|
|
|
bool busy;
|
shm: new function, shm_scroll()
This function "scrolls" the buffer by the specified number of (pixel)
rows.
The idea is move the image offset by re-sizing the underlying memfd
object. I.e. to scroll forward, increase the size of the memfd file,
and move the pixman image offset forward (and the Wayland SHM buffer
as well).
Only increasing the file size would, obviously, cause the memfd file
to grow indefinitely. To deal with this, we "punch" a whole from the
beginning of the file to the new offset. This frees the associated
memory.
Thus, while we have a memfd file whose size is (as seen by
e.g. fstat()) is ever growing, the actual file size is always the
original buffer size.
Some notes:
* FALLOC_FL_PUNCH_HOLE can be quite slow when the number of used pages
to drop is large.
* all normal fallocate() usages have been replaced with ftruncate(),
as this is *much* faster. fallocate() guarantees subsequent writes
wont fail. I.e. it actually reserves (disk) space. While it doesn't
allocate on-disk blocks for on-disk files, it *does* zero-initialize
the in-memory blocks. And this is slow. ftruncate() doesn't do this.
TODO: implement reverse scrolling (i.e. a negative row count).
2020-03-22 20:06:44 +01:00
|
|
|
size_t size; /* Buffer size */
|
2020-03-25 18:26:58 +01:00
|
|
|
void *mmapped; /* Raw data (TODO: rename) */
|
2019-06-12 20:08:54 +02:00
|
|
|
|
|
|
|
|
struct wl_buffer *wl_buf;
|
2019-08-16 22:54:05 +02:00
|
|
|
pixman_image_t *pix;
|
shm: new function, shm_scroll()
This function "scrolls" the buffer by the specified number of (pixel)
rows.
The idea is move the image offset by re-sizing the underlying memfd
object. I.e. to scroll forward, increase the size of the memfd file,
and move the pixman image offset forward (and the Wayland SHM buffer
as well).
Only increasing the file size would, obviously, cause the memfd file
to grow indefinitely. To deal with this, we "punch" a whole from the
beginning of the file to the new offset. This frees the associated
memory.
Thus, while we have a memfd file whose size is (as seen by
e.g. fstat()) is ever growing, the actual file size is always the
original buffer size.
Some notes:
* FALLOC_FL_PUNCH_HOLE can be quite slow when the number of used pages
to drop is large.
* all normal fallocate() usages have been replaced with ftruncate(),
as this is *much* faster. fallocate() guarantees subsequent writes
wont fail. I.e. it actually reserves (disk) space. While it doesn't
allocate on-disk blocks for on-disk files, it *does* zero-initialize
the in-memory blocks. And this is slow. ftruncate() doesn't do this.
TODO: implement reverse scrolling (i.e. a negative row count).
2020-03-22 20:06:44 +01:00
|
|
|
|
|
|
|
|
/* Internal */
|
|
|
|
|
int fd; /* memfd */
|
2020-03-25 18:26:58 +01:00
|
|
|
struct wl_shm_pool *pool;
|
|
|
|
|
|
shm: new function, shm_scroll()
This function "scrolls" the buffer by the specified number of (pixel)
rows.
The idea is move the image offset by re-sizing the underlying memfd
object. I.e. to scroll forward, increase the size of the memfd file,
and move the pixman image offset forward (and the Wayland SHM buffer
as well).
Only increasing the file size would, obviously, cause the memfd file
to grow indefinitely. To deal with this, we "punch" a whole from the
beginning of the file to the new offset. This frees the associated
memory.
Thus, while we have a memfd file whose size is (as seen by
e.g. fstat()) is ever growing, the actual file size is always the
original buffer size.
Some notes:
* FALLOC_FL_PUNCH_HOLE can be quite slow when the number of used pages
to drop is large.
* all normal fallocate() usages have been replaced with ftruncate(),
as this is *much* faster. fallocate() guarantees subsequent writes
wont fail. I.e. it actually reserves (disk) space. While it doesn't
allocate on-disk blocks for on-disk files, it *does* zero-initialize
the in-memory blocks. And this is slow. ftruncate() doesn't do this.
TODO: implement reverse scrolling (i.e. a negative row count).
2020-03-22 20:06:44 +01:00
|
|
|
void *real_mmapped; /* Address returned from mmap */
|
|
|
|
|
size_t mmap_size; /* Size of mmap (>= size) */
|
2020-03-24 17:46:48 +01:00
|
|
|
off_t offset; /* Offset into memfd where data begins */
|
2020-03-25 18:26:58 +01:00
|
|
|
|
|
|
|
|
bool scrollable;
|
shm: new function, shm_scroll()
This function "scrolls" the buffer by the specified number of (pixel)
rows.
The idea is move the image offset by re-sizing the underlying memfd
object. I.e. to scroll forward, increase the size of the memfd file,
and move the pixman image offset forward (and the Wayland SHM buffer
as well).
Only increasing the file size would, obviously, cause the memfd file
to grow indefinitely. To deal with this, we "punch" a whole from the
beginning of the file to the new offset. This frees the associated
memory.
Thus, while we have a memfd file whose size is (as seen by
e.g. fstat()) is ever growing, the actual file size is always the
original buffer size.
Some notes:
* FALLOC_FL_PUNCH_HOLE can be quite slow when the number of used pages
to drop is large.
* all normal fallocate() usages have been replaced with ftruncate(),
as this is *much* faster. fallocate() guarantees subsequent writes
wont fail. I.e. it actually reserves (disk) space. While it doesn't
allocate on-disk blocks for on-disk files, it *does* zero-initialize
the in-memory blocks. And this is slow. ftruncate() doesn't do this.
TODO: implement reverse scrolling (i.e. a negative row count).
2020-03-22 20:06:44 +01:00
|
|
|
bool purge; /* True if this buffer should be destroyed */
|
2019-06-12 20:08:54 +02:00
|
|
|
};
|
|
|
|
|
|
2019-11-02 00:33:37 +01:00
|
|
|
/*
 * Returns a buffer of the requested dimensions, tagged with 'cookie'.
 * When 'scrollable' is set, the buffer is created such that shm_scroll()
 * may later be used on it.
 */
struct buffer *shm_get_buffer(
    struct wl_shm *shm, int width, int height, unsigned long cookie, bool scrollable);

/* Releases all global SHM state */
void shm_fini(void);

/* True if 'buf' supports shm_scroll() */
bool shm_can_scroll(const struct buffer *buf);

/*
 * "Scrolls" the buffer content by 'rows' pixel rows, by shifting the
 * image offset inside the underlying memfd object (growing the file and
 * punching a hole before the new offset) rather than copying pixels.
 *
 * The top/bottom margin and keep-rows arguments describe regions at the
 * buffer edges that must be preserved across the scroll.
 *
 * Returns false on failure. TODO (per the original commit): a negative
 * row count (reverse scrolling) is not implemented.
 */
bool shm_scroll(struct wl_shm *shm, struct buffer *buf, int rows,
                int top_margin, int top_keep_rows,
                int bottom_margin, int bottom_keep_rows);

/* Marks all buffers tagged with 'cookie' for destruction */
void shm_purge(struct wl_shm *shm, unsigned long cookie);
|
2020-03-18 16:52:33 +01:00
|
|
|
|
|
|
|
|
struct terminal;

/*
 * Cookie helpers: derive a per-terminal, per-surface buffer cookie from
 * the terminal's address. Offsets +0, +1 and +2+n keep the grid, search
 * box and the n:th CSD (client-side decoration) buffer distinct.
 *
 * NOTE(review): uniqueness across terminals relies on sizeof(struct
 * terminal) exceeding the largest offset used (2 + max CSD index) —
 * confirm against the terminal definition.
 */
static inline unsigned long
shm_cookie_grid(const struct terminal *term)
{
    return (unsigned long)((uintptr_t)term + 0);
}

static inline unsigned long
shm_cookie_search(const struct terminal *term)
{
    return (unsigned long)((uintptr_t)term + 1);
}

static inline unsigned long
shm_cookie_csd(const struct terminal *term, int n)
{
    return (unsigned long)((uintptr_t)term + 2 + n);
}
|