config: tweak.surface-bit-depth: add 16f-bit

This adds support for 16-bit floating point surfaces, using the new
PIXMAN_rgba_float16 image buffer type. This maps to
WL_SHM_FORMAT_ABGR16161616F.
Daniel Eklöf 2025-05-01 09:37:47 +02:00
parent 970e13db8d
commit 81e979b228
9 changed files with 64 additions and 10 deletions
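
As a rough illustration of what the commit message describes, the sketch below pairs the new pixman format with the corresponding wl_shm formats. The constant names are taken from the diff; the struct itself is illustrative and not part of foot.

    /* Hedged sketch: the pixman/wl_shm pairing this commit introduces.
     * Requires a pixman that provides PIXMAN_rgba_float16 (see the
     * meson.build guard further down). */
    #include <pixman.h>
    #include <wayland-client.h>

    struct fp16_surface_formats {
        pixman_format_code_t pix;          /* CPU-side rendering format */
        enum wl_shm_format with_alpha;     /* shared-memory format, with alpha */
        enum wl_shm_format without_alpha;  /* shared-memory format, opaque */
    };

    static const struct fp16_surface_formats fp16 = {
        .pix = PIXMAN_rgba_float16,
        .with_alpha = WL_SHM_FORMAT_ABGR16161616F,
        .without_alpha = WL_SHM_FORMAT_XBGR16161616F,
    };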


@@ -90,6 +90,8 @@
* `16-bit` to `tweak.surface-bit-depth`. Makes foot use 16-bit image
buffers. They provide the necessary color precision required by
`gamma-correct-blending=yes`.
* `16f-bit` to `tweak.surface-bit-depth`. Makes foot use 16-bit
floating point image buffers.
[2025]: https://codeberg.org/dnkl/foot/issues/2025
[1975]: https://codeberg.org/dnkl/foot/issues/1975


@@ -2811,7 +2811,12 @@ parse_section_tweak(struct context *ctx)
_Static_assert(sizeof(conf->tweak.surface_bit_depth) == sizeof(int),
"enum is not 32-bit");
#if defined(HAVE_PIXMAN_RGBA_16)
#if defined(HAVE_PIXMAN_RGBA_FLOAT16)
return value_to_enum(
ctx,
(const char *[]){"auto", "8-bit", "10-bit", "16-bit", "16f-bit", NULL},
(int *)&conf->tweak.surface_bit_depth);
#elif defined(HAVE_PIXMAN_RGBA_16)
return value_to_enum(
ctx,
(const char *[]){"auto", "8-bit", "10-bit", "16-bit", NULL},
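
The hunk above relies on foot's value_to_enum() helper, which is not shown here. A hedged approximation of that pattern, matching a NULL-terminated name list and storing the index into the int-sized enum (hence the _Static_assert), could look like this:

    /* Approximation only, not foot's actual value_to_enum() implementation. */
    #include <stdbool.h>
    #include <string.h>

    static bool
    str_to_enum(const char *value, const char *names[], int *result)
    {
        for (int i = 0; names[i] != NULL; i++) {
            if (strcmp(value, names[i]) == 0) {
                *result = i;     /* index in the name list becomes the enum value */
                return true;
            }
        }
        return false;            /* unrecognized value; caller reports a config error */
    }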


@@ -200,6 +200,7 @@ enum shm_bit_depth {
SHM_BITS_8,
SHM_BITS_10,
SHM_BITS_16,
SHM_BITS_16F,
};
struct config {
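
The enumerators are ordered by increasing depth, which is what lets call sites range-compare (as sixel_init() does further down). A minimal illustration, assuming only the enumerators shown above and using a made-up helper name:

    /* Illustration only: abbreviated to the enumerators visible in the hunk. */
    #include <stdbool.h>

    enum shm_bit_depth { SHM_BITS_8, SHM_BITS_10, SHM_BITS_16, SHM_BITS_16F };

    static bool
    is_deep_surface(enum shm_bit_depth depth)
    {
        return depth >= SHM_BITS_10;  /* true for 10-bit, 16-bit and 16f-bit */
    }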


@@ -2024,7 +2024,7 @@ any of these options.
*surface-bit-depth*
Selects which RGB bit depth to use for image buffers. One of
*auto*, *8-bit*, *10-bit* or *16-bit*.
*auto*, *8-bit*, *10-bit*, *16-bit*, or *16f-bit*.
*auto* chooses bit depth depending on other settings, and
availability.
@@ -2036,13 +2036,15 @@ any of these options.
alpha channel. Thus, it provides higher precision color channels,
but a lower precision alpha channel.
*16-bit* 16 bits for each color channel, alpha included. If
available, this is the default when *gamma-correct-blending=yes*.
*16-bit* and *16f-bit* use 16 bits (with *16f-bit* being floating
point) for each color channel, alpha included. If available, this
is the default when *gamma-correct-blending=yes* (with *16-bit*
preferred over *16f-bit*).
Note that both *10-bit* and *16-bit* are much slower than *8-bit*;
if you want to use gamma-correct blending, and if you prefer speed
(throughput and input latency) over accurate colors, you can set
*surface-bit-depth=8-bit* explicitly.
Note that *10-bit*, *16-bit* and *16f-bit* are all much slower than
*8-bit*; if you want to use gamma-correct blending, and if you
prefer speed (throughput and input latency) over accurate colors,
you can set *surface-bit-depth=8-bit* explicitly.
Default: _auto_
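
For reference, an explicit opt-in in foot.ini would look roughly like this (illustrative snippet; the key lives in the [tweak] section per the commit title):

    [tweak]
    # illustrative: request 16-bit floating point surfaces explicitly
    surface-bit-depth=16f-bit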


@@ -149,6 +149,11 @@ if pixman.version().version_compare('>=0.46.0')
add_project_arguments('-DHAVE_PIXMAN_RGBA_16', language: 'c')
endif
# TODO: should be 0.47.0?
if pixman.version().version_compare('>=0.44.3')
add_project_arguments('-DHAVE_PIXMAN_RGBA_FLOAT16', language: 'c')
endif
tllist = dependency('tllist', version: '>=1.1.0', fallback: 'tllist')
fcft = dependency('fcft', version: ['>=3.3.1', '<4.0.0'], fallback: 'fcft')
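
The TODO above questions the minimum pixman version; independent of the compile-time gate, pixman can also be asked at runtime whether it can render to a given format. A hedged sketch, not part of the commit, using pixman's pixman_format_supported_destination():

    /* Runtime probe sketch: only compiled when the build-time gate passed. */
    #include <stdbool.h>
    #include <pixman.h>

    #if defined(HAVE_PIXMAN_RGBA_FLOAT16)
    static bool
    fp16_renderable(void)
    {
        return pixman_format_supported_destination(PIXMAN_rgba_float16);
    }
    #endif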

shm.c

@@ -1013,6 +1013,36 @@ shm_chain_new(struct wayland *wayl, bool scrollable, size_t pix_instances,
}
#endif
#if defined(HAVE_PIXMAN_RGBA_FLOAT16)
    static bool have_logged_16f_fallback = false;
    if (desired_bit_depth == SHM_BITS_16F ||
        (desired_bit_depth == SHM_BITS_16 &&
         pixman_fmt_with_alpha == PIXMAN_a8r8g8b8))
    {
        if (wayl->shm_have_abgr161616f && wayl->shm_have_xbgr161616f) {
            pixman_fmt_without_alpha = PIXMAN_rgba_float16;
            shm_fmt_without_alpha = WL_SHM_FORMAT_XBGR16161616F;
            pixman_fmt_with_alpha = PIXMAN_rgba_float16;
            shm_fmt_with_alpha = WL_SHM_FORMAT_ABGR16161616F;
            if (!have_logged) {
                have_logged = true;
                LOG_INFO("using 16-bit (float) BGR surfaces");
            }
        } else {
            if (!have_logged_16f_fallback) {
                have_logged_16f_fallback = true;
                LOG_WARN(
                    "16f-bit surfaces requested, but compositor does not "
                    "implement ABGR16161616F+XBGR16161616F");
            }
        }
    }
#endif
    if (desired_bit_depth >= SHM_BITS_10 &&
        pixman_fmt_with_alpha == PIXMAN_a8r8g8b8)
    {
@@ -1101,6 +1131,10 @@ shm_chain_bit_depth(const struct buffer_chain *chain)
#if defined(HAVE_PIXMAN_RGBA_16)
: fmt == PIXMAN_a16b16g16r16
? SHM_BITS_16
#endif
#if defined(HAVE_PIXMAN_RGBA_FLOAT16)
: fmt == PIXMAN_rgba_float16
? SHM_BITS_16F
#endif
: SHM_BITS_10;
}
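
Condensed, and not foot's actual code, the fallback order implemented by the shm.c hunks above is roughly the following; the helper name and parameters are made up, and the real code additionally tracks separate with/without-alpha pixman and wl_shm formats:

    /* Rough sketch: integer 16-bit is preferred, 16-bit float is used when
     * explicitly requested or when integer 16-bit is unavailable, then
     * 10-bit, then the 8-bit default. Uses enum shm_bit_depth from the
     * config.h hunk above and pixman_format_code_t from <pixman.h>. */
    static pixman_format_code_t
    pick_pixman_format(enum shm_bit_depth want,
                       bool have_int16, bool have_fp16, bool have_10bit)
    {
    #if defined(HAVE_PIXMAN_RGBA_16)
        if (want == SHM_BITS_16 && have_int16)
            return PIXMAN_a16b16g16r16;    /* integer 16-bit preferred */
    #endif
    #if defined(HAVE_PIXMAN_RGBA_FLOAT16)
        if ((want == SHM_BITS_16F ||
             (want == SHM_BITS_16 && !have_int16)) && have_fp16)
            return PIXMAN_rgba_float16;    /* explicit 16f-bit, or 16-bit fallback */
    #endif
        if (want >= SHM_BITS_10 && have_10bit)
            return PIXMAN_a2r10g10b10;
        return PIXMAN_a8r8g8b8;            /* 8-bit default */
    }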


@@ -121,8 +121,9 @@ sixel_init(struct terminal *term, int p1, int p2, int p3)
* blending, and b) use the same pixman format as the main
* surfaces, for (hopefully) better performance.
*
* For now, don't support 16-bit surfaces (too much sixel logic
* that assumes 32-bit pixels).
* For now, don't support 16-bit surfaces, neither integer nor
* floating point (too much sixel logic that assumes 32-bit
* pixels).
*/
if (shm_chain_bit_depth(term->render.chains.grid) >= SHM_BITS_10) {
if (term->wl->shm_have_argb2101010 && term->wl->shm_have_xrgb2101010) {


@@ -246,6 +246,8 @@ shm_format(void *data, struct wl_shm *wl_shm, uint32_t format)
case WL_SHM_FORMAT_ABGR2101010: wayl->shm_have_abgr2101010 = true; break;
case WL_SHM_FORMAT_XBGR16161616: wayl->shm_have_xbgr161616 = true; break;
case WL_SHM_FORMAT_ABGR16161616: wayl->shm_have_abgr161616 = true; break;
case WL_SHM_FORMAT_XBGR16161616F: wayl->shm_have_xbgr161616f = true; break;
case WL_SHM_FORMAT_ABGR16161616F: wayl->shm_have_abgr161616f = true; break;
}
#if defined(_DEBUG)
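
The case labels above extend foot's wl_shm format handler. Generically, such a listener is wired up as in this sketch (illustrative, not foot's setup code):

    /* Generic wl_shm listener sketch: the compositor calls the format
     * handler once per supported format at bind time. */
    #include <stdbool.h>
    #include <wayland-client.h>

    static void
    on_shm_format(void *data, struct wl_shm *shm, uint32_t format)
    {
        bool *have_fp16 = data;
        if (format == WL_SHM_FORMAT_ABGR16161616F)
            *have_fp16 = true;
    }

    static const struct wl_shm_listener shm_listener = { .format = on_shm_format };
    /* usage: wl_shm_add_listener(shm, &shm_listener, &have_fp16); */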


@@ -498,6 +498,8 @@ struct wayland {
bool shm_have_xbgr2101010:1;
bool shm_have_abgr161616:1;
bool shm_have_xbgr161616:1;
bool shm_have_abgr161616f:1;
bool shm_have_xbgr161616f:1;
};
struct wayland *wayl_init(