diff --git a/src/examples/meson.build b/src/examples/meson.build
index 27743d4f7..ad40f0069 100644
--- a/src/examples/meson.build
+++ b/src/examples/meson.build
@@ -10,10 +10,12 @@ examples = [
   'audio-capture',
   'video-play',
   'video-src',
+  'video-src-sync',
   'video-dsp-play',
   'video-dsp-src',
   'video-play-pull',
   'video-play-reneg',
+  'video-play-sync',
   'video-src-alloc',
   'video-src-reneg',
   'video-src-fixate',
@@ -39,6 +41,7 @@ examples_extra_deps = {
   'video-play-reneg': [sdl_dep],
   'video-play-fixate': [sdl_dep, drm_dep],
   'video-play-pull': [sdl_dep],
+  'video-play-sync': [sdl_dep],
   'video-dsp-play': [sdl_dep],
   'local-v4l2': [sdl_dep],
   'export-sink': [sdl_dep],
diff --git a/src/examples/video-play-sync.c b/src/examples/video-play-sync.c
new file mode 100644
index 000000000..4b16e494f
--- /dev/null
+++ b/src/examples/video-play-sync.c
@@ -0,0 +1,530 @@
+/* PipeWire */
+/* SPDX-FileCopyrightText: Copyright © 2025 Wim Taymans */
+/* SPDX-License-Identifier: MIT */
+
+/*
+ [title]
+ Video input stream using \ref pw_stream "pw_stream" and sync timeline.
+ [title]
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <signal.h>
+
+#include <spa/utils/result.h>
+#include <spa/param/video/format-utils.h>
+#include <spa/param/latency-utils.h>
+#include <spa/param/props.h>
+#include <spa/param/tag-utils.h>
+#include <spa/debug/format.h>
+#include <spa/debug/pod.h>
+
+#include <pipewire/pipewire.h>
+
+#define WIDTH   1920
+#define HEIGHT  1080
+#define RATE    30
+
+#define MAX_BUFFERS     64
+
+#include "sdl.h"
+
+struct pixel {
+    float r, g, b, a;
+};
+
+struct data {
+    const char *path;
+
+    SDL_Renderer *renderer;
+    SDL_Window *window;
+    SDL_Texture *texture;
+
+    struct pw_main_loop *loop;
+
+    struct pw_stream *stream;
+    struct spa_hook stream_listener;
+
+    struct spa_io_position *position;
+
+    struct spa_video_info format;
+    int32_t stride;
+    struct spa_rectangle size;
+
+    int counter;
+    SDL_Rect rect;
+    bool is_yuv;
+
+    bool with_synctimeline;
+    bool with_synctimeline_release;
+};
+
+static void handle_events(struct data *data)
+{
+    SDL_Event event;
+    while (SDL_PollEvent(&event)) {
+        switch (event.type) {
+        case SDL_QUIT:
+            pw_main_loop_quit(data->loop);
+            break;
+        }
+    }
+}
+
+/* our data processing function is, in general:
+ *
+ *  struct pw_buffer *b;
+ *  b = pw_stream_dequeue_buffer(stream);
+ *
+ *  .. do stuff with buffer ...
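+ *
+ *  .. with a SPA_META_SyncTimeline meta, wait for the acquire point
+ *     (the fd in block 1) before reading and signal the release point
+ *     (the fd in block 2) when done ..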
+ *
+ *  pw_stream_queue_buffer(stream, b);
+ */
+static void
+on_process(void *_data)
+{
+    struct data *data = _data;
+    struct pw_stream *stream = data->stream;
+    struct pw_buffer *b;
+    struct spa_buffer *buf;
+    void *sdata, *ddata;
+    int sstride, dstride, ostride;
+    struct spa_meta_header *h;
+    struct spa_meta_sync_timeline *stl = NULL;
+    uint32_t i, j;
+    uint8_t *src, *dst;
+    uint64_t cmd;
+
+    /* dequeue all queued buffers, keep the most recent one and return
+     * the others */
+    b = NULL;
+    while (true) {
+        struct pw_buffer *t;
+        if ((t = pw_stream_dequeue_buffer(stream)) == NULL)
+            break;
+        if (b)
+            pw_stream_queue_buffer(stream, b);
+        b = t;
+    }
+    if (b == NULL) {
+        pw_log_warn("out of buffers: %m");
+        return;
+    }
+
+    buf = b->buffer;
+
+    pw_log_trace("new buffer %p", buf);
+
+    handle_events(data);
+
+    if ((sdata = buf->datas[0].data) == NULL)
+        goto done;
+
+    if ((h = spa_buffer_find_meta_data(buf, SPA_META_Header, sizeof(*h)))) {
+        uint64_t now = pw_stream_get_nsec(stream);
+        pw_log_debug("now:%"PRIu64" pts:%"PRIu64" diff:%"PRIi64,
+                now, h->pts, now - h->pts);
+    }
+    if ((stl = spa_buffer_find_meta_data(buf, SPA_META_SyncTimeline, sizeof(*stl))) &&
+        stl->acquire_point) {
+        /* wait before we can use the buffer */
+        if (read(buf->datas[1].fd, &cmd, sizeof(cmd)) < 0)
+            pw_log_warn("acquire_point wait error %m");
+        pw_log_debug("acquire_point:%"PRIu64, stl->acquire_point);
+    }
+
+    /* copy video image in texture */
+    if (data->is_yuv) {
+        void *datas[4];
+        sstride = data->stride;
+        if (buf->n_datas == 1) {
+            SDL_UpdateTexture(data->texture, NULL,
+                    sdata, sstride);
+        } else {
+            datas[0] = sdata;
+            datas[1] = buf->datas[1].data;
+            datas[2] = buf->datas[2].data;
+            SDL_UpdateYUVTexture(data->texture, NULL,
+                    datas[0], sstride,
+                    datas[1], sstride / 2,
+                    datas[2], sstride / 2);
+        }
+    }
+    else {
+        if (SDL_LockTexture(data->texture, NULL, &ddata, &dstride) < 0) {
+            fprintf(stderr, "Couldn't lock texture: %s\n", SDL_GetError());
+            goto done;
+        }
+
+        sstride = buf->datas[0].chunk->stride;
+        if (sstride == 0)
+            sstride = buf->datas[0].chunk->size / data->size.height;
+        ostride = SPA_MIN(sstride, dstride);
+
+        src = sdata;
+        dst = ddata;
+
+        if (data->format.media_subtype == SPA_MEDIA_SUBTYPE_dsp) {
+            for (i = 0; i < data->size.height; i++) {
+                struct pixel *p = (struct pixel *) src;
+                for (j = 0; j < data->size.width; j++) {
+                    dst[j * 4 + 0] = SPA_CLAMP((uint8_t)(p[j].r * 255.0f), 0u, 255u);
+                    dst[j * 4 + 1] = SPA_CLAMP((uint8_t)(p[j].g * 255.0f), 0u, 255u);
+                    dst[j * 4 + 2] = SPA_CLAMP((uint8_t)(p[j].b * 255.0f), 0u, 255u);
+                    dst[j * 4 + 3] = SPA_CLAMP((uint8_t)(p[j].a * 255.0f), 0u, 255u);
+                }
+                src += sstride;
+                dst += dstride;
+            }
+        } else {
+            for (i = 0; i < data->size.height; i++) {
+                memcpy(dst, src, ostride);
+                src += sstride;
+                dst += dstride;
+            }
+        }
+        SDL_UnlockTexture(data->texture);
+    }
+
+    SDL_RenderClear(data->renderer);
+    /* now render the video */
+    SDL_RenderCopy(data->renderer, data->texture, &data->rect, NULL);
+    SDL_RenderPresent(data->renderer);
+
+  done:
+    pw_stream_queue_buffer(stream, b);
+
+    if (stl != NULL && stl->release_point) {
+        /* we promise to signal the release point */
+        if (data->with_synctimeline_release)
+            SPA_FLAG_CLEAR(stl->flags, SPA_META_SYNC_TIMELINE_UNSCHEDULED_RELEASE);
+        cmd = 1;
+        /* signal buffer release point */
+        if (write(buf->datas[2].fd, &cmd, sizeof(cmd)) < 0)
+            pw_log_warn("release_point signal error %m");
+        pw_log_debug("release:%"PRIu64, stl->release_point);
+    }
+}
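+
+/* Be notified when the stream state changes. The stream is connected with
+ * PW_STREAM_FLAG_INACTIVE, so we activate it ourselves once it reaches
+ * PAUSED. */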
+static void on_stream_state_changed(void *_data, enum pw_stream_state old,
+        enum pw_stream_state state, const char *error)
+{
+    struct data *data = _data;
+    fprintf(stderr, "stream state: \"%s\"\n",
+            pw_stream_state_as_string(state));
+    switch (state) {
+    case PW_STREAM_STATE_UNCONNECTED:
+        pw_main_loop_quit(data->loop);
+        break;
+    case PW_STREAM_STATE_PAUSED:
+        /* because we started inactive, activate ourselves now */
+        pw_stream_set_active(data->stream, true);
+        break;
+    default:
+        break;
+    }
+}
+
+static void
+on_stream_io_changed(void *_data, uint32_t id, void *area, uint32_t size)
+{
+    struct data *data = _data;
+
+    switch (id) {
+    case SPA_IO_Position:
+        data->position = area;
+        break;
+    }
+}
+
+/* Be notified when the stream param changes. We're only looking at the
+ * format changes.
+ *
+ * We are now supposed to call pw_stream_update_params() with success or
+ * failure, depending on whether we can support the format. Because we gave
+ * a list of supported formats, this should be ok.
+ *
+ * As part of pw_stream_update_params() we can provide parameters that
+ * will control the buffer memory allocation. This includes the metadata
+ * that we would like on our buffer, the size, alignment, etc.
+ */
+static void
+on_stream_param_changed(void *_data, uint32_t id, const struct spa_pod *param)
+{
+    struct data *data = _data;
+    struct pw_stream *stream = data->stream;
+    uint8_t params_buffer[1024];
+    struct spa_pod_builder b = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
+    struct spa_pod_frame f;
+    const struct spa_pod *params[5];
+    uint32_t n_params = 0;
+    Uint32 sdl_format;
+    void *d;
+    int32_t mult, size, blocks;
+
+    if (param != NULL && id == SPA_PARAM_Tag) {
+        spa_debug_pod(0, NULL, param);
+        return;
+    }
+    if (param != NULL && id == SPA_PARAM_Latency) {
+        struct spa_latency_info info;
+        if (spa_latency_parse(param, &info) >= 0)
+            fprintf(stderr, "got latency: %"PRIu64"\n", (info.min_ns + info.max_ns) / 2);
+        return;
+    }
+    /* NULL means to clear the format */
+    if (param == NULL || id != SPA_PARAM_Format)
+        return;
+
+    fprintf(stderr, "got format:\n");
+    spa_debug_format(2, NULL, param);
+
+    if (spa_format_parse(param, &data->format.media_type, &data->format.media_subtype) < 0)
+        return;
+
+    if (data->format.media_type != SPA_MEDIA_TYPE_video)
+        return;
+
+    switch (data->format.media_subtype) {
+    case SPA_MEDIA_SUBTYPE_raw:
+        /* call a helper function to parse the format for us.
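+         * It fills data->format.info.raw with the negotiated pixel
+         * format, size and framerate.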
+         */
+        spa_format_video_raw_parse(param, &data->format.info.raw);
+        sdl_format = id_to_sdl_format(data->format.info.raw.format);
+        data->size = SPA_RECTANGLE(data->format.info.raw.size.width,
+                data->format.info.raw.size.height);
+        mult = 1;
+        break;
+    case SPA_MEDIA_SUBTYPE_dsp:
+        spa_format_video_dsp_parse(param, &data->format.info.dsp);
+        if (data->format.info.dsp.format != SPA_VIDEO_FORMAT_DSP_F32)
+            return;
+        sdl_format = SDL_PIXELFORMAT_RGBA32;
+        data->size = SPA_RECTANGLE(data->position->video.size.width,
+                data->position->video.size.height);
+        mult = 4;
+        break;
+    default:
+        sdl_format = SDL_PIXELFORMAT_UNKNOWN;
+        break;
+    }
+
+    if (sdl_format == SDL_PIXELFORMAT_UNKNOWN) {
+        pw_stream_set_error(stream, -EINVAL, "unknown pixel format");
+        return;
+    }
+    if (data->size.width == 0 || data->size.height == 0) {
+        pw_stream_set_error(stream, -EINVAL, "invalid size");
+        return;
+    }
+
+    data->texture = SDL_CreateTexture(data->renderer,
+            sdl_format,
+            SDL_TEXTUREACCESS_STREAMING,
+            data->size.width,
+            data->size.height);
+    switch (sdl_format) {
+    case SDL_PIXELFORMAT_YV12:
+    case SDL_PIXELFORMAT_IYUV:
+        data->stride = data->size.width;
+        size = (data->stride * data->size.height) * 3 / 2;
+        data->is_yuv = true;
+        blocks = 3;
+        break;
+    case SDL_PIXELFORMAT_YUY2:
+        data->is_yuv = true;
+        data->stride = data->size.width * 2;
+        size = (data->stride * data->size.height);
+        blocks = 1;
+        break;
+    default:
+        if (SDL_LockTexture(data->texture, NULL, &d, &data->stride) < 0) {
+            fprintf(stderr, "Couldn't lock texture: %s\n", SDL_GetError());
+            data->stride = data->size.width * 2;
+        } else
+            SDL_UnlockTexture(data->texture);
+        size = data->stride * data->size.height;
+        blocks = 1;
+        break;
+    }
+
+    data->rect.x = 0;
+    data->rect.y = 0;
+    data->rect.w = data->size.width;
+    data->rect.h = data->size.height;
+
+    if (data->with_synctimeline) {
+        /* first add Buffers with 3 blocks (1 data, 2 sync fds).
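+         * Block 0 carries the video frame, block 1 the acquire point fd
+         * and block 2 the release point fd of the sync timeline.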
+         */
+        spa_pod_builder_push_object(&b, &f, SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers);
+        spa_pod_builder_add(&b,
+                SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(8, 2, MAX_BUFFERS),
+                SPA_PARAM_BUFFERS_blocks,  SPA_POD_Int(3),
+                SPA_PARAM_BUFFERS_size,    SPA_POD_Int(size * mult),
+                SPA_PARAM_BUFFERS_stride,  SPA_POD_Int(data->stride * mult),
+                SPA_PARAM_BUFFERS_dataType, SPA_POD_CHOICE_FLAGS_Int((1<<SPA_DATA_MemFd)),
+                0);
+        params[n_params++] = spa_pod_builder_pop(&b, &f);
+
+        /* and a SyncTimeline metadata */
+        spa_pod_builder_push_object(&b, &f, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta);
+        spa_pod_builder_add(&b,
+                SPA_PARAM_META_type, SPA_POD_Id(SPA_META_SyncTimeline),
+                SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_sync_timeline)),
+                0);
+        if (data->with_synctimeline_release) {
+            /* drop the features flags if not provided by both sides */
+            spa_pod_builder_prop(&b, SPA_PARAM_META_features, SPA_POD_PROP_FLAG_DROP);
+            spa_pod_builder_int(&b, SPA_META_FEATURE_SYNC_TIMELINE_RELEASE);
+        }
+        params[n_params++] = spa_pod_builder_pop(&b, &f);
+    }
+
+    /* fallback for when the synctimeline is not negotiated */
+    params[n_params++] = spa_pod_builder_add_object(&b,
+            SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers,
+            SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(8, 2, MAX_BUFFERS),
+            SPA_PARAM_BUFFERS_blocks,  SPA_POD_Int(blocks),
+            SPA_PARAM_BUFFERS_size,    SPA_POD_Int(size * mult),
+            SPA_PARAM_BUFFERS_stride,  SPA_POD_Int(data->stride * mult),
+            SPA_PARAM_BUFFERS_dataType, SPA_POD_CHOICE_FLAGS_Int(
+                    (1<<SPA_DATA_MemPtr)|(1<<SPA_DATA_MemFd)));
+
+    /* a header metadata with timing information */
+    params[n_params++] = spa_pod_builder_add_object(&b,
+            SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
+            SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Header),
+            SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_header)));
+
+    pw_stream_update_params(stream, params, n_params);
+}
+
+static const struct pw_stream_events stream_events = {
+    PW_VERSION_STREAM_EVENTS,
+    .state_changed = on_stream_state_changed,
+    .io_changed = on_stream_io_changed,
+    .param_changed = on_stream_param_changed,
+    .process = on_process,
+};
+
+static int build_format(struct data *data, struct spa_pod_builder *b,
+        const struct spa_pod **params)
+{
+    SDL_RendererInfo info;
+    uint32_t n_params = 0;
+
+    SDL_GetRendererInfo(data->renderer, &info);
+    params[n_params++] = sdl_build_formats(&info, b);
+
+    fprintf(stderr, "supported SDL formats:\n");
+    spa_debug_format(2, NULL, params[0]);
+
+    params[n_params++] = spa_pod_builder_add_object(b,
+            SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat,
+            SPA_FORMAT_mediaType,    SPA_POD_Id(SPA_MEDIA_TYPE_video),
+            SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_dsp),
+            SPA_FORMAT_VIDEO_format, SPA_POD_Id(SPA_VIDEO_FORMAT_DSP_F32));
+
+    fprintf(stderr, "supported DSP formats:\n");
+    spa_debug_format(2, NULL, params[1]);
+
+    return n_params;
+}
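+
+/* build_format() fills params[] with two EnumFormat pods: the raw formats
+ * supported by the SDL renderer and the F32 DSP format, so the stream can
+ * negotiate either media subtype. */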
+
+static void do_quit(void *userdata, int signal_number)
+{
+    struct data *data = userdata;
+    pw_main_loop_quit(data->loop);
+}
+
+int main(int argc, char *argv[])
+{
+    struct data data = { 0, };
+    const struct spa_pod *params[3];
+    uint8_t buffer[1024];
+    struct spa_pod_builder b = SPA_POD_BUILDER_INIT(buffer, sizeof(buffer));
+    struct pw_properties *props;
+    int res, n_params;
+
+    pw_init(&argc, &argv);
+
+    data.with_synctimeline = true;
+    data.with_synctimeline_release = true;
+
+    /* create a main loop */
+    data.loop = pw_main_loop_new(NULL);
+
+    pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGINT, do_quit, &data);
+    pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGTERM, do_quit, &data);
+
+    /* create a simple stream, the simple stream manages the core and remote
+     * objects for you if you don't need to deal with them
+     *
+     * If you plan to autoconnect your stream, you need to provide at least
+     * media, category and role properties
+     *
+     * Pass your events and a user_data pointer as the last arguments. This
+     * will inform you about the stream state. The most important event
+     * you need to listen to is the process event where you need to consume
+     * the data provided to you.
+     */
+    props = pw_properties_new(PW_KEY_MEDIA_TYPE, "Video",
+            PW_KEY_MEDIA_CATEGORY, "Capture",
+            PW_KEY_MEDIA_ROLE, "Camera",
+            NULL);
+
+    data.path = argc > 1 ? argv[1] : NULL;
+    if (data.path)
+        pw_properties_set(props, PW_KEY_TARGET_OBJECT, data.path);
+
+    data.stream = pw_stream_new_simple(
+            pw_main_loop_get_loop(data.loop),
+            "video-play",
+            props,
+            &stream_events,
+            &data);
+
+    if (SDL_Init(SDL_INIT_VIDEO) < 0) {
+        fprintf(stderr, "can't initialize SDL: %s\n", SDL_GetError());
+        return -1;
+    }
+
+    if (SDL_CreateWindowAndRenderer
+            (WIDTH, HEIGHT, SDL_WINDOW_RESIZABLE, &data.window, &data.renderer)) {
+        fprintf(stderr, "can't create window: %s\n", SDL_GetError());
+        return -1;
+    }
+
+    /* build the extra parameters to connect with. To connect, we can provide
+     * a list of supported formats. We use a builder that writes the param
+     * object to the stack. */
+    n_params = build_format(&data, &b, params);
+
+    /* now connect the stream, we need a direction (input/output),
+     * an optional target node to connect to, some flags and parameters
+     */
+    if ((res = pw_stream_connect(data.stream,
+            PW_DIRECTION_INPUT,
+            PW_ID_ANY,
+            PW_STREAM_FLAG_AUTOCONNECT |  /* try to automatically connect this stream */
+            PW_STREAM_FLAG_INACTIVE |     /* we will activate ourselves */
+            PW_STREAM_FLAG_MAP_BUFFERS,   /* mmap the buffer data for us */
+            params, n_params))            /* extra parameters, see above */ < 0) {
+        fprintf(stderr, "can't connect: %s\n", spa_strerror(res));
+        return -1;
+    }
+
+    /* do things until we quit the mainloop */
+    pw_main_loop_run(data.loop);
+
+    pw_stream_destroy(data.stream);
+    pw_main_loop_destroy(data.loop);
+
+    SDL_DestroyTexture(data.texture);
+    SDL_DestroyRenderer(data.renderer);
+    SDL_DestroyWindow(data.window);
+    pw_deinit();
+
+    return 0;
+}
diff --git a/src/examples/video-src-sync.c b/src/examples/video-src-sync.c
new file mode 100644
index 000000000..b34d779ec
--- /dev/null
+++ b/src/examples/video-src-sync.c
@@ -0,0 +1,437 @@
+/* PipeWire */
+/* SPDX-FileCopyrightText: Copyright © 2025 Wim Taymans */
+/* SPDX-License-Identifier: MIT */
+
+/*
+ [title]
+ Video source using \ref pw_stream "pw_stream" and sync timeline.
+ [title]
+ */
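+
+/*
+ * The stream negotiates buffers with 3 blocks: block 0 is the video data in
+ * a memfd, blocks 1 and 2 are eventfds that stand in for the acquire and
+ * release sync objects of the timeline (see on_stream_add_buffer()).
+ */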
+
+#include "config.h"
+
+#include <errno.h>
+#include <stdio.h>
+#include <math.h>
+#include <signal.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/eventfd.h>
+
+#include <spa/param/video/format-utils.h>
+#include <spa/param/tag-utils.h>
+#include <spa/debug/format.h>
+#include <spa/debug/pod.h>
+
+#include <pipewire/pipewire.h>
+
+#define BPP             4
+#define CURSOR_WIDTH    64
+#define CURSOR_HEIGHT   64
+#define CURSOR_BPP      4
+
+#define MAX_BUFFERS     64
+
+#define M_PI_M2 ( M_PI + M_PI )
+
+struct data {
+    struct pw_main_loop *loop;
+    struct spa_source *timer;
+
+    struct pw_context *context;
+    struct pw_core *core;
+
+    struct pw_stream *stream;
+    struct spa_hook stream_listener;
+
+    struct spa_video_info_raw format;
+    int32_t stride;
+
+    int counter;
+    uint32_t seq;
+
+    int res;
+
+    bool with_synctimeline;
+    bool with_synctimeline_release;
+};
+
+static void on_process(void *userdata)
+{
+    struct data *data = userdata;
+    struct pw_buffer *b;
+    struct spa_buffer *buf;
+    uint32_t i, j;
+    uint8_t *p;
+    struct spa_meta_header *h;
+    struct spa_meta_sync_timeline *stl;
+    uint64_t cmd;
+
+    if ((b = pw_stream_dequeue_buffer(data->stream)) == NULL) {
+        pw_log_warn("out of buffers: %m");
+        return;
+    }
+
+    buf = b->buffer;
+    if ((p = buf->datas[0].data) == NULL)
+        return;
+
+    if ((h = spa_buffer_find_meta_data(buf, SPA_META_Header, sizeof(*h)))) {
+#if 0
+        h->pts = pw_stream_get_nsec(data->stream);
+#else
+        h->pts = -1;
+#endif
+        h->flags = 0;
+        h->seq = data->seq++;
+        h->dts_offset = 0;
+    }
+    if ((stl = spa_buffer_find_meta_data(buf, SPA_META_SyncTimeline, sizeof(*stl))) &&
+        stl->release_point) {
+        if (!SPA_FLAG_IS_SET(stl->flags, SPA_META_SYNC_TIMELINE_UNSCHEDULED_RELEASE)) {
+            /* the other end promised to schedule the release point, wait
+             * before we can use the buffer */
+            if (read(buf->datas[2].fd, &cmd, sizeof(cmd)) < 0)
+                pw_log_warn("release_point wait error %m");
+            pw_log_debug("release_point:%"PRIu64, stl->release_point);
+        } else if (spa_buffer_has_meta_features(buf, SPA_META_SyncTimeline,
+                SPA_META_FEATURE_SYNC_TIMELINE_RELEASE)) {
+            /* this happens when the other end did not get the buffer or
+             * will not trigger the release point. There is no point in
+             * waiting, we can use the buffer right away */
+            pw_log_warn("release_point not scheduled:%"PRIu64, stl->release_point);
+        } else {
+            /* the other end does not support the RELEASE feature, we don't
+             * know if the buffer was used or if the release point will
+             * ever be scheduled, we must assume we can reuse the buffer */
+            pw_log_debug("assume buffer was released:%"PRIu64, stl->release_point);
+        }
+    }
+
+    for (i = 0; i < data->format.size.height; i++) {
+        for (j = 0; j < data->format.size.width * BPP; j++)
+            p[j] = data->counter + j * i;
+        p += data->stride;
+        data->counter += 13;
+    }
+
+    buf->datas[0].chunk->offset = 0;
+    buf->datas[0].chunk->size = data->format.size.height * data->stride;
+    buf->datas[0].chunk->stride = data->stride;
+
+    if (stl) {
+        /* set the UNSCHEDULED_RELEASE flag, the consumer will clear this if
+         * it promises to signal the release point */
+        SPA_FLAG_SET(stl->flags, SPA_META_SYNC_TIMELINE_UNSCHEDULED_RELEASE);
+        cmd = 1;
+        stl->acquire_point = data->seq;
+        stl->release_point = data->seq;
+        /* write the acquire point */
+        if (write(buf->datas[1].fd, &cmd, sizeof(cmd)) < 0)
+            pw_log_warn("acquire_point signal error %m");
+    }
+    pw_stream_queue_buffer(data->stream, b);
+}
+
+static void on_timeout(void *userdata, uint64_t expirations)
+{
+    struct data *data = userdata;
+    pw_log_trace("timeout");
+    pw_stream_trigger_process(data->stream);
+}
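+
+/*
+ * As the driver of the graph we have to wake it up ourselves: a timer fires
+ * every 40ms (see on_stream_state_changed() below) and calls
+ * pw_stream_trigger_process(), which eventually results in on_process().
+ */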
%s\n", pw_stream_state_as_string(state), error ? error : ""); + + switch (state) { + case PW_STREAM_STATE_ERROR: + case PW_STREAM_STATE_UNCONNECTED: + pw_main_loop_quit(data->loop); + break; + + case PW_STREAM_STATE_PAUSED: + printf("node id: %d\n", pw_stream_get_node_id(data->stream)); + pw_loop_update_timer(pw_main_loop_get_loop(data->loop), + data->timer, NULL, NULL, false); + break; + case PW_STREAM_STATE_STREAMING: + { + struct timespec timeout, interval; + + timeout.tv_sec = 0; + timeout.tv_nsec = 1; + interval.tv_sec = 0; + interval.tv_nsec = 40 * SPA_NSEC_PER_MSEC; + + printf("driving:%d\n", pw_stream_is_driving(data->stream)); + + if (pw_stream_is_driving(data->stream)) + pw_loop_update_timer(pw_main_loop_get_loop(data->loop), + data->timer, &timeout, &interval, false); + break; + } + default: + break; + } +} + +static void +on_stream_param_changed(void *_data, uint32_t id, const struct spa_pod *param) +{ + struct data *data = _data; + struct pw_stream *stream = data->stream; + uint8_t params_buffer[1024]; + struct spa_pod_builder b = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer)); + const struct spa_pod *params[5]; + uint32_t n_params = 0; + struct spa_pod_frame f; + + if (param != NULL && id == SPA_PARAM_Tag) { + spa_debug_pod(0, NULL, param); + return; + } + if (param == NULL || id != SPA_PARAM_Format) + return; + + fprintf(stderr, "got format:\n"); + spa_debug_format(2, NULL, param); + + spa_format_video_raw_parse(param, &data->format); + + data->stride = SPA_ROUND_UP_N(data->format.size.width * BPP, 4); + + /* first add Buffer with 3 blocks (1 data, 2 sync fds). */ + if (data->with_synctimeline) { + spa_pod_builder_push_object(&b, &f, SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers); + spa_pod_builder_add(&b, + SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(8, 2, MAX_BUFFERS), + SPA_PARAM_BUFFERS_blocks, SPA_POD_Int(3), + SPA_PARAM_BUFFERS_size, SPA_POD_Int(data->stride * data->format.size.height), + SPA_PARAM_BUFFERS_stride, SPA_POD_Int(data->stride), + SPA_PARAM_BUFFERS_dataType, SPA_POD_CHOICE_FLAGS_Int((1<with_synctimeline_release) { + /* drop features flags if not provided by both sides */ + spa_pod_builder_prop(&b, SPA_PARAM_META_features, SPA_POD_PROP_FLAG_DROP); + spa_pod_builder_int(&b, SPA_META_FEATURE_SYNC_TIMELINE_RELEASE); + } + params[n_params++] = spa_pod_builder_pop(&b, &f); + } + + /* fallback for when the synctimeline is not negotiated */ + params[n_params++] = spa_pod_builder_add_object(&b, + SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers, + SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(8, 2, MAX_BUFFERS), + SPA_PARAM_BUFFERS_blocks, SPA_POD_Int(1), + SPA_PARAM_BUFFERS_size, SPA_POD_Int(data->stride * data->format.size.height), + SPA_PARAM_BUFFERS_stride, SPA_POD_Int(data->stride), + SPA_PARAM_BUFFERS_dataType, SPA_POD_CHOICE_FLAGS_Int((1<buffer; + struct spa_data *d; +#ifdef HAVE_MEMFD_CREATE + unsigned int seals; +#endif + struct spa_meta_sync_timeline *s; + + d = buf->datas; + + pw_log_debug("add buffer %p", buffer); + if ((d[0].type & (1<stride * data->format.size.height; + + /* truncate to the right size before we set seals */ + if (ftruncate(d[0].fd, d[0].maxsize) < 0) { + pw_log_error("can't truncate to %d: %m", d[0].maxsize); + return; + } +#ifdef HAVE_MEMFD_CREATE + /* not enforced yet but server might require SEAL_SHRINK later */ + seals = F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL; + if (fcntl(d[0].fd, F_ADD_SEALS, seals) == -1) { + pw_log_warn("Failed to add seals: %m"); + } +#endif + + /* now mmap so we can write to it in 
+static void on_stream_add_buffer(void *_data, struct pw_buffer *buffer)
+{
+    struct data *data = _data;
+    struct spa_buffer *buf = buffer->buffer;
+    struct spa_data *d;
+#ifdef HAVE_MEMFD_CREATE
+    unsigned int seals;
+#endif
+    struct spa_meta_sync_timeline *s;
+
+    d = buf->datas;
+
+    pw_log_debug("add buffer %p", buffer);
+    if ((d[0].type & (1<<SPA_DATA_MemFd)) == 0) {
+        pw_log_error("unsupported data type %08x", d[0].type);
+        return;
+    }
+
+    /* create the memfd on the buffer, set the type and flags */
+    d[0].type = SPA_DATA_MemFd;
+    d[0].flags = SPA_DATA_FLAG_READWRITE;
+#ifdef HAVE_MEMFD_CREATE
+    d[0].fd = memfd_create("video-src-sync-memfd", MFD_CLOEXEC | MFD_ALLOW_SEALING);
+#else
+    d[0].fd = -1;
+#endif
+    if (d[0].fd == -1) {
+        pw_log_error("can't create memfd: %m");
+        return;
+    }
+    d[0].mapoffset = 0;
+    d[0].maxsize = data->stride * data->format.size.height;
+
+    /* truncate to the right size before we set seals */
+    if (ftruncate(d[0].fd, d[0].maxsize) < 0) {
+        pw_log_error("can't truncate to %d: %m", d[0].maxsize);
+        return;
+    }
+#ifdef HAVE_MEMFD_CREATE
+    /* not enforced yet but server might require SEAL_SHRINK later */
+    seals = F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL;
+    if (fcntl(d[0].fd, F_ADD_SEALS, seals) == -1) {
+        pw_log_warn("Failed to add seals: %m");
+    }
+#endif
+
+    /* now mmap so we can write to it in the process function above */
+    d[0].data = mmap(NULL, d[0].maxsize, PROT_READ|PROT_WRITE,
+            MAP_SHARED, d[0].fd, d[0].mapoffset);
+    if (d[0].data == MAP_FAILED) {
+        pw_log_error("can't mmap memory: %m");
+        return;
+    }
+
+    if ((s = spa_buffer_find_meta_data(buf, SPA_META_SyncTimeline, sizeof(*s))) &&
+        buf->n_datas >= 3) {
+        pw_log_debug("got sync timeline");
+        /* acquire fd (just an example, not really a syncobj here) */
+        d[1].type = SPA_DATA_SyncObj;
+        d[1].flags = SPA_DATA_FLAG_READWRITE;
+        d[1].fd = eventfd(0, EFD_CLOEXEC);
+        d[1].mapoffset = 0;
+        d[1].maxsize = 0;
+        if (d[1].fd == -1) {
+            pw_log_error("can't create acquire fd: %m");
+            return;
+        }
+        /* release fd (just an example, not really a syncobj here) */
+        d[2].type = SPA_DATA_SyncObj;
+        d[2].flags = SPA_DATA_FLAG_READWRITE;
+        d[2].fd = eventfd(0, EFD_CLOEXEC);
+        d[2].mapoffset = 0;
+        d[2].maxsize = 0;
+        if (d[2].fd == -1) {
+            pw_log_error("can't create release fd: %m");
+            return;
+        }
+    }
+    if (spa_buffer_has_meta_features(buf, SPA_META_SyncTimeline,
+            SPA_META_FEATURE_SYNC_TIMELINE_RELEASE)) {
+        pw_log_debug("got sync timeline release");
+    }
+}
+
+/* close the memfd and sync fds we set on the buffers here */
+static void on_stream_remove_buffer(void *_data, struct pw_buffer *buffer)
+{
+    struct spa_buffer *buf = buffer->buffer;
+    struct spa_data *d;
+
+    d = buf->datas;
+    pw_log_debug("remove buffer %p", buffer);
+
+    munmap(d[0].data, d[0].maxsize);
+    close(d[0].fd);
+    if (buf->n_datas >= 3) {
+        close(d[1].fd);
+        close(d[2].fd);
+    }
+}
+
+static const struct pw_stream_events stream_events = {
+    PW_VERSION_STREAM_EVENTS,
+    .process = on_process,
+    .state_changed = on_stream_state_changed,
+    .param_changed = on_stream_param_changed,
+    .add_buffer = on_stream_add_buffer,
+    .remove_buffer = on_stream_remove_buffer,
+};
+
+static void do_quit(void *userdata, int signal_number)
+{
+    struct data *data = userdata;
+    pw_main_loop_quit(data->loop);
+}
+
+int main(int argc, char *argv[])
+{
+    struct data data = { 0, };
+    const struct spa_pod *params[2];
+    uint32_t n_params = 0;
+    uint8_t buffer[1024];
+    struct spa_pod_builder b = SPA_POD_BUILDER_INIT(buffer, sizeof(buffer));
+
+    pw_init(&argc, &argv);
+
+    data.loop = pw_main_loop_new(NULL);
+
+    data.with_synctimeline = true;
+    data.with_synctimeline_release = true;
+
+    pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGINT, do_quit, &data);
+    pw_loop_add_signal(pw_main_loop_get_loop(data.loop), SIGTERM, do_quit, &data);
+
+    data.context = pw_context_new(pw_main_loop_get_loop(data.loop), NULL, 0);
+
+    data.timer = pw_loop_add_timer(pw_main_loop_get_loop(data.loop), on_timeout, &data);
+
+    data.core = pw_context_connect(data.context, NULL, 0);
+    if (data.core == NULL) {
+        fprintf(stderr, "can't connect: %m\n");
+        data.res = -errno;
+        goto cleanup;
+    }
+
+    data.stream = pw_stream_new(data.core, "video-src-sync",
+            pw_properties_new(
+                PW_KEY_MEDIA_CLASS, "Video/Source",
+                NULL));
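+
+    /* offer one EnumFormat: BGRA with a preferred size of 320x240 in a
+     * 1x1 to 4096x4096 range at 25fps; the server will fixate the final
+     * format from this */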
+    params[n_params++] = spa_pod_builder_add_object(&b,
+            SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat,
+            SPA_FORMAT_mediaType,       SPA_POD_Id(SPA_MEDIA_TYPE_video),
+            SPA_FORMAT_mediaSubtype,    SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw),
+            SPA_FORMAT_VIDEO_format,    SPA_POD_Id(SPA_VIDEO_FORMAT_BGRA),
+            SPA_FORMAT_VIDEO_size,      SPA_POD_CHOICE_RANGE_Rectangle(
+                    &SPA_RECTANGLE(320, 240),
+                    &SPA_RECTANGLE(1, 1),
+                    &SPA_RECTANGLE(4096, 4096)),
+            SPA_FORMAT_VIDEO_framerate, SPA_POD_Fraction(&SPA_FRACTION(25, 1)));
+
+    pw_stream_add_listener(data.stream,
+            &data.stream_listener,
+            &stream_events,
+            &data);
+
+    pw_stream_connect(data.stream,
+            PW_DIRECTION_OUTPUT,
+            PW_ID_ANY,
+            PW_STREAM_FLAG_DRIVER |
+            PW_STREAM_FLAG_ALLOC_BUFFERS |
+            PW_STREAM_FLAG_MAP_BUFFERS,
+            params, n_params);
+
+    pw_main_loop_run(data.loop);
+
+cleanup:
+    pw_context_destroy(data.context);
+    pw_main_loop_destroy(data.loop);
+    pw_deinit();
+
+    return data.res;
+}