examples: improve examples

This commit is contained in:
Wim Taymans 2019-01-31 11:58:35 +01:00
parent 31dacd9d6f
commit 550a2af044
4 changed files with 106 additions and 25 deletions

View file

@ -41,22 +41,17 @@
struct data { struct data {
struct pw_main_loop *loop; struct pw_main_loop *loop;
struct pw_core *core;
struct pw_remote *remote;
struct pw_stream *stream; struct pw_stream *stream;
double accumulator; double accumulator;
}; };
static void fill_f32(struct data *d, void *dest, int avail) static void fill_f32(struct data *d, void *dest, int n_frames)
{ {
float *dst = dest, val; float *dst = dest, val;
int n_samples = avail / (sizeof(float) * DEFAULT_CHANNELS);
int i, c; int i, c;
for (i = 0; i < n_samples; i++) { for (i = 0; i < n_frames; i++) {
d->accumulator += M_PI_M2 * 440 / DEFAULT_RATE; d->accumulator += M_PI_M2 * 440 / DEFAULT_RATE;
if (d->accumulator >= M_PI_M2) if (d->accumulator >= M_PI_M2)
d->accumulator -= M_PI_M2; d->accumulator -= M_PI_M2;
@ -67,11 +62,21 @@ static void fill_f32(struct data *d, void *dest, int avail)
} }
} }
/* our data processing function is in general:
*
* struct pw_buffer *b;
* b = pw_stream_dequeue_buffer(stream);
*
* .. generate stuff in the buffer ...
*
* pw_stream_queue_buffer(stream, b);
*/
static void on_process(void *userdata) static void on_process(void *userdata)
{ {
struct data *data = userdata; struct data *data = userdata;
struct pw_buffer *b; struct pw_buffer *b;
struct spa_buffer *buf; struct spa_buffer *buf;
int n_frames, stride;
uint8_t *p; uint8_t *p;
if ((b = pw_stream_dequeue_buffer(data->stream)) == NULL) if ((b = pw_stream_dequeue_buffer(data->stream)) == NULL)
@ -81,9 +86,14 @@ static void on_process(void *userdata)
if ((p = buf->datas[0].data) == NULL) if ((p = buf->datas[0].data) == NULL)
return; return;
fill_f32(data, p, buf->datas[0].maxsize); stride = sizeof(float) * DEFAULT_CHANNELS;
n_frames = buf->datas[0].maxsize / stride;
buf->datas[0].chunk->size = buf->datas[0].maxsize; fill_f32(data, p, n_frames);
buf->datas[0].chunk->offset = 0;
buf->datas[0].chunk->stride = stride;
buf->datas[0].chunk->size = n_frames * stride;
pw_stream_queue_buffer(data->stream, b); pw_stream_queue_buffer(data->stream, b);
} }
@ -102,8 +112,21 @@ int main(int argc, char *argv[])
pw_init(&argc, &argv); pw_init(&argc, &argv);
/* make a main loop. If you already have another main loop, you can add
* the fd of this pipewire mainloop to it. */
data.loop = pw_main_loop_new(NULL); data.loop = pw_main_loop_new(NULL);
	/* create a simple stream, the simple stream manages the core and remote
* objects for you if you don't need to deal with them
*
* If you plan to autoconnect your stream, you need to provide at least
* media, category and role properties
*
	 * Pass your events and a user_data pointer as the last arguments. This
* will inform you about the stream state. The most important event
* you need to listen to is the process event where you need to produce
* the data.
*/
data.stream = pw_stream_new_simple( data.stream = pw_stream_new_simple(
pw_main_loop_get_loop(data.loop), pw_main_loop_get_loop(data.loop),
"audio-src", "audio-src",
@ -115,14 +138,16 @@ int main(int argc, char *argv[])
&stream_events, &stream_events,
&data); &data);
data.remote = pw_stream_get_remote(data.stream); /* make one parameter with the supported formats. The SPA_PARAM_EnumFormat
* id means that this is a format enumeration. */
params[0] = spa_format_audio_raw_build(&b, SPA_PARAM_EnumFormat, params[0] = spa_format_audio_raw_build(&b, SPA_PARAM_EnumFormat,
&SPA_AUDIO_INFO_RAW_INIT( &SPA_AUDIO_INFO_RAW_INIT(
.format = SPA_AUDIO_FORMAT_F32, .format = SPA_AUDIO_FORMAT_F32,
.channels = DEFAULT_CHANNELS, .channels = DEFAULT_CHANNELS,
.rate = DEFAULT_RATE )); .rate = DEFAULT_RATE ));
/* now connect this stream. We ask that our process function is
* called in a realtime thread. */
pw_stream_connect(data.stream, pw_stream_connect(data.stream,
PW_DIRECTION_OUTPUT, PW_DIRECTION_OUTPUT,
argc > 1 ? (uint32_t)atoi(argv[1]) : SPA_ID_INVALID, argc > 1 ? (uint32_t)atoi(argv[1]) : SPA_ID_INVALID,
@ -131,6 +156,7 @@ int main(int argc, char *argv[])
PW_STREAM_FLAG_RT_PROCESS, PW_STREAM_FLAG_RT_PROCESS,
params, 1); params, 1);
/* and wait */
pw_main_loop_run(data.loop); pw_main_loop_run(data.loop);
pw_stream_destroy(data.stream); pw_stream_destroy(data.stream);

View file

@ -56,7 +56,6 @@ struct data {
struct pw_remote *remote; struct pw_remote *remote;
struct spa_hook remote_listener; struct spa_hook remote_listener;
struct pw_node *node;
struct spa_port_info port_info; struct spa_port_info port_info;
struct spa_dict port_props; struct spa_dict port_props;
struct spa_dict_item port_items[1]; struct spa_dict_item port_items[1];

View file

@ -103,14 +103,21 @@ static struct spa_pod *sdl_build_formats(SDL_RendererInfo *info, struct spa_pod_
uint32_t i, c; uint32_t i, c;
struct spa_pod_frame f[2]; struct spa_pod_frame f[2];
	/* make an object of type SPA_TYPE_OBJECT_Format and id SPA_PARAM_EnumFormat.
* The object type is important because it defines the properties that are
* acceptable. The id gives more context about what the object is meant to
* contain. In this case we enumerate supported formats. */
spa_pod_builder_push_object(b, &f[0], SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat); spa_pod_builder_push_object(b, &f[0], SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat);
/* add media type and media subtype properties */
spa_pod_builder_prop(b, SPA_FORMAT_mediaType, 0); spa_pod_builder_prop(b, SPA_FORMAT_mediaType, 0);
spa_pod_builder_id(b, SPA_MEDIA_TYPE_video); spa_pod_builder_id(b, SPA_MEDIA_TYPE_video);
spa_pod_builder_prop(b, SPA_FORMAT_mediaSubtype, 0); spa_pod_builder_prop(b, SPA_FORMAT_mediaSubtype, 0);
spa_pod_builder_id(b, SPA_MEDIA_SUBTYPE_raw); spa_pod_builder_id(b, SPA_MEDIA_SUBTYPE_raw);
/* build an enumeration of formats */
spa_pod_builder_prop(b, SPA_FORMAT_VIDEO_format, 0); spa_pod_builder_prop(b, SPA_FORMAT_VIDEO_format, 0);
spa_pod_builder_push_choice(b, &f[1], SPA_CHOICE_Enum, 0); spa_pod_builder_push_choice(b, &f[1], SPA_CHOICE_Enum, 0);
/* first the formats supported by the textures */
for (i = 0, c = 0; i < info->num_texture_formats; i++) { for (i = 0, c = 0; i < info->num_texture_formats; i++) {
uint32_t id = sdl_format_to_id(info->texture_formats[i]); uint32_t id = sdl_format_to_id(info->texture_formats[i]);
if (id == 0) if (id == 0)
@ -119,12 +126,14 @@ static struct spa_pod *sdl_build_formats(SDL_RendererInfo *info, struct spa_pod_
spa_pod_builder_id(b, id); spa_pod_builder_id(b, id);
spa_pod_builder_id(b, id); spa_pod_builder_id(b, id);
} }
/* then all the other ones SDL can convert from/to */
for (i = 0; i < SPA_N_ELEMENTS(sdl_video_formats); i++) { for (i = 0; i < SPA_N_ELEMENTS(sdl_video_formats); i++) {
uint32_t id = sdl_video_formats[i].id; uint32_t id = sdl_video_formats[i].id;
if (id != SPA_VIDEO_FORMAT_UNKNOWN) if (id != SPA_VIDEO_FORMAT_UNKNOWN)
spa_pod_builder_id(b, id); spa_pod_builder_id(b, id);
} }
spa_pod_builder_pop(b, &f[1]); spa_pod_builder_pop(b, &f[1]);
/* add size and framerate ranges */
spa_pod_builder_add(b, spa_pod_builder_add(b,
SPA_FORMAT_VIDEO_size, SPA_POD_CHOICE_RANGE_Rectangle( SPA_FORMAT_VIDEO_size, SPA_POD_CHOICE_RANGE_Rectangle(
&SPA_RECTANGLE(WIDTH, HEIGHT), &SPA_RECTANGLE(WIDTH, HEIGHT),

View file

@ -50,10 +50,6 @@ struct data {
struct pw_main_loop *loop; struct pw_main_loop *loop;
struct pw_core *core;
struct pw_remote *remote;
struct spa_hook remote_listener;
struct pw_stream *stream; struct pw_stream *stream;
struct spa_hook stream_listener; struct spa_hook stream_listener;
@ -77,6 +73,15 @@ static void handle_events(struct data *data)
} }
} }
/* our data processing function is in general:
*
* struct pw_buffer *b;
* b = pw_stream_dequeue_buffer(stream);
*
* .. do stuff with buffer ...
*
* pw_stream_queue_buffer(stream, b);
*/
static void static void
on_process(void *_data) on_process(void *_data)
{ {
@ -103,13 +108,14 @@ on_process(void *_data)
handle_events(data); handle_events(data);
if ((sdata = buf->datas[0].data) == NULL) if ((sdata = buf->datas[0].data) == NULL)
return; goto done;
if (SDL_LockTexture(data->texture, NULL, &ddata, &dstride) < 0) { if (SDL_LockTexture(data->texture, NULL, &ddata, &dstride) < 0) {
fprintf(stderr, "Couldn't lock texture: %s\n", SDL_GetError()); fprintf(stderr, "Couldn't lock texture: %s\n", SDL_GetError());
return; goto done;
} }
/* get the videocrop metadata if any */
if ((mc = spa_buffer_find_meta_data(buf, SPA_META_VideoCrop, sizeof(*mc))) && if ((mc = spa_buffer_find_meta_data(buf, SPA_META_VideoCrop, sizeof(*mc))) &&
spa_meta_region_is_valid(mc)) { spa_meta_region_is_valid(mc)) {
data->rect.x = mc->region.position.x; data->rect.x = mc->region.position.x;
@ -117,6 +123,7 @@ on_process(void *_data)
data->rect.w = mc->region.size.width; data->rect.w = mc->region.size.width;
data->rect.h = mc->region.size.height; data->rect.h = mc->region.size.height;
} }
/* get cursor metadata */
if ((mcs = spa_buffer_find_meta_data(buf, SPA_META_Cursor, sizeof(*mcs))) && if ((mcs = spa_buffer_find_meta_data(buf, SPA_META_Cursor, sizeof(*mcs))) &&
spa_meta_cursor_is_valid(mcs)) { spa_meta_cursor_is_valid(mcs)) {
struct spa_meta_bitmap *mb; struct spa_meta_bitmap *mb;
@ -144,6 +151,7 @@ on_process(void *_data)
goto done; goto done;
} }
/* copy the cursor bitmap into the texture */
src = SPA_MEMBER(mb, mb->offset, uint8_t); src = SPA_MEMBER(mb, mb->offset, uint8_t);
dst = cdata; dst = cdata;
ostride = SPA_MIN(cstride, mb->stride); ostride = SPA_MIN(cstride, mb->stride);
@ -158,6 +166,7 @@ on_process(void *_data)
render_cursor = true; render_cursor = true;
} }
/* copy video image in texture */
sstride = buf->datas[0].chunk->stride; sstride = buf->datas[0].chunk->stride;
ostride = SPA_MIN(sstride, dstride); ostride = SPA_MIN(sstride, dstride);
@ -171,6 +180,7 @@ on_process(void *_data)
SDL_UnlockTexture(data->texture); SDL_UnlockTexture(data->texture);
SDL_RenderClear(data->renderer); SDL_RenderClear(data->renderer);
/* now render the video and then the cursor if any */
SDL_RenderCopy(data->renderer, data->texture, &data->rect, NULL); SDL_RenderCopy(data->renderer, data->texture, &data->rect, NULL);
if (render_cursor) { if (render_cursor) {
SDL_RenderCopy(data->renderer, data->cursor, NULL, &data->cursor_rect); SDL_RenderCopy(data->renderer, data->cursor, NULL, &data->cursor_rect);
@ -191,6 +201,7 @@ static void on_stream_state_changed(void *_data, enum pw_stream_state old,
pw_main_loop_quit(data->loop); pw_main_loop_quit(data->loop);
break; break;
case PW_STREAM_STATE_CONFIGURE: case PW_STREAM_STATE_CONFIGURE:
/* because we started inactive, activate ourselves now */
pw_stream_set_active(data->stream, true); pw_stream_set_active(data->stream, true);
break; break;
default: default:
@ -198,6 +209,16 @@ static void on_stream_state_changed(void *_data, enum pw_stream_state old,
} }
} }
/* Be notified when the stream format changes.
*
* We are now supposed to call pw_stream_finish_format() with success or
* failure, depending on if we can support the format. Because we gave
* a list of supported formats, this should be ok.
*
* As part of pw_stream_finish_format() we can provide parameters that
* will control the buffer memory allocation. This includes the metadata
* that we would like on our buffer, the size, alignment, etc.
*/
static void static void
on_stream_format_changed(void *_data, const struct spa_pod *format) on_stream_format_changed(void *_data, const struct spa_pod *format)
{ {
@ -209,6 +230,7 @@ on_stream_format_changed(void *_data, const struct spa_pod *format)
Uint32 sdl_format; Uint32 sdl_format;
void *d; void *d;
/* NULL means to clear the format */
if (format == NULL) { if (format == NULL) {
pw_stream_finish_format(stream, 0, NULL, 0); pw_stream_finish_format(stream, 0, NULL, 0);
return; return;
@ -217,6 +239,7 @@ on_stream_format_changed(void *_data, const struct spa_pod *format)
fprintf(stderr, "got format:\n"); fprintf(stderr, "got format:\n");
spa_debug_format(2, NULL, format); spa_debug_format(2, NULL, format);
/* call a helper function to parse the format for us. */
spa_format_video_raw_parse(format, &data->format); spa_format_video_raw_parse(format, &data->format);
sdl_format = id_to_sdl_format(data->format.format); sdl_format = id_to_sdl_format(data->format.format);
@ -238,6 +261,8 @@ on_stream_format_changed(void *_data, const struct spa_pod *format)
data->rect.w = data->format.size.width; data->rect.w = data->format.size.width;
data->rect.h = data->format.size.height; data->rect.h = data->format.size.height;
/* a SPA_TYPE_OBJECT_ParamBuffers object defines the acceptable size,
* number, stride etc of the buffers */
params[0] = spa_pod_builder_add_object(&b, params[0] = spa_pod_builder_add_object(&b,
SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers, SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers,
SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(8, 2, MAX_BUFFERS), SPA_PARAM_BUFFERS_buffers, SPA_POD_CHOICE_RANGE_Int(8, 2, MAX_BUFFERS),
@ -246,16 +271,19 @@ on_stream_format_changed(void *_data, const struct spa_pod *format)
SPA_PARAM_BUFFERS_stride, SPA_POD_Int(data->stride), SPA_PARAM_BUFFERS_stride, SPA_POD_Int(data->stride),
SPA_PARAM_BUFFERS_align, SPA_POD_Int(16)); SPA_PARAM_BUFFERS_align, SPA_POD_Int(16));
/* a header metadata with timing information */
params[1] = spa_pod_builder_add_object(&b, params[1] = spa_pod_builder_add_object(&b,
SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Header), SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Header),
SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_header))); SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_header)));
/* video cropping information */
params[2] = spa_pod_builder_add_object(&b, params[2] = spa_pod_builder_add_object(&b,
SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoCrop), SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoCrop),
SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_region))); SPA_PARAM_META_size, SPA_POD_Int(sizeof(struct spa_meta_region)));
#define CURSOR_META_SIZE(w,h) (sizeof(struct spa_meta_cursor) + \ #define CURSOR_META_SIZE(w,h) (sizeof(struct spa_meta_cursor) + \
sizeof(struct spa_meta_bitmap) + w * h * 4) sizeof(struct spa_meta_bitmap) + w * h * 4)
/* cursor information */
params[3] = spa_pod_builder_add_object(&b, params[3] = spa_pod_builder_add_object(&b,
SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Cursor), SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Cursor),
@ -264,9 +292,11 @@ on_stream_format_changed(void *_data, const struct spa_pod *format)
CURSOR_META_SIZE(1,1), CURSOR_META_SIZE(1,1),
CURSOR_META_SIZE(256,256))); CURSOR_META_SIZE(256,256)));
/* we are done */
pw_stream_finish_format(stream, 0, params, 4); pw_stream_finish_format(stream, 0, params, 4);
} }
/* these are the stream events we listen for */
static const struct pw_stream_events stream_events = { static const struct pw_stream_events stream_events = {
PW_VERSION_STREAM_EVENTS, PW_VERSION_STREAM_EVENTS,
.state_changed = on_stream_state_changed, .state_changed = on_stream_state_changed,
@ -296,8 +326,20 @@ int main(int argc, char *argv[])
pw_init(&argc, &argv); pw_init(&argc, &argv);
/* create a main loop */
data.loop = pw_main_loop_new(NULL); data.loop = pw_main_loop_new(NULL);
	/* create a simple stream, the simple stream manages the core and remote
* objects for you if you don't need to deal with them
*
* If you plan to autoconnect your stream, you need to provide at least
* media, category and role properties
*
	 * Pass your events and a user_data pointer as the last arguments. This
* will inform you about the stream state. The most important event
* you need to listen to is the process event where you need to consume
* the data provided to you.
*/
data.stream = pw_stream_new_simple( data.stream = pw_stream_new_simple(
pw_main_loop_get_loop(data.loop), pw_main_loop_get_loop(data.loop),
"video-play", "video-play",
@ -309,8 +351,6 @@ int main(int argc, char *argv[])
&stream_events, &stream_events,
&data); &data);
data.remote = pw_stream_get_remote(data.stream);
data.core = pw_remote_get_core(data.remote);
data.path = argc > 1 ? argv[1] : NULL; data.path = argc > 1 ? argv[1] : NULL;
if (SDL_Init(SDL_INIT_VIDEO) < 0) { if (SDL_Init(SDL_INIT_VIDEO) < 0) {
@ -324,17 +364,24 @@ int main(int argc, char *argv[])
return -1; return -1;
} }
/* build the extra parameters to connect with. To connect, we can provide
* a list of supported formats. We use a builder that writes the param
* object to the stack. */
build_format(&data, &b, params); build_format(&data, &b, params);
/* now connect the stream, we need a direction (input/output),
* an optional target node to connect to, some flags and parameters
*/
pw_stream_connect(data.stream, pw_stream_connect(data.stream,
PW_DIRECTION_INPUT, PW_DIRECTION_INPUT,
data.path ? (uint32_t)atoi(data.path) : SPA_ID_INVALID, data.path ? (uint32_t)atoi(data.path) : SPA_ID_INVALID,
PW_STREAM_FLAG_AUTOCONNECT | PW_STREAM_FLAG_AUTOCONNECT | /* try to automatically connect this stream */
PW_STREAM_FLAG_INACTIVE | PW_STREAM_FLAG_INACTIVE | /* we will activate ourselves */
PW_STREAM_FLAG_EXCLUSIVE | PW_STREAM_FLAG_EXCLUSIVE | /* require exclusive access */
PW_STREAM_FLAG_MAP_BUFFERS, PW_STREAM_FLAG_MAP_BUFFERS, /* mmap the buffer data for us */
params, 1); params, 1); /* extra parameters, see above */
/* do things until we quit the mainloop */
pw_main_loop_run(data.loop); pw_main_loop_run(data.loop);
pw_stream_destroy(data.stream); pw_stream_destroy(data.stream);