2018-08-02 11:25:27 +02:00
|
|
|
/* PipeWire
|
|
|
|
|
*
|
2018-11-05 17:48:52 +01:00
|
|
|
* Copyright © 2018 Wim Taymans
|
2018-08-02 11:25:27 +02:00
|
|
|
*
|
2018-11-05 17:48:52 +01:00
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
2018-08-02 11:25:27 +02:00
|
|
|
*
|
2018-11-05 17:48:52 +01:00
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
|
* Software.
|
|
|
|
|
*
|
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
|
|
|
|
* DEALINGS IN THE SOFTWARE.
|
2018-08-02 11:25:27 +02:00
|
|
|
*/
|
|
|
|
|
|
2020-08-13 11:31:57 +02:00
|
|
|
#include "config.h"
|
|
|
|
|
|
2018-08-02 11:25:27 +02:00
|
|
|
#include <string.h>
|
|
|
|
|
#include <stdio.h>
|
|
|
|
|
#include <errno.h>
|
|
|
|
|
#include <math.h>
|
2020-03-30 14:09:44 +02:00
|
|
|
#include <getopt.h>
|
2018-08-02 11:25:27 +02:00
|
|
|
#include <time.h>
|
2020-08-13 11:31:57 +02:00
|
|
|
#include <unistd.h>
|
|
|
|
|
#include <limits.h>
|
|
|
|
|
#include <fcntl.h>
|
2020-06-01 18:14:53 +02:00
|
|
|
#include <signal.h>
|
2020-08-13 11:31:57 +02:00
|
|
|
#include <sys/stat.h>
|
2020-12-01 12:05:08 +01:00
|
|
|
#include <sys/mman.h>
|
2020-08-13 11:31:57 +02:00
|
|
|
#include <sys/types.h>
|
|
|
|
|
#if HAVE_PWD_H
|
|
|
|
|
#include <pwd.h>
|
|
|
|
|
#endif
|
2018-08-02 11:25:27 +02:00
|
|
|
|
|
|
|
|
#include <spa/node/node.h>
|
|
|
|
|
#include <spa/utils/hook.h>
|
2020-01-02 13:48:50 +01:00
|
|
|
#include <spa/utils/result.h>
|
2020-12-01 12:05:08 +01:00
|
|
|
#include <spa/utils/json.h>
|
2021-05-18 11:36:13 +10:00
|
|
|
#include <spa/utils/string.h>
|
2018-08-02 11:25:27 +02:00
|
|
|
#include <spa/param/audio/format-utils.h>
|
2018-10-18 15:16:59 +02:00
|
|
|
#include <spa/param/props.h>
|
2018-11-16 16:41:13 +01:00
|
|
|
#include <spa/debug/pod.h>
|
2019-09-30 21:23:29 +02:00
|
|
|
#include <spa/support/dbus.h>
|
2019-12-11 10:57:10 +01:00
|
|
|
#include <spa/monitor/device.h>
|
2018-08-02 11:25:27 +02:00
|
|
|
|
2019-01-14 12:58:23 +01:00
|
|
|
#include "pipewire/pipewire.h"
|
2020-06-04 17:41:01 +02:00
|
|
|
#include "pipewire/private.h"
|
2021-02-10 19:57:35 +01:00
|
|
|
#include "pipewire/conf.h"
|
2021-06-18 17:51:24 +03:00
|
|
|
#include "pipewire/extensions/session-manager.h"
|
|
|
|
|
#include "pipewire/extensions/client-node.h"
|
2018-08-02 11:25:27 +02:00
|
|
|
|
2019-09-30 21:23:29 +02:00
|
|
|
#include <dbus/dbus.h>
|
|
|
|
|
|
2019-11-14 18:35:29 +01:00
|
|
|
#include "media-session.h"
|
|
|
|
|
|
2020-12-30 13:12:48 +01:00
|
|
|
#define NAME "media-session"
|
2021-02-10 19:57:35 +01:00
|
|
|
#define SESSION_PREFIX "media-session.d"
|
2020-12-30 13:12:48 +01:00
|
|
|
#define SESSION_CONF "media-session.conf"
|
2018-09-11 15:25:35 +02:00
|
|
|
|
2021-09-30 10:38:18 +10:00
|
|
|
PW_LOG_TOPIC(ms_topic, "ms.core");
|
|
|
|
|
#define PW_LOG_TOPIC_DEFAULT ms_topic
|
2021-09-21 18:25:54 +10:00
|
|
|
|
2019-11-26 12:53:28 +01:00
|
|
|
#define sm_object_emit(o,m,v,...) spa_hook_list_call(&(o)->hooks, struct sm_object_events, m, v, ##__VA_ARGS__)
|
|
|
|
|
|
|
|
|
|
#define sm_object_emit_update(s) sm_object_emit(s, update, 0)
|
2019-12-17 10:47:31 +01:00
|
|
|
#define sm_object_emit_destroy(s) sm_object_emit(s, destroy, 0)
|
2020-06-04 17:41:01 +02:00
|
|
|
#define sm_object_emit_free(s) sm_object_emit(s, free, 0)
|
2019-11-26 12:53:28 +01:00
|
|
|
|
2019-12-18 12:15:03 +01:00
|
|
|
#define sm_media_session_emit(s,m,v,...) spa_hook_list_call(&(s)->hooks, struct sm_media_session_events, m, v, ##__VA_ARGS__)
|
2019-11-14 18:35:29 +01:00
|
|
|
|
2020-01-09 15:52:53 +01:00
|
|
|
#define sm_media_session_emit_info(s,i) sm_media_session_emit(s, info, 0, i)
|
2019-11-26 12:53:28 +01:00
|
|
|
#define sm_media_session_emit_create(s,obj) sm_media_session_emit(s, create, 0, obj)
|
2019-11-14 18:35:29 +01:00
|
|
|
#define sm_media_session_emit_remove(s,obj) sm_media_session_emit(s, remove, 0, obj)
|
|
|
|
|
#define sm_media_session_emit_rescan(s,seq) sm_media_session_emit(s, rescan, 0, seq)
|
2020-12-01 12:03:28 +01:00
|
|
|
#define sm_media_session_emit_shutdown(s) sm_media_session_emit(s, shutdown, 0)
|
2019-12-18 12:15:03 +01:00
|
|
|
#define sm_media_session_emit_destroy(s) sm_media_session_emit(s, destroy, 0)
|
2021-04-10 16:49:14 +03:00
|
|
|
#define sm_media_session_emit_seat_active(s,...) sm_media_session_emit(s, seat_active, 0, __VA_ARGS__)
|
2021-05-17 15:17:54 +02:00
|
|
|
#define sm_media_session_emit_dbus_disconnected(s) sm_media_session_emit(s, dbus_disconnected, 0)
|
2019-12-18 12:15:03 +01:00
|
|
|
|
2020-07-16 12:50:33 +02:00
|
|
|
int sm_access_flatpak_start(struct sm_media_session *sess);
|
2020-07-16 17:54:18 +02:00
|
|
|
int sm_access_portal_start(struct sm_media_session *sess);
|
2020-08-13 11:32:42 +02:00
|
|
|
int sm_default_nodes_start(struct sm_media_session *sess);
|
2020-08-13 17:01:47 +02:00
|
|
|
int sm_default_profile_start(struct sm_media_session *sess);
|
2020-08-17 11:17:21 +02:00
|
|
|
int sm_default_routes_start(struct sm_media_session *sess);
|
2020-08-17 17:55:20 +02:00
|
|
|
int sm_restore_stream_start(struct sm_media_session *sess);
|
2021-01-17 00:31:47 +02:00
|
|
|
int sm_streams_follow_default_start(struct sm_media_session *sess);
|
2021-08-19 12:14:37 -04:00
|
|
|
int sm_alsa_no_dsp_start(struct sm_media_session *sess);
|
2019-12-18 12:15:03 +01:00
|
|
|
int sm_alsa_midi_start(struct sm_media_session *sess);
|
|
|
|
|
int sm_v4l2_monitor_start(struct sm_media_session *sess);
|
2020-04-20 12:26:50 +05:30
|
|
|
int sm_libcamera_monitor_start(struct sm_media_session *sess);
|
2019-12-18 12:15:03 +01:00
|
|
|
int sm_bluez5_monitor_start(struct sm_media_session *sess);
|
2021-07-30 19:40:32 +03:00
|
|
|
int sm_bluez5_autoswitch_start(struct sm_media_session *sess);
|
2019-12-18 12:15:03 +01:00
|
|
|
int sm_alsa_monitor_start(struct sm_media_session *sess);
|
2020-01-08 13:24:06 +01:00
|
|
|
int sm_suspend_node_start(struct sm_media_session *sess);
|
2021-04-10 16:49:14 +03:00
|
|
|
#ifdef HAVE_SYSTEMD
|
|
|
|
|
int sm_logind_start(struct sm_media_session *sess);
|
|
|
|
|
#endif
|
2020-01-07 16:07:51 +01:00
|
|
|
|
|
|
|
|
int sm_policy_node_start(struct sm_media_session *sess);
|
|
|
|
|
|
|
|
|
|
int sm_session_manager_start(struct sm_media_session *sess);
|
2019-11-14 18:35:29 +01:00
|
|
|
|
|
|
|
|
/** user data to add to an object */
|
|
|
|
|
/** User data attached to an sm_object; payload bytes follow the header. */
struct data {
	struct spa_list link;	/**< link in sm_object data list */
	const char *id;		/**< key; pointer stored as-is, not copied */
	size_t size;		/**< payload size following this header */
};
|
2019-07-10 20:24:11 +02:00
|
|
|
|
2019-11-19 16:08:40 +01:00
|
|
|
/** Internal wrapper around the public sm_param. */
struct param {
	struct sm_param this;
};
|
|
|
|
|
|
2019-11-18 13:10:21 +01:00
|
|
|
/** A pending core sync; callback fires when the round-trip completes. */
struct sync {
	struct spa_list link;		/**< link in impl sync_list */
	int seq;			/**< sequence number of the pending sync */
	void (*callback) (void *data);	/**< invoked when seq is seen */
	void *data;			/**< user data for callback */
};
|
|
|
|
|
|
2018-08-02 11:25:27 +02:00
|
|
|
/** Session manager implementation state, wrapping the public sm_media_session. */
struct impl {
	struct sm_media_session this;

	const char *config_dir;			/**< directory the config was loaded from */

	struct pw_properties *conf;		/**< parsed session config */
	struct pw_properties *modules;		/**< module enable/disable table */

	struct pw_main_loop *loop;
	struct spa_dbus *dbus;
	struct spa_hook dbus_connection_listener;

	/* core used by device monitors */
	struct pw_core *monitor_core;
	struct spa_hook monitor_listener;
	int monitor_seq;

	/* core used for policy decisions */
	struct pw_core *policy_core;
	struct spa_hook policy_listener;
	struct spa_hook proxy_policy_listener;

	/* separate registries per core: each handles only its own objects,
	 * which resolves global-id races between the two cores */
	struct pw_registry *registry;
	struct spa_hook registry_listener;
	struct pw_registry *monitor_registry;
	struct spa_hook monitor_registry_listener;

	struct pw_map globals;			/**< id -> sm_object */
	struct spa_list object_list;		/**< all sm_objects */

	struct spa_list registry_event_list;	/**< pending registry events */

	struct spa_hook_list hooks;

	struct spa_list endpoint_link_list;	/** list of struct endpoint_link */
	struct pw_map endpoint_links;		/** map of endpoint_link */

	struct spa_list link_list;		/** list of struct link */

	struct spa_list sync_list;		/** list of struct sync */
	int rescan_seq;				/**< seq of the pending rescan sync */
	int last_seq;

	unsigned int scanning:1;		/**< rescan currently in progress */
	unsigned int rescan_pending:1;		/**< another rescan requested while scanning */
	unsigned int seat_active:1;
};
|
|
|
|
|
|
2019-11-15 17:13:45 +01:00
|
|
|
/** An exported endpoint link, aggregating one or more node/port links. */
struct endpoint_link {
	uint32_t id;				/**< id in impl endpoint_links map */

	struct pw_endpoint_link_info info;	/**< exported link info */

	struct impl *impl;

	struct spa_list link;		/**< link in struct impl endpoint_link_list */
	struct spa_list link_list;	/**< list of struct link */
};
|
|
|
|
|
|
|
|
|
|
/** A single node/port link, possibly owned by an endpoint_link. */
struct link {
	struct pw_proxy *proxy;		/**< proxy for link */
	struct spa_hook listener;	/**< proxy listener */

	uint32_t output_node;
	uint32_t output_port;
	uint32_t input_node;
	uint32_t input_port;

	struct endpoint_link *endpoint_link;	/**< owner, or NULL for a free link */

	struct spa_list link;		/**< link in struct endpoint_link link_list or
					  *  struct impl link_list */
};
|
|
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
/** Per-type descriptor used to construct sm_objects from registry globals. */
struct object_info {
	const char *type;		/**< interface type string */
	uint32_t version;		/**< interface version to bind */
	const void *events;		/**< proxy event handlers */
	size_t size;			/**< size of the object to allocate */
	int (*init) (void *object);	/**< type-specific initializer */
	void (*destroy) (void *object);	/**< type-specific destructor */
};
|
|
|
|
|
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
/** A queued registry global event; policy-core events are postponed until
 *  after a monitor sync, so they are stored here in the meantime. */
struct registry_event {
	uint32_t id;			/**< global id */
	uint32_t permissions;
	const char *type;		/**< interface type of the global */
	uint32_t version;
	const struct spa_dict *props;	/**< event props (may point into props_store) */

	struct pw_proxy *proxy;		/**< proxy bound for this global, if any */

	int seq;			/**< sync sequence the event waits on */
	struct pw_properties *props_store;	/**< owned copy backing props */

	struct spa_list link;		/**< link in impl registry_event_list */
	unsigned int monitor:1;		/**< event came from the monitor registry */
	unsigned int allocated:1;	/**< event was heap-allocated */
};
|
|
|
|
|
|
2019-12-18 12:15:03 +01:00
|
|
|
static void add_object(struct impl *impl, struct sm_object *obj, uint32_t id)
|
2019-11-14 18:35:29 +01:00
|
|
|
{
|
|
|
|
|
size_t size = pw_map_get_size(&impl->globals);
|
2019-12-18 12:15:03 +01:00
|
|
|
obj->id = id;
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
pw_log_debug("add global '%u' %p monitor:%d", obj->id, obj, obj->monitor_global);
|
2019-11-19 16:07:41 +01:00
|
|
|
while (obj->id > size)
|
|
|
|
|
pw_map_insert_at(&impl->globals, size++, NULL);
|
|
|
|
|
pw_map_insert_at(&impl->globals, obj->id, obj);
|
2019-12-18 12:15:03 +01:00
|
|
|
sm_media_session_emit_create(impl, obj);
|
2019-11-14 18:35:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/** Unregister obj from the globals map and emit the remove event.
 *  The remove event is emitted while obj->id is still valid; the id is
 *  invalidated only afterwards. */
static void remove_object(struct impl *impl, struct sm_object *obj)
{
	pw_log_debug("remove global '%u' %p monitor:%d", obj->id, obj, obj->monitor_global);
	pw_map_insert_at(&impl->globals, obj->id, NULL);
	sm_media_session_emit_remove(impl, obj);
	obj->id = SPA_ID_INVALID;
}
|
|
|
|
|
|
2020-07-22 14:14:02 +02:00
|
|
|
static void *find_object(struct impl *impl, uint32_t id, const char *type)
|
2019-11-14 18:35:29 +01:00
|
|
|
{
|
2020-07-22 14:14:02 +02:00
|
|
|
struct sm_object *obj;
|
|
|
|
|
if ((obj = pw_map_lookup(&impl->globals, id)) == NULL)
|
|
|
|
|
return NULL;
|
2021-05-18 11:43:49 +10:00
|
|
|
if (type != NULL && !spa_streq(obj->type, type))
|
2020-07-22 14:14:02 +02:00
|
|
|
return NULL;
|
|
|
|
|
return obj;
|
2019-11-14 18:35:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct data *object_find_data(struct sm_object *obj, const char *id)
|
|
|
|
|
{
|
|
|
|
|
struct data *d;
|
|
|
|
|
spa_list_for_each(d, &obj->data, link) {
|
2021-05-18 11:36:13 +10:00
|
|
|
if (spa_streq(d->id, id))
|
2019-11-14 18:35:29 +01:00
|
|
|
return d;
|
|
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void *sm_object_add_data(struct sm_object *obj, const char *id, size_t size)
|
|
|
|
|
{
|
|
|
|
|
struct data *d;
|
|
|
|
|
|
|
|
|
|
d = object_find_data(obj, id);
|
|
|
|
|
if (d != NULL) {
|
|
|
|
|
if (d->size == size)
|
|
|
|
|
goto done;
|
|
|
|
|
spa_list_remove(&d->link);
|
|
|
|
|
free(d);
|
|
|
|
|
}
|
|
|
|
|
d = calloc(1, sizeof(struct data) + size);
|
|
|
|
|
d->id = id;
|
|
|
|
|
d->size = size;
|
|
|
|
|
|
|
|
|
|
spa_list_append(&obj->data, &d->link);
|
|
|
|
|
done:
|
2021-05-06 13:41:44 +10:00
|
|
|
return SPA_PTROFF(d, sizeof(struct data), void);
|
2019-11-14 18:35:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void *sm_object_get_data(struct sm_object *obj, const char *id)
|
|
|
|
|
{
|
|
|
|
|
struct data *d;
|
|
|
|
|
d = object_find_data(obj, id);
|
|
|
|
|
if (d == NULL)
|
|
|
|
|
return NULL;
|
2021-05-06 13:41:44 +10:00
|
|
|
return SPA_PTROFF(d, sizeof(struct data), void);
|
2019-11-14 18:35:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
int sm_object_remove_data(struct sm_object *obj, const char *id)
|
|
|
|
|
{
|
|
|
|
|
struct data *d;
|
|
|
|
|
d = object_find_data(obj, id);
|
|
|
|
|
if (d == NULL)
|
|
|
|
|
return -ENOENT;
|
|
|
|
|
spa_list_remove(&d->link);
|
|
|
|
|
free(d);
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
/** Destroy obj and free it only when it has been discarded by all holders.
 *
 *  The destroy phase (listeners removed, global unregistered, proxies torn
 *  down) runs at most once, guarded by obj->destroyed; the statement order
 *  below is deliberate and must not change. The final free happens only
 *  when obj->discarded is set; otherwise a monitor may still hold a
 *  reference (see the global-id race-condition handling).
 */
static int sm_object_destroy_maybe_free(struct sm_object *obj)
{
	struct impl *impl = SPA_CONTAINER_OF(obj->session, struct impl, this);
	struct data *d;

	pw_log_debug("%p: destroy object %p id:%d proxy:%p handle:%p monitor:%d destroyed:%d discarded:%d", obj->session,
			obj, obj->id, obj->proxy, obj->handle, obj->monitor_global, obj->destroyed, obj->discarded);

	/* destroy phase already done earlier; only the free part remains */
	if (obj->destroyed)
		goto unref;

	obj->destroyed = true;

	sm_object_emit_destroy(obj);

	if (SPA_FLAG_IS_SET(obj->mask, SM_OBJECT_CHANGE_MASK_LISTENER)) {
		SPA_FLAG_CLEAR(obj->mask, SM_OBJECT_CHANGE_MASK_LISTENER);
		spa_hook_remove(&obj->object_listener);
	}

	/* unregister from the globals map while the id is still valid */
	if (obj->id != SPA_ID_INVALID)
		remove_object(impl, obj);

	/* type-specific destructor, if any */
	if (obj->destroy)
		obj->destroy(obj);

	spa_hook_remove(&obj->handle_listener);

	if (obj->proxy) {
		spa_hook_remove(&obj->proxy_listener);
		/* proxy and handle can be the same object; destroy only once */
		if (obj->proxy != obj->handle)
			pw_proxy_destroy(obj->proxy);
		obj->proxy = NULL;
	}

	/* keep the handle alive across pw_proxy_destroy so the emit below
	 * does not touch freed memory */
	pw_proxy_ref(obj->handle);
	pw_proxy_destroy(obj->handle);

	sm_object_emit_free(obj);

unref:
	/* not yet discarded: someone still references obj, keep it around */
	if (!obj->discarded)
		return 0;

	pw_properties_free(obj->props);
	obj->props = NULL;

	/* release all attached user data */
	spa_list_consume(d, &obj->data, link) {
		spa_list_remove(&d->link);
		free(d);
	}

	spa_list_remove(&obj->link);
	pw_proxy_unref(obj->handle); /* frees obj */

	return 0;
}
|
|
|
|
|
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
/* Destroy an object explicitly: mark it discarded (so monitor-held
 * references are dropped), then release our reference, which frees the
 * object once no other references remain. */
int sm_object_destroy(struct sm_object *obj)
{
	sm_object_discard(obj);
	return sm_object_destroy_maybe_free(obj);
}
|
|
|
|
|
|
2019-11-19 16:08:40 +01:00
|
|
|
/**
 * Append a copy of \a param to \a param_list.
 *
 * \a param must be a SPA object pod; when \a id is SPA_ID_INVALID the
 * pod's own object id is used.  A result whose sequence number does not
 * match the current \a param_seq entry belongs to a superseded
 * enum_params round and is rejected with EBUSY.
 *
 * Returns the new entry, or NULL with errno set: EINVAL for a bad pod
 * or an out-of-range id, EBUSY for a stale seq, ENOMEM from malloc.
 */
static struct param *add_param(struct spa_list *param_list,
		int seq, int *param_seq, uint32_t id, const struct spa_pod *param)
{
	struct param *p;

	if (param == NULL || !spa_pod_is_object(param)) {
		errno = EINVAL;
		return NULL;
	}

	if (id == SPA_ID_INVALID)
		id = SPA_POD_OBJECT_ID(param);

	if (id >= SM_MAX_PARAMS) {
		/* id is uint32_t: use %u, not %d */
		pw_log_error("too big param id %u", id);
		errno = EINVAL;
		return NULL;
	}

	if (seq != param_seq[id]) {
		pw_log_debug("ignoring param %u, seq:%d != current_seq:%d",
				id, seq, param_seq[id]);
		errno = EBUSY;
		return NULL;
	}

	/* single allocation: the struct followed by a copy of the pod */
	p = malloc(sizeof(struct param) + SPA_POD_SIZE(param));
	if (p == NULL)
		return NULL;

	p->this.id = id;
	p->this.param = SPA_PTROFF(p, sizeof(struct param), struct spa_pod);
	memcpy(p->this.param, param, SPA_POD_SIZE(param));

	spa_list_append(param_list, &p->this.link);

	return p;
}
|
|
|
|
|
|
|
|
|
|
|
2019-11-27 12:19:09 +01:00
|
|
|
/* Remove and free the params with the given id from param_list
 * (all params when id == SPA_ID_INVALID); returns how many were removed. */
static uint32_t clear_params(struct spa_list *param_list, uint32_t id)
{
	struct param *it, *tmp;
	uint32_t removed = 0;

	spa_list_for_each_safe(it, tmp, param_list, this.link) {
		if (id != SPA_ID_INVALID && it->this.id != id)
			continue;

		spa_list_remove(&it->this.link);
		free(it);
		removed++;
	}
	return removed;
}
|
|
|
|
|
|
2020-07-16 17:48:00 +02:00
|
|
|
/**
|
|
|
|
|
* Core
|
|
|
|
|
*/
|
|
|
|
|
/* Registry description for Core proxies: a bare sm_object with no
 * extra events or init/destroy hooks. */
static const struct object_info core_object_info = {
	.type = PW_TYPE_INTERFACE_Core,
	.version = PW_VERSION_CORE,
	.size = sizeof(struct sm_object),
	.init = NULL,
};
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Module
|
|
|
|
|
*/
|
|
|
|
|
/* Registry description for Module proxies: base sm_object, no hooks. */
static const struct object_info module_info = {
	.type = PW_TYPE_INTERFACE_Module,
	.version = PW_VERSION_MODULE,
	.size = sizeof(struct sm_object),
	.init = NULL,
};
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* Factory
|
|
|
|
|
*/
|
|
|
|
|
/* Registry description for Factory proxies: base sm_object, no hooks. */
static const struct object_info factory_info = {
	.type = PW_TYPE_INTERFACE_Factory,
	.version = PW_VERSION_FACTORY,
	.size = sizeof(struct sm_object),
	.init = NULL,
};
|
|
|
|
|
|
2019-11-28 11:13:53 +01:00
|
|
|
/**
|
|
|
|
|
* Clients
|
|
|
|
|
*/
|
|
|
|
|
static void client_event_info(void *object, const struct pw_client_info *info)
|
|
|
|
|
{
|
|
|
|
|
struct sm_client *client = object;
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(client->obj.session, struct impl, this);
|
|
|
|
|
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: client %d info", impl, client->obj.id);
|
2021-09-03 13:26:15 +02:00
|
|
|
client->info = pw_client_info_merge(client->info, info, client->obj.changed == 0);
|
2019-11-28 11:13:53 +01:00
|
|
|
|
|
|
|
|
client->obj.avail |= SM_CLIENT_CHANGE_MASK_INFO;
|
|
|
|
|
client->obj.changed |= SM_CLIENT_CHANGE_MASK_INFO;
|
2019-12-16 13:37:19 +01:00
|
|
|
sm_object_sync_update(&client->obj);
|
2019-11-28 11:13:53 +01:00
|
|
|
}
|
|
|
|
|
|
2019-12-11 14:53:39 +01:00
|
|
|
/* pw_client proxy event handlers */
static const struct pw_client_events client_events = {
	PW_VERSION_CLIENT_EVENTS,
	.info = client_event_info,
};
|
|
|
|
|
|
|
|
|
|
static void client_destroy(void *object)
|
|
|
|
|
{
|
|
|
|
|
struct sm_client *client = object;
|
|
|
|
|
if (client->info)
|
|
|
|
|
pw_client_info_free(client->info);
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
/* Registry description for Client proxies: sm_client with info
 * tracking and a destroy hook to free the cached info. */
static const struct object_info client_info = {
	.type = PW_TYPE_INTERFACE_Client,
	.version = PW_VERSION_CLIENT,
	.events = &client_events,
	.size = sizeof(struct sm_client),
	.init = NULL,
	.destroy = client_destroy,
};
|
|
|
|
|
|
2019-11-28 11:13:53 +01:00
|
|
|
/**
|
|
|
|
|
* Device
|
|
|
|
|
*/
|
|
|
|
|
/* Device proxy info event: merge the info, and for every param id the
 * server flagged as changed (params[i].user != 0), drop the stale
 * cached params and re-enumerate the readable ones. */
static void device_event_info(void *object, const struct pw_device_info *info)
{
	struct sm_device *device = object;
	struct impl *impl = SPA_CONTAINER_OF(device->obj.session, struct impl, this);
	uint32_t i;

	pw_log_debug("%p: device %d info", impl, device->obj.id);
	/* merge resets the cached info only when no change is pending */
	info = device->info = pw_device_info_merge(device->info, info, device->obj.changed == 0);

	device->obj.avail |= SM_DEVICE_CHANGE_MASK_INFO;
	device->obj.changed |= SM_DEVICE_CHANGE_MASK_INFO;

	if (info->change_mask & PW_DEVICE_CHANGE_MASK_PARAMS) {
		for (i = 0; i < info->n_params; i++) {
			uint32_t id = info->params[i].id;

			/* user != 0 marks params changed since last round */
			if (info->params[i].user == 0)
				continue;

			if (id >= SM_MAX_PARAMS) {
				pw_log_error("%p: too big param id %d", impl, id);
				continue;
			}

			/* forget cached values for this id */
			device->n_params -= clear_params(&device->param_list, id);

			if (info->params[i].flags & SPA_PARAM_INFO_READ) {
				int res;
				res = pw_device_enum_params((struct pw_device*)device->obj.proxy,
						++device->param_seq[id], id, 0, UINT32_MAX, NULL);
				/* async result carries the real seq to match
				 * against incoming param events */
				if (SPA_RESULT_IS_ASYNC(res))
					device->param_seq[id] = res;
				pw_log_debug("%p: device %d enum params %d seq:%d", impl,
						device->obj.id, id, device->param_seq[id]);
			}
			info->params[i].user = 0;
		}
	}
	sm_object_sync_update(&device->obj);
	sm_media_session_schedule_rescan(&impl->this);
}
|
|
|
|
|
|
|
|
|
|
/* Device param event: cache the param (sequence-checked by add_param)
 * and mark the PARAMS change. */
static void device_event_param(void *object, int seq,
		uint32_t id, uint32_t index, uint32_t next,
		const struct spa_pod *param)
{
	struct sm_device *dev = object;
	struct impl *impl = SPA_CONTAINER_OF(dev->obj.session, struct impl, this);
	struct param *added;

	pw_log_debug("%p: device %p param %d index:%d seq:%d", impl, dev, id, index, seq);

	added = add_param(&dev->param_list, seq, dev->param_seq, id, param);
	if (added != NULL)
		dev->n_params++;

	dev->obj.avail |= SM_DEVICE_CHANGE_MASK_PARAMS;
	dev->obj.changed |= SM_DEVICE_CHANGE_MASK_PARAMS;
}
|
|
|
|
|
|
2019-12-11 15:00:41 +01:00
|
|
|
/* pw_device proxy event handlers */
static const struct pw_device_events device_events = {
	PW_VERSION_DEVICE_EVENTS,
	.info = device_event_info,
	.param = device_event_param,
};
|
|
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
static int device_init(void *object)
|
|
|
|
|
{
|
|
|
|
|
struct sm_device *device = object;
|
|
|
|
|
spa_list_init(&device->node_list);
|
|
|
|
|
spa_list_init(&device->param_list);
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-28 11:13:53 +01:00
|
|
|
static void device_destroy(void *object)
|
|
|
|
|
{
|
|
|
|
|
struct sm_device *device = object;
|
|
|
|
|
struct sm_node *node;
|
|
|
|
|
|
|
|
|
|
spa_list_consume(node, &device->node_list, link) {
|
|
|
|
|
node->device = NULL;
|
|
|
|
|
spa_list_remove(&node->link);
|
|
|
|
|
}
|
|
|
|
|
clear_params(&device->param_list, SPA_ID_INVALID);
|
|
|
|
|
device->n_params = 0;
|
|
|
|
|
|
|
|
|
|
if (device->info)
|
|
|
|
|
pw_device_info_free(device->info);
|
2019-12-19 13:15:10 +01:00
|
|
|
device->info = NULL;
|
2019-11-28 11:13:53 +01:00
|
|
|
}
|
|
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
/* Registry description for PipeWire Device proxies. */
static const struct object_info device_info = {
	.type = PW_TYPE_INTERFACE_Device,
	.version = PW_VERSION_DEVICE,
	.events = &device_events,
	.size = sizeof(struct sm_device),
	.init = device_init,
	.destroy = device_destroy,
};
|
|
|
|
|
|
|
|
|
|
/* Registry description for SPA Device handles (monitor-created devices);
 * same sm_device wrapper but without pw_device events. */
static const struct object_info spa_device_info = {
	.type = SPA_TYPE_INTERFACE_Device,
	.version = SPA_VERSION_DEVICE,
	.size = sizeof(struct sm_device),
	.init = device_init,
	.destroy = device_destroy,
};
|
|
|
|
|
|
2019-11-14 18:35:29 +01:00
|
|
|
/**
|
|
|
|
|
* Node
|
|
|
|
|
*/
|
|
|
|
|
/* Node proxy info event: merge the info and, when param monitoring is
 * enabled for this object (obj.mask), refresh cached params for every
 * id the server flagged as changed. */
static void node_event_info(void *object, const struct pw_node_info *info)
{
	struct sm_node *node = object;
	struct impl *impl = SPA_CONTAINER_OF(node->obj.session, struct impl, this);
	uint32_t i;

	pw_log_debug("%p: node %d info", impl, node->obj.id);
	/* merge resets the cached info only when no change is pending */
	info = node->info = pw_node_info_merge(node->info, info, node->obj.changed == 0);

	node->obj.avail |= SM_NODE_CHANGE_MASK_INFO;
	node->obj.changed |= SM_NODE_CHANGE_MASK_INFO;

	if (info->change_mask & PW_NODE_CHANGE_MASK_PARAMS &&
	    (node->obj.mask & SM_NODE_CHANGE_MASK_PARAMS)) {
		for (i = 0; i < info->n_params; i++) {
			uint32_t id = info->params[i].id;

			/* user != 0 marks params changed since last round */
			if (info->params[i].user == 0)
				continue;

			if (id >= SM_MAX_PARAMS) {
				pw_log_error("%p: too big param id %d", impl, id);
				continue;
			}

			/* forget cached values for this id */
			node->n_params -= clear_params(&node->param_list, id);

			if (info->params[i].flags & SPA_PARAM_INFO_READ) {
				int res;
				res = pw_node_enum_params((struct pw_node*)node->obj.proxy,
						++node->param_seq[id], id, 0, UINT32_MAX, NULL);
				/* async result carries the real seq to match
				 * against incoming param events */
				if (SPA_RESULT_IS_ASYNC(res))
					node->param_seq[id] = res;
				pw_log_debug("%p: node %d enum params %d seq:%d", impl,
						node->obj.id, id, node->param_seq[id]);
			}
			info->params[i].user = 0;
		}
	}
	sm_object_sync_update(&node->obj);
	sm_media_session_schedule_rescan(&impl->this);
}
|
|
|
|
|
|
|
|
|
|
/* Node param event: cache the param (sequence-checked by add_param)
 * and mark the PARAMS change. */
static void node_event_param(void *object, int seq,
		uint32_t id, uint32_t index, uint32_t next,
		const struct spa_pod *param)
{
	struct sm_node *n = object;
	struct impl *impl = SPA_CONTAINER_OF(n->obj.session, struct impl, this);
	struct param *added;

	pw_log_debug("%p: node %p param %d index:%d seq:%d", impl, n, id, index, seq);

	added = add_param(&n->param_list, seq, n->param_seq, id, param);
	if (added != NULL)
		n->n_params++;

	n->obj.avail |= SM_NODE_CHANGE_MASK_PARAMS;
	n->obj.changed |= SM_NODE_CHANGE_MASK_PARAMS;
}
|
|
|
|
|
|
2019-12-11 15:26:11 +01:00
|
|
|
/* pw_node proxy event handlers */
static const struct pw_node_events node_events = {
	PW_VERSION_NODE_EVENTS,
	.info = node_event_info,
	.param = node_event_param,
};
|
|
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
static int node_init(void *object)
|
|
|
|
|
{
|
|
|
|
|
struct sm_node *node = object;
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(node->obj.session, struct impl, this);
|
|
|
|
|
struct pw_properties *props = node->obj.props;
|
|
|
|
|
|
|
|
|
|
spa_list_init(&node->port_list);
|
|
|
|
|
spa_list_init(&node->param_list);
|
|
|
|
|
|
|
|
|
|
if (props) {
|
2021-10-12 14:17:16 +10:00
|
|
|
uint32_t id = SPA_ID_INVALID;
|
|
|
|
|
|
|
|
|
|
if (pw_properties_fetch_uint32(props, PW_KEY_DEVICE_ID, &id) == 0)
|
|
|
|
|
node->device = find_object(impl, id, NULL);
|
|
|
|
|
pw_log_debug("%p: node %d parent device %d (%p)", impl,
|
|
|
|
|
node->obj.id, id, node->device);
|
2019-11-29 13:21:55 +01:00
|
|
|
if (node->device) {
|
|
|
|
|
spa_list_append(&node->device->node_list, &node->link);
|
2019-12-16 13:37:19 +01:00
|
|
|
node->device->obj.avail |= SM_DEVICE_CHANGE_MASK_NODES;
|
2019-11-29 13:21:55 +01:00
|
|
|
node->device->obj.changed |= SM_DEVICE_CHANGE_MASK_NODES;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-14 18:35:29 +01:00
|
|
|
static void node_destroy(void *object)
|
|
|
|
|
{
|
|
|
|
|
struct sm_node *node = object;
|
|
|
|
|
struct sm_port *port;
|
|
|
|
|
|
|
|
|
|
spa_list_consume(port, &node->port_list, link) {
|
|
|
|
|
port->node = NULL;
|
|
|
|
|
spa_list_remove(&port->link);
|
|
|
|
|
}
|
2019-11-19 16:08:40 +01:00
|
|
|
clear_params(&node->param_list, SPA_ID_INVALID);
|
2019-11-27 12:19:09 +01:00
|
|
|
node->n_params = 0;
|
2019-11-19 16:08:40 +01:00
|
|
|
|
2019-11-28 11:13:53 +01:00
|
|
|
if (node->device) {
|
|
|
|
|
spa_list_remove(&node->link);
|
|
|
|
|
node->device->obj.changed |= SM_DEVICE_CHANGE_MASK_NODES;
|
|
|
|
|
}
|
2020-01-28 15:37:51 +01:00
|
|
|
if (node->info) {
|
2019-11-14 18:35:29 +01:00
|
|
|
pw_node_info_free(node->info);
|
2020-01-28 15:37:51 +01:00
|
|
|
node->info = NULL;
|
|
|
|
|
}
|
2020-10-14 12:35:27 +02:00
|
|
|
free(node->target_node);
|
|
|
|
|
node->target_node = NULL;
|
2019-11-14 18:35:29 +01:00
|
|
|
}
|
|
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
/* Registry description for Node proxies. */
static const struct object_info node_info = {
	.type = PW_TYPE_INTERFACE_Node,
	.version = PW_VERSION_NODE,
	.events = &node_events,
	.size = sizeof(struct sm_node),
	.init = node_init,
	.destroy = node_destroy,
};
|
|
|
|
|
|
2019-11-14 18:35:29 +01:00
|
|
|
/**
|
|
|
|
|
* Port
|
|
|
|
|
*/
|
|
|
|
|
static void port_event_info(void *object, const struct pw_port_info *info)
|
|
|
|
|
{
|
|
|
|
|
struct sm_port *port = object;
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(port->obj.session, struct impl, this);
|
|
|
|
|
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: port %d info", impl, port->obj.id);
|
2021-09-03 13:26:15 +02:00
|
|
|
port->info = pw_port_info_merge(port->info, info, port->obj.changed == 0);
|
2019-11-14 18:35:29 +01:00
|
|
|
|
2019-11-27 12:18:40 +01:00
|
|
|
port->obj.avail |= SM_PORT_CHANGE_MASK_INFO;
|
|
|
|
|
port->obj.changed |= SM_PORT_CHANGE_MASK_INFO;
|
2019-12-16 13:37:19 +01:00
|
|
|
sm_object_sync_update(&port->obj);
|
2019-11-14 18:35:29 +01:00
|
|
|
}
|
|
|
|
|
|
2019-12-11 15:59:26 +01:00
|
|
|
/* pw_port proxy event handlers */
static const struct pw_port_events port_events = {
	PW_VERSION_PORT_EVENTS,
	.info = port_event_info,
};
|
|
|
|
|
|
2020-07-22 11:27:23 +02:00
|
|
|
static enum spa_audio_channel find_channel(const char *name)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
for (i = 0; spa_type_audio_channel[i].name; i++) {
|
2021-05-18 11:36:13 +10:00
|
|
|
if (spa_streq(name, spa_debug_type_short_name(spa_type_audio_channel[i].name)))
|
2020-07-22 11:27:23 +02:00
|
|
|
return spa_type_audio_channel[i].type;
|
|
|
|
|
}
|
|
|
|
|
return SPA_AUDIO_CHANNEL_UNKNOWN;
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
/* Init hook for ports: derive direction, DSP type and audio channel
 * from the properties, and attach the port to its parent node. */
static int port_init(void *object)
{
	struct sm_port *port = object;
	struct impl *impl = SPA_CONTAINER_OF(port->obj.session, struct impl, this);
	struct pw_properties *props = port->obj.props;
	const char *str;

	if (props) {
		uint32_t id = SPA_ID_INVALID;

		if ((str = pw_properties_get(props, PW_KEY_PORT_DIRECTION)) != NULL)
			port->direction = spa_streq(str, "out") ?
				PW_DIRECTION_OUTPUT : PW_DIRECTION_INPUT;
		if ((str = pw_properties_get(props, PW_KEY_FORMAT_DSP)) != NULL) {
			if (spa_streq(str, "32 bit float mono audio"))
				port->type = SM_PORT_TYPE_DSP_AUDIO;
			else if (spa_streq(str, "8 bit raw midi"))
				port->type = SM_PORT_TYPE_DSP_MIDI;
		}
		if ((str = pw_properties_get(props, PW_KEY_AUDIO_CHANNEL)) != NULL)
			port->channel = find_channel(str);
		if (pw_properties_fetch_uint32(props, PW_KEY_NODE_ID, &id) == 0)
			port->node = find_object(impl, id, PW_TYPE_INTERFACE_Node);

		/* NOTE(review): at this point str holds the result of the
		 * last lookup (PW_KEY_AUDIO_CHANNEL), which may be NULL —
		 * despite the "parent node %s" label; verify the logger
		 * tolerates NULL for %s. */
		pw_log_debug("%p: port %d parent node %s (%p) direction:%d type:%d", impl,
				port->obj.id, str, port->node, port->direction, port->type);
		if (port->node) {
			spa_list_append(&port->node->port_list, &port->link);
			port->node->obj.avail |= SM_NODE_CHANGE_MASK_PORTS;
			port->node->obj.changed |= SM_NODE_CHANGE_MASK_PORTS;
		}
	}
	return 0;
}
|
|
|
|
|
|
2019-11-14 18:35:29 +01:00
|
|
|
static void port_destroy(void *object)
|
|
|
|
|
{
|
|
|
|
|
struct sm_port *port = object;
|
|
|
|
|
if (port->info)
|
|
|
|
|
pw_port_info_free(port->info);
|
|
|
|
|
if (port->node) {
|
|
|
|
|
spa_list_remove(&port->link);
|
2019-11-27 12:18:40 +01:00
|
|
|
port->node->obj.changed |= SM_NODE_CHANGE_MASK_PORTS;
|
2019-11-14 18:35:29 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
/* Registry description for Port proxies. */
static const struct object_info port_info = {
	.type = PW_TYPE_INTERFACE_Port,
	.version = PW_VERSION_PORT,
	.events = &port_events,
	.size = sizeof(struct sm_port),
	.init = port_init,
	.destroy = port_destroy,
};
|
|
|
|
|
|
2019-11-20 16:18:46 +01:00
|
|
|
/**
|
|
|
|
|
* Session
|
|
|
|
|
*/
|
|
|
|
|
static void session_event_info(void *object, const struct pw_session_info *info)
|
|
|
|
|
{
|
|
|
|
|
struct sm_session *sess = object;
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(sess->obj.session, struct impl, this);
|
|
|
|
|
struct pw_session_info *i = sess->info;
|
|
|
|
|
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: session %d info", impl, sess->obj.id);
|
2019-11-20 16:18:46 +01:00
|
|
|
if (i == NULL && info) {
|
|
|
|
|
i = sess->info = calloc(1, sizeof(struct pw_session_info));
|
|
|
|
|
i->version = PW_VERSION_SESSION_INFO;
|
|
|
|
|
i->id = info->id;
|
|
|
|
|
}
|
2020-11-23 03:56:57 -05:00
|
|
|
if (info) {
|
|
|
|
|
i->change_mask = info->change_mask;
|
|
|
|
|
if (info->change_mask & PW_SESSION_CHANGE_MASK_PROPS) {
|
2021-06-01 11:21:17 +10:00
|
|
|
pw_properties_free ((struct pw_properties *)i->props);
|
2020-11-23 03:56:57 -05:00
|
|
|
i->props = (struct spa_dict *) pw_properties_new_dict (info->props);
|
|
|
|
|
}
|
2019-11-20 16:18:46 +01:00
|
|
|
}
|
|
|
|
|
|
2019-11-27 12:18:40 +01:00
|
|
|
sess->obj.avail |= SM_SESSION_CHANGE_MASK_INFO;
|
|
|
|
|
sess->obj.changed |= SM_SESSION_CHANGE_MASK_INFO;
|
2019-12-16 13:37:19 +01:00
|
|
|
sm_object_sync_update(&sess->obj);
|
2019-11-20 16:18:46 +01:00
|
|
|
}
|
|
|
|
|
|
2019-12-11 16:39:24 +01:00
|
|
|
/* pw_session proxy event handlers */
static const struct pw_session_events session_events = {
	PW_VERSION_SESSION_EVENTS,
	.info = session_event_info,
};
|
|
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
static int session_init(void *object)
|
|
|
|
|
{
|
|
|
|
|
struct sm_session *sess = object;
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(sess->obj.session, struct impl, this);
|
|
|
|
|
|
2020-01-07 16:07:51 +01:00
|
|
|
if (sess->obj.id == impl->this.session_id)
|
2019-11-29 13:21:55 +01:00
|
|
|
impl->this.session = sess;
|
|
|
|
|
|
|
|
|
|
spa_list_init(&sess->endpoint_list);
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-20 16:18:46 +01:00
|
|
|
static void session_destroy(void *object)
|
|
|
|
|
{
|
|
|
|
|
struct sm_session *sess = object;
|
|
|
|
|
struct sm_endpoint *endpoint;
|
2019-12-18 12:15:03 +01:00
|
|
|
struct pw_session_info *i = sess->info;
|
2019-11-20 16:18:46 +01:00
|
|
|
|
|
|
|
|
spa_list_consume(endpoint, &sess->endpoint_list, link) {
|
|
|
|
|
endpoint->session = NULL;
|
|
|
|
|
spa_list_remove(&endpoint->link);
|
|
|
|
|
}
|
2019-12-18 12:15:03 +01:00
|
|
|
if (i) {
|
2021-06-01 11:21:17 +10:00
|
|
|
pw_properties_free ((struct pw_properties *)i->props);
|
2019-12-18 12:15:03 +01:00
|
|
|
free(i);
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-20 16:18:46 +01:00
|
|
|
}
|
|
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
/* Registry description for Session proxies. */
static const struct object_info session_info = {
	.type = PW_TYPE_INTERFACE_Session,
	.version = PW_VERSION_SESSION,
	.events = &session_events,
	.size = sizeof(struct sm_session),
	.init = session_init,
	.destroy = session_destroy,
};
|
|
|
|
|
|
2019-11-14 18:35:29 +01:00
|
|
|
/**
|
|
|
|
|
* Endpoint
|
|
|
|
|
*/
|
|
|
|
|
static void endpoint_event_info(void *object, const struct pw_endpoint_info *info)
|
|
|
|
|
{
|
|
|
|
|
struct sm_endpoint *endpoint = object;
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(endpoint->obj.session, struct impl, this);
|
|
|
|
|
struct pw_endpoint_info *i = endpoint->info;
|
|
|
|
|
const char *str;
|
|
|
|
|
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: endpoint %d info", impl, endpoint->obj.id);
|
2019-11-14 18:35:29 +01:00
|
|
|
if (i == NULL && info) {
|
2019-11-15 17:13:45 +01:00
|
|
|
i = endpoint->info = calloc(1, sizeof(struct pw_endpoint_info));
|
|
|
|
|
i->id = info->id;
|
|
|
|
|
i->name = info->name ? strdup(info->name) : NULL;
|
|
|
|
|
i->media_class = info->media_class ? strdup(info->media_class) : NULL;
|
|
|
|
|
i->direction = info->direction;
|
|
|
|
|
i->flags = info->flags;
|
2019-11-14 18:35:29 +01:00
|
|
|
}
|
2020-11-23 03:56:57 -05:00
|
|
|
if (info) {
|
|
|
|
|
i->change_mask = info->change_mask;
|
|
|
|
|
if (info->change_mask & PW_ENDPOINT_CHANGE_MASK_SESSION) {
|
|
|
|
|
i->session_id = info->session_id;
|
|
|
|
|
}
|
|
|
|
|
if (info->change_mask & PW_ENDPOINT_CHANGE_MASK_PROPS) {
|
2021-06-01 11:21:17 +10:00
|
|
|
pw_properties_free ((struct pw_properties *)i->props);
|
2020-11-23 03:56:57 -05:00
|
|
|
i->props = (struct spa_dict *) pw_properties_new_dict (info->props);
|
|
|
|
|
if ((str = spa_dict_lookup(i->props, PW_KEY_PRIORITY_SESSION)) != NULL)
|
|
|
|
|
endpoint->priority = pw_properties_parse_int(str);
|
|
|
|
|
}
|
2019-11-14 18:35:29 +01:00
|
|
|
}
|
|
|
|
|
|
2019-11-27 12:18:40 +01:00
|
|
|
endpoint->obj.avail |= SM_ENDPOINT_CHANGE_MASK_INFO;
|
|
|
|
|
endpoint->obj.changed |= SM_ENDPOINT_CHANGE_MASK_INFO;
|
2019-12-16 13:37:19 +01:00
|
|
|
sm_object_sync_update(&endpoint->obj);
|
2019-11-14 18:35:29 +01:00
|
|
|
}
|
|
|
|
|
|
2019-12-11 16:43:53 +01:00
|
|
|
/* pw_endpoint proxy event handlers */
static const struct pw_endpoint_events endpoint_events = {
	PW_VERSION_ENDPOINT_EVENTS,
	.info = endpoint_event_info,
};
|
|
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
static int endpoint_init(void *object)
|
|
|
|
|
{
|
|
|
|
|
struct sm_endpoint *endpoint = object;
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(endpoint->obj.session, struct impl, this);
|
|
|
|
|
struct pw_properties *props = endpoint->obj.props;
|
|
|
|
|
|
|
|
|
|
if (props) {
|
2021-10-12 14:17:16 +10:00
|
|
|
uint32_t id = SPA_ID_INVALID;
|
|
|
|
|
|
|
|
|
|
if (pw_properties_fetch_uint32(props, PW_KEY_SESSION_ID, &id) == 0)
|
|
|
|
|
endpoint->session = find_object(impl, id, PW_TYPE_INTERFACE_Session);
|
|
|
|
|
pw_log_debug("%p: endpoint %d parent session %d", impl,
|
|
|
|
|
endpoint->obj.id, id);
|
2019-11-29 13:21:55 +01:00
|
|
|
if (endpoint->session) {
|
|
|
|
|
spa_list_append(&endpoint->session->endpoint_list, &endpoint->link);
|
2019-12-16 13:37:19 +01:00
|
|
|
endpoint->session->obj.avail |= SM_SESSION_CHANGE_MASK_ENDPOINTS;
|
2019-11-29 13:21:55 +01:00
|
|
|
endpoint->session->obj.changed |= SM_SESSION_CHANGE_MASK_ENDPOINTS;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
spa_list_init(&endpoint->stream_list);
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-14 18:35:29 +01:00
|
|
|
static void endpoint_destroy(void *object)
|
|
|
|
|
{
|
|
|
|
|
struct sm_endpoint *endpoint = object;
|
|
|
|
|
struct sm_endpoint_stream *stream;
|
2019-12-18 12:15:03 +01:00
|
|
|
struct pw_endpoint_info *i = endpoint->info;
|
2019-11-14 18:35:29 +01:00
|
|
|
|
|
|
|
|
spa_list_consume(stream, &endpoint->stream_list, link) {
|
|
|
|
|
stream->endpoint = NULL;
|
|
|
|
|
spa_list_remove(&stream->link);
|
|
|
|
|
}
|
2019-11-20 16:18:46 +01:00
|
|
|
if (endpoint->session) {
|
|
|
|
|
endpoint->session = NULL;
|
|
|
|
|
spa_list_remove(&endpoint->link);
|
|
|
|
|
}
|
2019-12-18 12:15:03 +01:00
|
|
|
if (i) {
|
2021-06-01 11:21:17 +10:00
|
|
|
pw_properties_free ((struct pw_properties *)i->props);
|
2019-12-18 12:15:03 +01:00
|
|
|
free(i->name);
|
|
|
|
|
free(i->media_class);
|
|
|
|
|
free(i);
|
|
|
|
|
}
|
2019-11-14 18:35:29 +01:00
|
|
|
}
|
|
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
/* Registry description for Endpoint proxies. */
static const struct object_info endpoint_info = {
	.type = PW_TYPE_INTERFACE_Endpoint,
	.version = PW_VERSION_ENDPOINT,
	.events = &endpoint_events,
	.size = sizeof(struct sm_endpoint),
	.init = endpoint_init,
	.destroy = endpoint_destroy,
};
|
|
|
|
|
|
|
|
|
|
|
2019-11-14 18:35:29 +01:00
|
|
|
/**
|
|
|
|
|
* Endpoint Stream
|
|
|
|
|
*/
|
|
|
|
|
static void endpoint_stream_event_info(void *object, const struct pw_endpoint_stream_info *info)
|
|
|
|
|
{
|
|
|
|
|
struct sm_endpoint_stream *stream = object;
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(stream->obj.session, struct impl, this);
|
|
|
|
|
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: endpoint stream %d info", impl, stream->obj.id);
|
2019-11-14 18:35:29 +01:00
|
|
|
if (stream->info == NULL && info) {
|
2019-11-15 17:13:45 +01:00
|
|
|
stream->info = calloc(1, sizeof(struct pw_endpoint_stream_info));
|
|
|
|
|
stream->info->version = PW_VERSION_ENDPOINT_STREAM_INFO;
|
|
|
|
|
stream->info->id = info->id;
|
|
|
|
|
stream->info->endpoint_id = info->endpoint_id;
|
|
|
|
|
stream->info->name = info->name ? strdup(info->name) : NULL;
|
2019-11-14 18:35:29 +01:00
|
|
|
}
|
2020-11-23 03:56:57 -05:00
|
|
|
if (info) {
|
|
|
|
|
stream->info->change_mask = info->change_mask;
|
|
|
|
|
}
|
2019-11-14 18:35:29 +01:00
|
|
|
|
2019-11-27 12:18:40 +01:00
|
|
|
stream->obj.avail |= SM_ENDPOINT_CHANGE_MASK_INFO;
|
|
|
|
|
stream->obj.changed |= SM_ENDPOINT_CHANGE_MASK_INFO;
|
2019-12-16 13:37:19 +01:00
|
|
|
sm_object_sync_update(&stream->obj);
|
2019-11-14 18:35:29 +01:00
|
|
|
}
|
|
|
|
|
|
2019-12-11 16:48:00 +01:00
|
|
|
/* pw_endpoint_stream proxy event handlers */
static const struct pw_endpoint_stream_events endpoint_stream_events = {
	PW_VERSION_ENDPOINT_STREAM_EVENTS,
	.info = endpoint_stream_event_info,
};
|
|
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
static int endpoint_stream_init(void *object)
|
|
|
|
|
{
|
|
|
|
|
struct sm_endpoint_stream *stream = object;
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(stream->obj.session, struct impl, this);
|
|
|
|
|
struct pw_properties *props = stream->obj.props;
|
|
|
|
|
|
|
|
|
|
if (props) {
|
2021-10-12 14:17:16 +10:00
|
|
|
uint32_t id = SPA_ID_INVALID;
|
|
|
|
|
|
|
|
|
|
if (pw_properties_fetch_uint32(props, PW_KEY_ENDPOINT_ID, &id) == 0)
|
|
|
|
|
stream->endpoint = find_object(impl, id, PW_TYPE_INTERFACE_Endpoint);
|
|
|
|
|
pw_log_debug("%p: stream %d parent endpoint %d", impl,
|
|
|
|
|
stream->obj.id, id);
|
2019-11-29 13:21:55 +01:00
|
|
|
if (stream->endpoint) {
|
|
|
|
|
spa_list_append(&stream->endpoint->stream_list, &stream->link);
|
2019-12-16 13:37:19 +01:00
|
|
|
stream->endpoint->obj.avail |= SM_ENDPOINT_CHANGE_MASK_STREAMS;
|
2019-11-29 13:21:55 +01:00
|
|
|
stream->endpoint->obj.changed |= SM_ENDPOINT_CHANGE_MASK_STREAMS;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
spa_list_init(&stream->link_list);
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-14 18:35:29 +01:00
|
|
|
static void endpoint_stream_destroy(void *object)
|
|
|
|
|
{
|
|
|
|
|
struct sm_endpoint_stream *stream = object;
|
|
|
|
|
|
|
|
|
|
if (stream->info) {
|
|
|
|
|
free(stream->info->name);
|
|
|
|
|
free(stream->info);
|
|
|
|
|
}
|
|
|
|
|
if (stream->endpoint) {
|
|
|
|
|
stream->endpoint = NULL;
|
|
|
|
|
spa_list_remove(&stream->link);
|
|
|
|
|
}
|
|
|
|
|
}
|
2019-11-29 13:21:55 +01:00
|
|
|
|
|
|
|
|
/* Descriptor for PW_TYPE_INTERFACE_EndpointStream globals: interface
 * version, proxy event handlers, user-data size, and the init/destroy
 * hooks used when an sm_endpoint_stream is created or torn down. */
static const struct object_info endpoint_stream_info = {
	.type = PW_TYPE_INTERFACE_EndpointStream,
	.version = PW_VERSION_ENDPOINT_STREAM,
	.events = &endpoint_stream_events,
	.size = sizeof(struct sm_endpoint_stream),
	.init = endpoint_stream_init,
	.destroy = endpoint_stream_destroy,
};
|
|
|
|
|
|
2019-11-15 17:13:45 +01:00
|
|
|
/**
|
|
|
|
|
* Endpoint Link
|
|
|
|
|
*/
|
|
|
|
|
static void endpoint_link_event_info(void *object, const struct pw_endpoint_link_info *info)
|
|
|
|
|
{
|
|
|
|
|
struct sm_endpoint_link *link = object;
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(link->obj.session, struct impl, this);
|
|
|
|
|
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: endpoint link %d info", impl, link->obj.id);
|
2019-11-15 17:13:45 +01:00
|
|
|
if (link->info == NULL && info) {
|
|
|
|
|
link->info = calloc(1, sizeof(struct pw_endpoint_link_info));
|
|
|
|
|
link->info->version = PW_VERSION_ENDPOINT_LINK_INFO;
|
|
|
|
|
link->info->id = info->id;
|
|
|
|
|
link->info->session_id = info->session_id;
|
|
|
|
|
link->info->output_endpoint_id = info->output_endpoint_id;
|
|
|
|
|
link->info->output_stream_id = info->output_stream_id;
|
|
|
|
|
link->info->input_endpoint_id = info->input_endpoint_id;
|
|
|
|
|
link->info->input_stream_id = info->input_stream_id;
|
|
|
|
|
}
|
2020-11-23 03:56:57 -05:00
|
|
|
if (info) {
|
|
|
|
|
link->info->change_mask = info->change_mask;
|
|
|
|
|
}
|
2019-11-15 17:13:45 +01:00
|
|
|
|
2019-11-27 12:18:40 +01:00
|
|
|
link->obj.avail |= SM_ENDPOINT_LINK_CHANGE_MASK_INFO;
|
|
|
|
|
link->obj.changed |= SM_ENDPOINT_LINK_CHANGE_MASK_INFO;
|
2019-12-16 13:37:19 +01:00
|
|
|
sm_object_sync_update(&link->obj);
|
2019-11-15 17:13:45 +01:00
|
|
|
}
|
|
|
|
|
|
2019-12-11 16:36:12 +01:00
|
|
|
static const struct pw_endpoint_link_events endpoint_link_events = {
|
|
|
|
|
PW_VERSION_ENDPOINT_LINK_EVENTS,
|
2019-11-15 17:13:45 +01:00
|
|
|
.info = endpoint_link_event_info,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
static void endpoint_link_destroy(void *object)
|
|
|
|
|
{
|
|
|
|
|
struct sm_endpoint_link *link = object;
|
|
|
|
|
|
|
|
|
|
if (link->info) {
|
|
|
|
|
free(link->info->error);
|
|
|
|
|
free(link->info);
|
|
|
|
|
}
|
|
|
|
|
if (link->output) {
|
|
|
|
|
link->output = NULL;
|
|
|
|
|
spa_list_remove(&link->output_link);
|
|
|
|
|
}
|
|
|
|
|
if (link->input) {
|
|
|
|
|
link->input = NULL;
|
|
|
|
|
spa_list_remove(&link->input_link);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
/* Descriptor for PW_TYPE_INTERFACE_EndpointLink globals: interface
 * version, proxy event handlers, user-data size and destroy hook.
 * Links need no per-object init, hence .init = NULL. */
static const struct object_info endpoint_link_info = {
	.type = PW_TYPE_INTERFACE_EndpointLink,
	.version = PW_VERSION_ENDPOINT_LINK,
	.events = &endpoint_link_events,
	.size = sizeof(struct sm_endpoint_link),
	.init = NULL,
	.destroy = endpoint_link_destroy,
};
|
|
|
|
|
|
2019-11-14 18:35:29 +01:00
|
|
|
/**
|
|
|
|
|
* Proxy
|
|
|
|
|
*/
|
2019-11-28 11:13:53 +01:00
|
|
|
static void done_proxy(void *data, int seq)
|
|
|
|
|
{
|
|
|
|
|
struct sm_object *obj = data;
|
2019-12-16 13:37:19 +01:00
|
|
|
|
|
|
|
|
pw_log_debug("done %p proxy %p avail:%08x update:%08x %d/%d", obj,
|
|
|
|
|
obj->proxy, obj->avail, obj->changed, obj->pending, seq);
|
|
|
|
|
|
|
|
|
|
if (obj->pending == seq) {
|
|
|
|
|
obj->pending = SPA_ID_INVALID;
|
|
|
|
|
if (obj->changed)
|
|
|
|
|
sm_object_emit_update(obj);
|
|
|
|
|
obj->changed = 0;
|
2019-11-28 11:13:53 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
static const struct pw_proxy_events proxy_events = {
|
|
|
|
|
PW_VERSION_PROXY_EVENTS,
|
|
|
|
|
.done = done_proxy,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
/*
 * Proxy 'bound' event on the handle: the server assigned global id \a id
 * to a locally created object (obj->id was SPA_ID_INVALID until now).
 * Registers the object in the session's object map, resolving a clash
 * with an object already registered under the same id.
 */
static void bound_handle(void *data, uint32_t id)
{
	struct sm_object *obj = data;
	struct impl *impl = SPA_CONTAINER_OF(obj->session, struct impl, this);

	pw_log_debug("bound %p proxy %p handle %p id:%d->%d",
			obj, obj->proxy, obj->handle, obj->id, id);

	/* objects that already have an id were bound before; nothing to do */
	if (obj->id == SPA_ID_INVALID) {
		struct sm_object *old_obj = find_object(impl, id, NULL);

		if (old_obj != NULL) {
			/*
			 * Monitor core is always more up-to-date in object creation
			 * events (see registry_global), so in case of duplicate objects
			 * we should prefer monitor globals.
			 */
			if (obj->monitor_global)
				sm_object_destroy_maybe_free(old_obj);
			else {
				/* the duplicate wins; drop this object instead */
				sm_object_destroy_maybe_free(obj);
				return;
			}
		}

		add_object(impl, obj, id);
	}
}
|
|
|
|
|
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
static const struct pw_proxy_events handle_events = {
|
2019-12-19 13:39:05 +01:00
|
|
|
PW_VERSION_PROXY_EVENTS,
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
.bound = bound_handle,
|
2019-11-14 18:35:29 +01:00
|
|
|
};
|
|
|
|
|
|
2019-12-16 13:37:19 +01:00
|
|
|
int sm_object_sync_update(struct sm_object *obj)
|
|
|
|
|
{
|
|
|
|
|
obj->pending = pw_proxy_sync(obj->proxy, 1);
|
|
|
|
|
pw_log_debug("sync %p proxy %p %d", obj, obj->proxy, obj->pending);
|
|
|
|
|
return obj->pending;
|
|
|
|
|
}
|
|
|
|
|
|
2019-12-19 13:15:10 +01:00
|
|
|
static const struct object_info *get_object_info(struct impl *impl, const char *type)
|
2019-11-14 18:35:29 +01:00
|
|
|
{
|
2019-11-29 13:21:55 +01:00
|
|
|
const struct object_info *info;
|
2019-12-19 13:15:10 +01:00
|
|
|
|
2021-05-18 11:36:13 +10:00
|
|
|
if (spa_streq(type, PW_TYPE_INTERFACE_Core))
|
2020-07-16 17:48:00 +02:00
|
|
|
info = &core_object_info;
|
2021-05-18 11:36:13 +10:00
|
|
|
else if (spa_streq(type, PW_TYPE_INTERFACE_Module))
|
2020-07-16 17:48:00 +02:00
|
|
|
info = &module_info;
|
2021-05-18 11:36:13 +10:00
|
|
|
else if (spa_streq(type, PW_TYPE_INTERFACE_Factory))
|
2020-07-16 17:48:00 +02:00
|
|
|
info = &factory_info;
|
2021-05-18 11:36:13 +10:00
|
|
|
else if (spa_streq(type, PW_TYPE_INTERFACE_Client))
|
2019-11-29 13:21:55 +01:00
|
|
|
info = &client_info;
|
2021-05-18 11:36:13 +10:00
|
|
|
else if (spa_streq(type, SPA_TYPE_INTERFACE_Device))
|
2019-11-29 13:21:55 +01:00
|
|
|
info = &spa_device_info;
|
2021-05-18 11:36:13 +10:00
|
|
|
else if (spa_streq(type, PW_TYPE_INTERFACE_Device))
|
2019-11-29 13:21:55 +01:00
|
|
|
info = &device_info;
|
2021-05-18 11:36:13 +10:00
|
|
|
else if (spa_streq(type, PW_TYPE_INTERFACE_Node))
|
2019-11-29 13:21:55 +01:00
|
|
|
info = &node_info;
|
2021-05-18 11:36:13 +10:00
|
|
|
else if (spa_streq(type, PW_TYPE_INTERFACE_Port))
|
2019-11-29 13:21:55 +01:00
|
|
|
info = &port_info;
|
2021-05-18 11:36:13 +10:00
|
|
|
else if (spa_streq(type, PW_TYPE_INTERFACE_Session))
|
2019-11-29 13:21:55 +01:00
|
|
|
info = &session_info;
|
2021-05-18 11:36:13 +10:00
|
|
|
else if (spa_streq(type, PW_TYPE_INTERFACE_Endpoint))
|
2019-11-29 13:21:55 +01:00
|
|
|
info = &endpoint_info;
|
2021-05-18 11:36:13 +10:00
|
|
|
else if (spa_streq(type, PW_TYPE_INTERFACE_EndpointStream))
|
2019-11-29 13:21:55 +01:00
|
|
|
info = &endpoint_stream_info;
|
2021-05-18 11:36:13 +10:00
|
|
|
else if (spa_streq(type, PW_TYPE_INTERFACE_EndpointLink))
|
2019-11-29 13:21:55 +01:00
|
|
|
info = &endpoint_link_info;
|
2019-12-19 13:15:10 +01:00
|
|
|
else
|
2019-11-29 13:21:55 +01:00
|
|
|
info = NULL;
|
2019-12-19 13:15:10 +01:00
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
return info;
|
|
|
|
|
}
|
2019-11-14 18:35:29 +01:00
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
/*
 * Initialize the sm_object stored in the user data of \a handle.
 *
 * \param proxy           bound proxy for the object, or NULL when the object
 *                        is not (yet) bound; listeners are only attached when
 *                        non-NULL
 * \param handle          proxy that owns the object's user data; always gets
 *                        the handle_events listener for id assignment
 * \param id              global id, or SPA_ID_INVALID when not yet known
 * \param props           optional initial properties (copied)
 * \param monitor_global  true when the object was created from the monitor
 *                        core (used to resolve id clashes in bound_handle)
 *
 * The object is appended to impl->object_list and info->init() is invoked
 * last, after all listeners are in place.
 */
static struct sm_object *init_object(struct impl *impl, const struct object_info *info,
		struct pw_proxy *proxy, struct pw_proxy *handle, uint32_t id,
		const struct spa_dict *props, bool monitor_global)
{
	struct sm_object *obj;

	obj = pw_proxy_get_user_data(handle);
	obj->session = &impl->this;
	obj->id = id;
	obj->type = info->type;
	obj->props = props ? pw_properties_new_dict(props) : pw_properties_new(NULL, NULL);
	obj->proxy = proxy;
	obj->handle = handle;
	obj->destroy = info->destroy;
	obj->mask |= SM_OBJECT_CHANGE_MASK_PROPERTIES | SM_OBJECT_CHANGE_MASK_BIND;
	obj->avail |= obj->mask;
	obj->monitor_global = monitor_global;
	spa_hook_list_init(&obj->hooks);
	spa_list_init(&obj->data);

	/* keep every live object in a list so session_shutdown can clean up */
	spa_list_append(&impl->object_list, &obj->link);

	if (proxy) {
		pw_proxy_add_listener(obj->proxy, &obj->proxy_listener, &proxy_events, obj);
		if (info->events != NULL)
			pw_proxy_add_object_listener(obj->proxy, &obj->object_listener, info->events, obj);
		/* only advertise LISTENER support when there are object events */
		SPA_FLAG_UPDATE(obj->mask, SM_OBJECT_CHANGE_MASK_LISTENER, info->events != NULL);
	}
	pw_proxy_add_listener(obj->handle, &obj->handle_listener, &handle_events, obj);

	if (info->init)
		info->init(obj);

	return obj;
}
|
|
|
|
|
|
|
|
|
|
static struct sm_object *
|
2020-01-07 16:07:51 +01:00
|
|
|
create_object(struct impl *impl, struct pw_proxy *proxy, struct pw_proxy *handle,
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
const struct spa_dict *props, bool monitor_global)
|
2019-11-29 13:21:55 +01:00
|
|
|
{
|
2019-12-19 13:15:10 +01:00
|
|
|
const char *type;
|
2019-11-29 13:21:55 +01:00
|
|
|
const struct object_info *info;
|
|
|
|
|
struct sm_object *obj;
|
|
|
|
|
|
2020-01-07 16:07:51 +01:00
|
|
|
type = pw_proxy_get_type(handle, NULL);
|
2019-11-29 13:21:55 +01:00
|
|
|
|
2021-05-18 11:36:13 +10:00
|
|
|
if (spa_streq(type, PW_TYPE_INTERFACE_ClientNode))
|
2020-07-03 16:26:52 +02:00
|
|
|
type = PW_TYPE_INTERFACE_Node;
|
|
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
info = get_object_info(impl, type);
|
|
|
|
|
if (info == NULL) {
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_error("%p: unknown object type %s", impl, type);
|
2019-11-29 13:21:55 +01:00
|
|
|
errno = ENOTSUP;
|
|
|
|
|
return NULL;
|
2019-11-14 18:35:29 +01:00
|
|
|
}
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
obj = init_object(impl, info, proxy, handle, SPA_ID_INVALID, props, monitor_global);
|
2019-11-29 13:21:55 +01:00
|
|
|
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: created new object %p proxy:%p handle:%p", impl,
|
2020-06-04 17:41:01 +02:00
|
|
|
obj, obj->proxy, obj->handle);
|
2019-11-29 13:21:55 +01:00
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
|
}
|
|
|
|
|
|
2021-02-15 15:28:42 +01:00
|
|
|
static struct sm_object *
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
bind_object(struct impl *impl, const struct object_info *info, struct registry_event *re)
|
2019-11-29 13:21:55 +01:00
|
|
|
{
|
|
|
|
|
struct pw_proxy *proxy;
|
|
|
|
|
struct sm_object *obj;
|
|
|
|
|
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
proxy = re->proxy;
|
|
|
|
|
re->proxy = NULL;
|
|
|
|
|
|
|
|
|
|
obj = init_object(impl, info, proxy, proxy, re->id, re->props, false);
|
|
|
|
|
sm_object_discard(obj);
|
|
|
|
|
add_object(impl, obj, re->id);
|
2019-11-14 18:35:29 +01:00
|
|
|
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: bound new object %p proxy %p id:%d", impl, obj, obj->proxy, obj->id);
|
2019-11-14 18:35:29 +01:00
|
|
|
|
2021-02-15 15:28:42 +01:00
|
|
|
return obj;
|
2019-11-29 13:21:55 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
update_object(struct impl *impl, const struct object_info *info, struct sm_object *obj,
		struct registry_event *re)
{
	/*
	 * Refresh an existing sm_object from a registry event: merge in the
	 * event's properties and, if the object does not yet own a proxy,
	 * adopt the proxy that was bound for this event and attach the
	 * proxy/object listeners to it.  Always returns 0.
	 */
	struct pw_proxy *proxy;

	pw_properties_update(obj->props, re->props);

	/* object already has a proxy with listeners attached; nothing more to do */
	if (obj->proxy != NULL)
		return 0;

	pw_log_debug("%p: update type:%s", impl, obj->type);

	/* take ownership of the proxy so registry_event_free() won't destroy it */
	proxy = re->proxy;
	re->proxy = NULL;

	obj->proxy = proxy;
	obj->type = info->type;

	pw_proxy_add_listener(obj->proxy, &obj->proxy_listener, &proxy_events, obj);
	if (info->events)
		pw_proxy_add_object_listener(obj->proxy, &obj->object_listener, info->events, obj);
	/* record whether an object listener is installed */
	SPA_FLAG_UPDATE(obj->mask, SM_OBJECT_CHANGE_MASK_LISTENER, info->events != NULL);

	/* announce the now-usable object to session listeners */
	sm_media_session_emit_create(impl, obj);

	return 0;
}
|
|
|
|
|
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
static void registry_event_free(struct registry_event *re)
|
|
|
|
|
{
|
|
|
|
|
if (re->proxy)
|
|
|
|
|
pw_proxy_destroy(re->proxy);
|
2021-06-01 11:21:17 +10:00
|
|
|
pw_properties_free(re->props_store);
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
if (re->allocated) {
|
|
|
|
|
spa_list_remove(&re->link);
|
|
|
|
|
free(re);
|
|
|
|
|
} else {
|
|
|
|
|
spa_zero(*re);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Process a new-global registry event (from either core).  Binds a new
 * object when the policy core sees an id the monitor didn't claim, or
 * updates an existing object owned by the same core the event came from.
 * Returns 0 in all cases; unknown types are ignored.
 */
static int handle_registry_event(struct impl *impl, struct registry_event *re)
{
	struct sm_object *obj;
	const struct object_info *info = NULL;

	obj = find_object(impl, re->id, NULL);

	pw_log_debug("%p: new global '%d' %s/%d obj:%p monitor:%d seq:%d",
			impl, re->id, re->type, re->version, obj, re->monitor, re->seq);

	/* only object types the session knows how to manage are handled */
	info = get_object_info(impl, re->type);
	if (info == NULL)
		return 0;

	if (obj == NULL && !re->monitor) {
		/*
		 * Only policy core binds new objects.
		 *
		 * The monitor core event corresponding to this one has already been
		 * processed. If monitor doesn't have the id now, the object either has
		 * not been created there, or there is a race condition and it was already
		 * removed. In that case, we create a zombie object here, but its remove
		 * event is already queued and arrives soon.
		 */
		bind_object(impl, info, re);
	} else if (obj != NULL && obj->monitor_global == re->monitor) {
		/* Each core handles their own object updates */
		update_object(impl, info, obj, re);
	}

	sm_media_session_schedule_rescan(&impl->this);
	return 0;
}
|
|
|
|
|
|
|
|
|
|
static int handle_postponed_registry_events(struct impl *impl, int seq)
|
|
|
|
|
{
|
|
|
|
|
struct registry_event *re, *t;
|
|
|
|
|
|
|
|
|
|
spa_list_for_each_safe(re, t, &impl->registry_event_list, link) {
|
|
|
|
|
if (re->seq == seq) {
|
|
|
|
|
handle_registry_event(impl, re);
|
|
|
|
|
registry_event_free(re);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Start a sync round-trip on the monitor core.  The policy core is paused
 * first so that postponed policy events are handled (in sequence order)
 * before any further policy-core registry or proxy events arrive.
 * Returns the sync sequence number used to match postponed events.
 */
static int monitor_sync(struct impl *impl)
{
	pw_core_set_paused(impl->policy_core, true);
	impl->monitor_seq = pw_core_sync(impl->monitor_core, 0, impl->monitor_seq);
	pw_log_debug("%p: monitor sync start %d", impl, impl->monitor_seq);
	sm_media_session_schedule_rescan(&impl->this);
	return impl->monitor_seq;
}
|
|
|
|
|
|
2019-11-26 12:53:28 +01:00
|
|
|
/*
 * Policy-core registry listener for new globals.  The event is not handled
 * immediately: it is queued (with its proxy already bound) and processed
 * after the next monitor-core sync, so the monitor core's view of the id
 * space wins when the two cores race.
 */
static void
registry_global(void *data, uint32_t id,
		uint32_t permissions, const char *type, uint32_t version,
		const struct spa_dict *props)
{
	struct impl *impl = data;
	const struct object_info *info;
	struct registry_event *re = NULL;
	/* warn only once per process about a conflicting session manager */
	static bool warned_about_wireplumber = false;

	info = get_object_info(impl, type);
	if (info == NULL)
		return;

	pw_log_debug("%p: registry event (policy) for new global '%d'", impl, id);

	if (!warned_about_wireplumber && props &&
			spa_streq(info->type, PW_TYPE_INTERFACE_Client)) {
		const char *name = spa_dict_lookup(props, PW_KEY_APP_NAME);
		if (spa_streq(name, "WirePlumber")) {
			pw_log_error("WirePlumber appears to be running; "
					"please stop it before starting pipewire-media-session");
			warned_about_wireplumber = true;
		}
	}

	/*
	 * Handle policy core events after monitor core ones.
	 *
	 * Monitor sync pauses policy core, so the event will be handled before
	 * further registry or proxy events are received via policy core.
	 */
	re = calloc(1, sizeof(struct registry_event));
	if (re == NULL)
		goto error;

	/* mark heap-allocated and link into the pending list before anything
	 * can fail, so registry_event_free() unlinks and frees it */
	re->allocated = true;
	spa_list_append(&impl->registry_event_list, &re->link);

	re->id = id;
	re->monitor = false;
	re->permissions = permissions;
	re->type = info->type;
	re->version = version;

	/* Bind proxy now */
	re->proxy = pw_registry_bind(impl->registry, id, type, info->version, info->size);
	if (re->proxy == NULL)
		goto error;

	if (props) {
		/* keep a private copy; the dict is only valid during this callback */
		re->props_store = pw_properties_new_dict(props);
		if (re->props_store == NULL)
			goto error;
		re->props = &re->props_store->dict;
	}

	/* remember which monitor sync must complete before handling this event */
	re->seq = monitor_sync(impl);

	return;

error:
	if (re)
		registry_event_free(re);
	pw_log_warn("%p: can't handle global %d: %s", impl, id, spa_strerror(-errno));
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
registry_global_remove(void *data, uint32_t id)
|
|
|
|
|
{
|
|
|
|
|
struct impl *impl = data;
|
|
|
|
|
struct sm_object *obj;
|
|
|
|
|
|
|
|
|
|
obj = find_object(impl, id, NULL);
|
|
|
|
|
obj = (obj && !obj->monitor_global) ? obj : NULL;
|
|
|
|
|
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: registry event (policy) for remove global '%d' obj:%p",
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
impl, id, obj);
|
|
|
|
|
|
|
|
|
|
if (obj)
|
|
|
|
|
sm_object_destroy_maybe_free(obj);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Registry listeners installed on the policy core's registry. */
static const struct pw_registry_events registry_events = {
	PW_VERSION_REGISTRY_EVENTS,
	.global = registry_global,
	.global_remove = registry_global_remove,
};
|
|
|
|
|
|
|
|
|
|
/*
 * Monitor-core registry listener for new globals.  Unlike the policy core
 * path, the event is handled immediately with a stack-allocated
 * registry_event; the proxy is still bound from the policy core's registry.
 */
static void
monitor_registry_global(void *data, uint32_t id,
		uint32_t permissions, const char *type, uint32_t version,
		const struct spa_dict *props)
{
	struct impl *impl = data;
	const struct object_info *info;
	/* stack-embedded event: allocated stays false so registry_event_free()
	 * only zeroes it */
	struct registry_event re = {
		.id = id, .permissions = permissions, .type = type, .version = version,
		.props = props, .monitor = true
	};

	pw_log_debug("%p: registry event (monitor) for new global '%d'", impl, id);

	/* only object types the session knows how to manage are handled */
	info = get_object_info(impl, type);
	if (info == NULL)
		return;

	/* Bind proxy now from policy core */
	re.proxy = pw_registry_bind(impl->registry, id, type, info->version, 0);
	if (re.proxy)
		handle_registry_event(impl, &re);
	else
		pw_log_warn("%p: can't handle global %d: %s", impl, id, spa_strerror(-errno));

	/* destroys re.proxy unless handle_registry_event() transferred it */
	registry_event_free(&re);

	return;
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
monitor_registry_global_remove(void *data, uint32_t id)
|
|
|
|
|
{
|
|
|
|
|
struct impl *impl = data;
|
|
|
|
|
struct sm_object *obj;
|
|
|
|
|
|
2020-07-22 14:14:02 +02:00
|
|
|
obj = find_object(impl, id, NULL);
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
obj = (obj && obj->monitor_global) ? obj : NULL;
|
|
|
|
|
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: registry event (monitor) for remove global '%d' obj:%p", impl, id, obj);
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
|
|
|
|
|
if (obj)
|
|
|
|
|
sm_object_destroy_maybe_free(obj);
|
2019-11-26 12:53:28 +01:00
|
|
|
}
|
|
|
|
|
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
/* Registry listeners installed on the monitor core's registry. */
static const struct pw_registry_events monitor_registry_events = {
	PW_VERSION_REGISTRY_EVENTS,
	.global = monitor_registry_global,
	.global_remove = monitor_registry_global_remove,
};
|
|
|
|
|
|
2019-11-26 12:53:28 +01:00
|
|
|
/*
 * Register a listener for events emitted by a session object.
 * The hook is appended to the object's hook list; remove it with
 * spa_hook_remove() before the listener's lifetime ends.  Returns 0.
 */
int sm_object_add_listener(struct sm_object *obj, struct spa_hook *listener,
		const struct sm_object_events *events, void *data)
{
	spa_hook_list_append(&obj->hooks, listener, events, data);
	return 0;
}
|
|
|
|
|
|
2019-11-14 18:35:29 +01:00
|
|
|
int sm_media_session_add_listener(struct sm_media_session *sess, struct spa_hook *listener,
|
|
|
|
|
const struct sm_media_session_events *events, void *data)
|
|
|
|
|
{
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(sess, struct impl, this);
|
2019-11-19 16:07:41 +01:00
|
|
|
struct spa_hook_list save;
|
|
|
|
|
struct sm_object *obj;
|
|
|
|
|
|
|
|
|
|
spa_hook_list_isolate(&impl->hooks, &save, listener, events, data);
|
|
|
|
|
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
spa_list_for_each(obj, &impl->object_list, link) {
|
|
|
|
|
if (obj->id == SPA_ID_INVALID)
|
|
|
|
|
continue;
|
2019-11-26 12:53:28 +01:00
|
|
|
sm_media_session_emit_create(impl, obj);
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
}
|
2019-11-19 16:07:41 +01:00
|
|
|
|
|
|
|
|
spa_hook_list_join(&impl->hooks, &save);
|
|
|
|
|
|
2019-11-14 18:35:29 +01:00
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct sm_object *sm_media_session_find_object(struct sm_media_session *sess, uint32_t id)
|
|
|
|
|
{
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(sess, struct impl, this);
|
2020-07-22 14:14:02 +02:00
|
|
|
return find_object(impl, id, NULL);
|
2019-11-14 18:35:29 +01:00
|
|
|
}
|
|
|
|
|
|
2020-04-02 15:26:39 +02:00
|
|
|
int sm_media_session_destroy_object(struct sm_media_session *sess, uint32_t id)
|
|
|
|
|
{
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(sess, struct impl, this);
|
|
|
|
|
pw_registry_destroy(impl->registry, id);
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2020-07-16 17:48:29 +02:00
|
|
|
int sm_media_session_for_each_object(struct sm_media_session *sess,
|
|
|
|
|
int (*callback) (void *data, struct sm_object *object),
|
|
|
|
|
void *data)
|
|
|
|
|
{
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(sess, struct impl, this);
|
|
|
|
|
struct sm_object *obj;
|
|
|
|
|
int res;
|
|
|
|
|
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
spa_list_for_each(obj, &impl->object_list, link) {
|
|
|
|
|
if (obj->id == SPA_ID_INVALID)
|
|
|
|
|
continue;
|
2020-07-16 17:48:29 +02:00
|
|
|
if ((res = callback(data, obj)) != 0)
|
|
|
|
|
return res;
|
|
|
|
|
}
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-14 18:35:29 +01:00
|
|
|
int sm_media_session_schedule_rescan(struct sm_media_session *sess)
|
|
|
|
|
{
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(sess, struct impl, this);
|
2020-11-03 20:25:51 +01:00
|
|
|
|
|
|
|
|
if (impl->scanning) {
|
|
|
|
|
impl->rescan_pending = true;
|
|
|
|
|
return impl->rescan_seq;
|
|
|
|
|
}
|
2019-11-28 11:13:53 +01:00
|
|
|
if (impl->policy_core)
|
2019-12-11 07:46:59 +01:00
|
|
|
impl->rescan_seq = pw_core_sync(impl->policy_core, 0, impl->last_seq);
|
2019-11-15 12:07:26 +01:00
|
|
|
return impl->rescan_seq;
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-18 13:10:21 +01:00
|
|
|
int sm_media_session_sync(struct sm_media_session *sess,
|
|
|
|
|
void (*callback) (void *data), void *data)
|
|
|
|
|
{
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(sess, struct impl, this);
|
|
|
|
|
struct sync *sync;
|
|
|
|
|
|
|
|
|
|
sync = calloc(1, sizeof(struct sync));
|
|
|
|
|
if (sync == NULL)
|
|
|
|
|
return -errno;
|
|
|
|
|
|
|
|
|
|
spa_list_append(&impl->sync_list, &sync->link);
|
|
|
|
|
sync->callback = callback;
|
|
|
|
|
sync->data = data;
|
2019-12-11 07:46:59 +01:00
|
|
|
sync->seq = pw_core_sync(impl->policy_core, 0, impl->last_seq);
|
2019-11-18 13:10:21 +01:00
|
|
|
return sync->seq;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void roundtrip_callback(void *data)
|
|
|
|
|
{
|
|
|
|
|
int *done = data;
|
|
|
|
|
*done = 1;
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-15 12:07:26 +01:00
|
|
|
int sm_media_session_roundtrip(struct sm_media_session *sess)
|
|
|
|
|
{
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(sess, struct impl, this);
|
2019-11-20 16:18:46 +01:00
|
|
|
struct pw_loop *loop = impl->this.loop;
|
2020-08-06 17:11:35 +02:00
|
|
|
int done, res, seq;
|
2019-11-15 12:07:26 +01:00
|
|
|
|
2019-11-28 11:13:53 +01:00
|
|
|
if (impl->policy_core == NULL)
|
2019-11-15 12:07:26 +01:00
|
|
|
return -EIO;
|
|
|
|
|
|
2019-11-18 13:10:21 +01:00
|
|
|
done = 0;
|
2020-08-06 17:11:35 +02:00
|
|
|
if ((seq = sm_media_session_sync(sess, roundtrip_callback, &done)) < 0)
|
|
|
|
|
return seq;
|
2019-11-18 13:10:21 +01:00
|
|
|
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: roundtrip %d", impl, seq);
|
2019-11-15 12:07:26 +01:00
|
|
|
|
2019-11-20 16:18:46 +01:00
|
|
|
pw_loop_enter(loop);
|
2019-11-18 13:10:21 +01:00
|
|
|
while (!done) {
|
2019-11-20 16:18:46 +01:00
|
|
|
if ((res = pw_loop_iterate(loop, -1)) < 0) {
|
2020-11-02 16:43:56 +01:00
|
|
|
if (res == -EINTR)
|
2020-11-02 14:51:07 +01:00
|
|
|
continue;
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_warn("%p: iterate error %d (%s)",
|
2019-11-15 12:07:26 +01:00
|
|
|
loop, res, spa_strerror(res));
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
2019-11-20 16:18:46 +01:00
|
|
|
pw_loop_leave(loop);
|
2019-11-15 12:07:26 +01:00
|
|
|
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: roundtrip %d done", impl, seq);
|
2019-11-15 12:07:26 +01:00
|
|
|
|
2019-11-18 13:10:21 +01:00
|
|
|
return 0;
|
2019-11-14 18:35:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct pw_proxy *sm_media_session_export(struct sm_media_session *sess,
|
2019-12-19 13:15:10 +01:00
|
|
|
const char *type, const struct spa_dict *props,
|
2019-11-14 18:35:29 +01:00
|
|
|
void *object, size_t user_data_size)
|
|
|
|
|
{
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(sess, struct impl, this);
|
2020-01-14 16:38:40 +01:00
|
|
|
struct pw_proxy *handle;
|
|
|
|
|
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: object %s %p", impl, type, object);
|
2020-01-14 16:38:40 +01:00
|
|
|
|
|
|
|
|
handle = pw_core_export(impl->monitor_core, type,
|
2019-12-13 11:26:05 +01:00
|
|
|
props, object, user_data_size);
|
2020-01-14 16:38:40 +01:00
|
|
|
|
|
|
|
|
monitor_sync(impl);
|
|
|
|
|
|
|
|
|
|
return handle;
|
2019-11-14 18:35:29 +01:00
|
|
|
}
|
|
|
|
|
|
2020-01-07 16:07:51 +01:00
|
|
|
struct sm_node *sm_media_session_export_node(struct sm_media_session *sess,
|
|
|
|
|
const struct spa_dict *props, struct pw_impl_node *object)
|
|
|
|
|
{
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(sess, struct impl, this);
|
|
|
|
|
struct sm_node *node;
|
|
|
|
|
struct pw_proxy *handle;
|
|
|
|
|
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: node %p", impl, object);
|
2020-01-07 16:07:51 +01:00
|
|
|
|
|
|
|
|
handle = pw_core_export(impl->monitor_core, PW_TYPE_INTERFACE_Node,
|
|
|
|
|
props, object, sizeof(struct sm_node));
|
|
|
|
|
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
node = (struct sm_node *) create_object(impl, NULL, handle, props, true);
|
2020-01-07 16:07:51 +01:00
|
|
|
|
2020-01-14 16:38:40 +01:00
|
|
|
monitor_sync(impl);
|
|
|
|
|
|
2020-01-07 16:07:51 +01:00
|
|
|
return node;
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
struct sm_device *sm_media_session_export_device(struct sm_media_session *sess,
|
2019-12-13 11:26:05 +01:00
|
|
|
const struct spa_dict *props, struct spa_device *object)
|
2019-11-29 13:21:55 +01:00
|
|
|
{
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(sess, struct impl, this);
|
|
|
|
|
struct sm_device *device;
|
2020-01-07 16:07:51 +01:00
|
|
|
struct pw_proxy *handle;
|
2019-11-29 13:21:55 +01:00
|
|
|
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: device %p", impl, object);
|
2019-11-29 13:21:55 +01:00
|
|
|
|
2020-01-07 16:07:51 +01:00
|
|
|
handle = pw_core_export(impl->monitor_core, SPA_TYPE_INTERFACE_Device,
|
2019-12-13 11:26:05 +01:00
|
|
|
props, object, sizeof(struct sm_device));
|
2019-11-29 13:21:55 +01:00
|
|
|
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
device = (struct sm_device *) create_object(impl, NULL, handle, props, true);
|
2019-11-29 13:21:55 +01:00
|
|
|
|
2020-01-14 16:38:40 +01:00
|
|
|
monitor_sync(impl);
|
|
|
|
|
|
2019-11-29 13:21:55 +01:00
|
|
|
return device;
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-14 18:35:29 +01:00
|
|
|
struct pw_proxy *sm_media_session_create_object(struct sm_media_session *sess,
|
2019-12-19 13:15:10 +01:00
|
|
|
const char *factory_name, const char *type, uint32_t version,
|
2019-11-14 18:35:29 +01:00
|
|
|
const struct spa_dict *props, size_t user_data_size)
|
|
|
|
|
{
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(sess, struct impl, this);
|
2019-12-11 07:46:59 +01:00
|
|
|
return pw_core_create_object(impl->policy_core,
|
2019-11-14 18:35:29 +01:00
|
|
|
factory_name, type, version, props, user_data_size);
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-26 12:53:28 +01:00
|
|
|
struct sm_node *sm_media_session_create_node(struct sm_media_session *sess,
|
2019-11-29 13:21:55 +01:00
|
|
|
const char *factory_name, const struct spa_dict *props)
|
2019-11-26 12:53:28 +01:00
|
|
|
{
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(sess, struct impl, this);
|
|
|
|
|
struct sm_node *node;
|
|
|
|
|
struct pw_proxy *proxy;
|
|
|
|
|
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: node '%s'", impl, factory_name);
|
2019-11-26 12:53:28 +01:00
|
|
|
|
2019-12-11 07:46:59 +01:00
|
|
|
proxy = pw_core_create_object(impl->policy_core,
|
2019-11-26 12:53:28 +01:00
|
|
|
factory_name,
|
|
|
|
|
PW_TYPE_INTERFACE_Node,
|
2019-12-11 15:26:11 +01:00
|
|
|
PW_VERSION_NODE,
|
2019-11-26 12:53:28 +01:00
|
|
|
props,
|
2019-11-29 13:21:55 +01:00
|
|
|
sizeof(struct sm_node));
|
2019-11-26 12:53:28 +01:00
|
|
|
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
node = (struct sm_node *)create_object(impl, proxy, proxy, props, false);
|
2019-11-26 12:53:28 +01:00
|
|
|
|
|
|
|
|
return node;
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-15 17:13:45 +01:00
|
|
|
static void check_endpoint_link(struct endpoint_link *link)
|
2019-11-15 12:08:46 +01:00
|
|
|
{
|
2019-11-15 17:13:45 +01:00
|
|
|
if (!spa_list_is_empty(&link->link_list))
|
|
|
|
|
return;
|
2019-11-15 12:08:46 +01:00
|
|
|
|
2019-11-15 17:13:45 +01:00
|
|
|
if (link->impl) {
|
|
|
|
|
spa_list_remove(&link->link);
|
|
|
|
|
pw_map_remove(&link->impl->endpoint_links, link->id);
|
2019-11-15 12:08:46 +01:00
|
|
|
|
2020-01-07 16:07:51 +01:00
|
|
|
pw_client_session_link_update(link->impl->this.client_session,
|
2019-11-15 17:13:45 +01:00
|
|
|
link->id,
|
|
|
|
|
PW_CLIENT_SESSION_LINK_UPDATE_DESTROYED,
|
|
|
|
|
0, NULL, NULL);
|
2019-11-15 12:08:46 +01:00
|
|
|
|
2019-11-15 17:13:45 +01:00
|
|
|
link->impl = NULL;
|
|
|
|
|
free(link);
|
2019-11-15 12:08:46 +01:00
|
|
|
}
|
2019-11-15 17:13:45 +01:00
|
|
|
}
|
|
|
|
|
|
2020-09-14 16:45:39 +02:00
|
|
|
static void proxy_link_error(void *data, int seq, int res, const char *message)
|
|
|
|
|
{
|
|
|
|
|
struct link *l = data;
|
2021-09-01 10:48:57 +02:00
|
|
|
pw_log_info("can't link %d:%d -> %d:%d: %s",
|
2020-09-14 16:45:39 +02:00
|
|
|
l->output_node, l->output_port,
|
|
|
|
|
l->input_node, l->input_port, message);
|
|
|
|
|
pw_proxy_destroy(l->proxy);
|
|
|
|
|
}
|
|
|
|
|
|
2020-06-04 17:41:01 +02:00
|
|
|
static void proxy_link_removed(void *data)
|
|
|
|
|
{
|
|
|
|
|
struct link *l = data;
|
|
|
|
|
pw_proxy_destroy(l->proxy);
|
|
|
|
|
}
|
|
|
|
|
|
2019-12-11 16:51:38 +01:00
|
|
|
static void proxy_link_destroy(void *data)
|
2019-11-15 17:13:45 +01:00
|
|
|
{
|
|
|
|
|
struct link *l = data;
|
|
|
|
|
|
2020-07-15 14:19:25 +02:00
|
|
|
spa_list_remove(&l->link);
|
2020-11-06 15:53:32 +01:00
|
|
|
spa_hook_remove(&l->listener);
|
2020-07-15 14:19:25 +02:00
|
|
|
|
2019-11-15 17:13:45 +01:00
|
|
|
if (l->endpoint_link) {
|
|
|
|
|
check_endpoint_link(l->endpoint_link);
|
|
|
|
|
l->endpoint_link = NULL;
|
2019-11-15 12:08:46 +01:00
|
|
|
}
|
2019-11-15 17:13:45 +01:00
|
|
|
}
|
|
|
|
|
|
2019-12-11 16:51:38 +01:00
|
|
|
static const struct pw_proxy_events proxy_link_events = {
|
2019-11-15 17:13:45 +01:00
|
|
|
PW_VERSION_PROXY_EVENTS,
|
2020-09-14 16:45:39 +02:00
|
|
|
.error = proxy_link_error,
|
2020-06-04 17:41:01 +02:00
|
|
|
.removed = proxy_link_removed,
|
2019-12-11 16:51:38 +01:00
|
|
|
.destroy = proxy_link_destroy
|
2019-11-15 17:13:45 +01:00
|
|
|
};
|
|
|
|
|
|
2021-10-01 16:16:10 +02:00
|
|
|
static bool channel_is_aux(uint32_t channel)
|
|
|
|
|
{
|
|
|
|
|
return channel >= SPA_AUDIO_CHANNEL_START_Aux &&
|
|
|
|
|
channel <= SPA_AUDIO_CHANNEL_LAST_Aux;
|
|
|
|
|
}
|
|
|
|
|
|
2020-07-22 15:05:45 +02:00
|
|
|
static int score_ports(struct sm_port *out, struct sm_port *in)
|
|
|
|
|
{
|
|
|
|
|
int score = 0;
|
|
|
|
|
|
|
|
|
|
if (in->direction != PW_DIRECTION_INPUT || out->direction != PW_DIRECTION_OUTPUT)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
if (out->type != SM_PORT_TYPE_UNKNOWN && in->type != SM_PORT_TYPE_UNKNOWN &&
|
|
|
|
|
in->type != out->type)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
if (out->channel == in->channel)
|
|
|
|
|
score += 100;
|
|
|
|
|
else if ((out->channel == SPA_AUDIO_CHANNEL_SL && in->channel == SPA_AUDIO_CHANNEL_RL) ||
|
|
|
|
|
(out->channel == SPA_AUDIO_CHANNEL_RL && in->channel == SPA_AUDIO_CHANNEL_SL) ||
|
|
|
|
|
(out->channel == SPA_AUDIO_CHANNEL_SR && in->channel == SPA_AUDIO_CHANNEL_RR) ||
|
|
|
|
|
(out->channel == SPA_AUDIO_CHANNEL_RR && in->channel == SPA_AUDIO_CHANNEL_SR))
|
2020-07-24 17:39:29 +02:00
|
|
|
score += 60;
|
2020-07-22 15:05:45 +02:00
|
|
|
else if ((out->channel == SPA_AUDIO_CHANNEL_FC && in->channel == SPA_AUDIO_CHANNEL_MONO) ||
|
|
|
|
|
(out->channel == SPA_AUDIO_CHANNEL_MONO && in->channel == SPA_AUDIO_CHANNEL_FC))
|
2020-07-24 17:39:29 +02:00
|
|
|
score += 50;
|
2020-07-27 17:41:52 +02:00
|
|
|
else if (in->channel == SPA_AUDIO_CHANNEL_UNKNOWN ||
|
|
|
|
|
in->channel == SPA_AUDIO_CHANNEL_MONO ||
|
|
|
|
|
out->channel == SPA_AUDIO_CHANNEL_UNKNOWN ||
|
|
|
|
|
out->channel == SPA_AUDIO_CHANNEL_MONO)
|
2020-07-24 17:39:29 +02:00
|
|
|
score += 10;
|
2021-10-11 11:35:29 +02:00
|
|
|
else if (channel_is_aux(in->channel) != channel_is_aux(out->channel))
|
|
|
|
|
score += 7;
|
2020-07-22 17:24:58 +02:00
|
|
|
if (score > 0 && !in->visited)
|
|
|
|
|
score += 5;
|
2020-07-27 17:41:52 +02:00
|
|
|
if (score <= 10)
|
|
|
|
|
score = 0;
|
2020-07-22 15:05:45 +02:00
|
|
|
return score;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct sm_port *find_input_port(struct impl *impl, struct sm_node *outnode,
|
|
|
|
|
struct sm_port *outport, struct sm_node *innode)
|
|
|
|
|
{
|
|
|
|
|
struct sm_port *inport, *best_port = NULL;
|
|
|
|
|
int score, best_score = 0;
|
|
|
|
|
|
|
|
|
|
spa_list_for_each(inport, &innode->port_list, link) {
|
|
|
|
|
score = score_ports(outport, inport);
|
|
|
|
|
if (score > best_score) {
|
|
|
|
|
best_score = score;
|
|
|
|
|
best_port = inport;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return best_port;
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-15 17:13:45 +01:00
|
|
|
static int link_nodes(struct impl *impl, struct endpoint_link *link,
|
|
|
|
|
struct sm_node *outnode, struct sm_node *innode)
|
|
|
|
|
{
|
|
|
|
|
struct pw_properties *props;
|
|
|
|
|
struct sm_port *outport, *inport;
|
2020-08-07 10:50:30 +02:00
|
|
|
int count = 0;
|
2021-04-24 19:45:45 +02:00
|
|
|
bool passive = false;
|
|
|
|
|
const char *str;
|
2019-11-15 12:08:46 +01:00
|
|
|
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: linking %d -> %d", impl, outnode->obj.id, innode->obj.id);
|
2019-11-15 12:08:46 +01:00
|
|
|
|
2021-04-24 19:45:45 +02:00
|
|
|
if ((str = spa_dict_lookup(outnode->info->props, PW_KEY_NODE_PASSIVE)) != NULL)
|
2021-05-18 11:36:13 +10:00
|
|
|
passive |= (pw_properties_parse_bool(str) || spa_streq(str, "out"));
|
2021-04-24 19:45:45 +02:00
|
|
|
if ((str = spa_dict_lookup(innode->info->props, PW_KEY_NODE_PASSIVE)) != NULL)
|
2021-05-18 11:36:13 +10:00
|
|
|
passive |= (pw_properties_parse_bool(str) || spa_streq(str, "in"));
|
2021-04-24 19:45:45 +02:00
|
|
|
|
2019-11-15 12:08:46 +01:00
|
|
|
props = pw_properties_new(NULL, NULL);
|
|
|
|
|
pw_properties_setf(props, PW_KEY_LINK_OUTPUT_NODE, "%d", outnode->obj.id);
|
|
|
|
|
pw_properties_setf(props, PW_KEY_LINK_INPUT_NODE, "%d", innode->obj.id);
|
2021-04-24 19:45:45 +02:00
|
|
|
pw_properties_setf(props, PW_KEY_LINK_PASSIVE, "%s", passive ? "true" : "false");
|
2019-11-15 12:08:46 +01:00
|
|
|
|
2020-07-22 15:05:45 +02:00
|
|
|
spa_list_for_each(inport, &innode->port_list, link)
|
|
|
|
|
inport->visited = false;
|
|
|
|
|
|
|
|
|
|
spa_list_for_each(outport, &outnode->port_list, link) {
|
|
|
|
|
struct link *l;
|
|
|
|
|
struct pw_proxy *p;
|
|
|
|
|
|
|
|
|
|
if (outport->direction != PW_DIRECTION_OUTPUT)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
inport = find_input_port(impl, outnode, outport, innode);
|
|
|
|
|
if (inport == NULL) {
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: port %d:%d can't be linked", impl,
|
2020-07-22 15:05:45 +02:00
|
|
|
outport->direction, outport->obj.id);
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
inport->visited = true;
|
2019-11-15 12:08:46 +01:00
|
|
|
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: port %d:%d -> %d:%d", impl,
|
2019-11-15 12:08:46 +01:00
|
|
|
outport->direction, outport->obj.id,
|
|
|
|
|
inport->direction, inport->obj.id);
|
|
|
|
|
|
2020-07-22 15:05:45 +02:00
|
|
|
pw_properties_setf(props, PW_KEY_LINK_OUTPUT_PORT, "%d", outport->obj.id);
|
|
|
|
|
pw_properties_setf(props, PW_KEY_LINK_INPUT_PORT, "%d", inport->obj.id);
|
|
|
|
|
|
|
|
|
|
p = pw_core_create_object(impl->policy_core,
|
|
|
|
|
"link-factory",
|
|
|
|
|
PW_TYPE_INTERFACE_Link,
|
|
|
|
|
PW_VERSION_LINK,
|
|
|
|
|
&props->dict, sizeof(struct link));
|
|
|
|
|
if (p == NULL)
|
|
|
|
|
return -errno;
|
|
|
|
|
|
|
|
|
|
l = pw_proxy_get_user_data(p);
|
|
|
|
|
l->proxy = p;
|
|
|
|
|
l->output_node = outnode->obj.id;
|
|
|
|
|
l->output_port = outport->obj.id;
|
|
|
|
|
l->input_node = innode->obj.id;
|
|
|
|
|
l->input_port = inport->obj.id;
|
|
|
|
|
pw_proxy_add_listener(p, &l->listener, &proxy_link_events, l);
|
2020-08-07 10:50:30 +02:00
|
|
|
count++;
|
2020-07-22 15:05:45 +02:00
|
|
|
|
|
|
|
|
if (link) {
|
|
|
|
|
l->endpoint_link = link;
|
|
|
|
|
spa_list_append(&link->link_list, &l->link);
|
2019-11-15 12:08:46 +01:00
|
|
|
} else {
|
2020-07-22 15:05:45 +02:00
|
|
|
spa_list_append(&impl->link_list, &l->link);
|
2019-11-15 12:08:46 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
pw_properties_free(props);
|
|
|
|
|
|
2020-08-07 10:50:30 +02:00
|
|
|
return count;
|
2019-11-15 17:13:45 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
int sm_media_session_create_links(struct sm_media_session *sess,
|
|
|
|
|
const struct spa_dict *dict)
|
|
|
|
|
{
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(sess, struct impl, this);
|
|
|
|
|
struct sm_object *obj;
|
|
|
|
|
struct sm_node *outnode = NULL, *innode = NULL;
|
|
|
|
|
struct sm_endpoint *outendpoint = NULL, *inendpoint = NULL;
|
|
|
|
|
struct sm_endpoint_stream *outstream = NULL, *instream = NULL;
|
|
|
|
|
struct endpoint_link *link = NULL;
|
|
|
|
|
const char *str;
|
|
|
|
|
int res;
|
|
|
|
|
|
|
|
|
|
sm_media_session_roundtrip(sess);
|
|
|
|
|
|
|
|
|
|
/* find output node */
|
|
|
|
|
if ((str = spa_dict_lookup(dict, PW_KEY_LINK_OUTPUT_NODE)) != NULL &&
|
2020-07-22 14:14:02 +02:00
|
|
|
(obj = find_object(impl, atoi(str), PW_TYPE_INTERFACE_Node)) != NULL)
|
2019-11-15 17:13:45 +01:00
|
|
|
outnode = (struct sm_node*)obj;
|
|
|
|
|
|
|
|
|
|
/* find input node */
|
|
|
|
|
if ((str = spa_dict_lookup(dict, PW_KEY_LINK_INPUT_NODE)) != NULL &&
|
2020-07-22 14:14:02 +02:00
|
|
|
(obj = find_object(impl, atoi(str), PW_TYPE_INTERFACE_Node)) != NULL)
|
2019-11-15 17:13:45 +01:00
|
|
|
innode = (struct sm_node*)obj;
|
|
|
|
|
|
|
|
|
|
/* find endpoints and streams */
|
|
|
|
|
if ((str = spa_dict_lookup(dict, PW_KEY_ENDPOINT_LINK_OUTPUT_ENDPOINT)) != NULL &&
|
2020-07-22 14:14:02 +02:00
|
|
|
(obj = find_object(impl, atoi(str), PW_TYPE_INTERFACE_Endpoint)) != NULL)
|
2019-11-15 17:13:45 +01:00
|
|
|
outendpoint = (struct sm_endpoint*)obj;
|
2020-07-22 14:14:02 +02:00
|
|
|
|
2019-11-15 17:13:45 +01:00
|
|
|
if ((str = spa_dict_lookup(dict, PW_KEY_ENDPOINT_LINK_OUTPUT_STREAM)) != NULL &&
|
2020-07-22 14:14:02 +02:00
|
|
|
(obj = find_object(impl, atoi(str), PW_TYPE_INTERFACE_EndpointStream)) != NULL)
|
2019-11-15 17:13:45 +01:00
|
|
|
outstream = (struct sm_endpoint_stream*)obj;
|
2020-07-22 14:14:02 +02:00
|
|
|
|
2019-11-15 17:13:45 +01:00
|
|
|
if ((str = spa_dict_lookup(dict, PW_KEY_ENDPOINT_LINK_INPUT_ENDPOINT)) != NULL &&
|
2020-07-22 14:14:02 +02:00
|
|
|
(obj = find_object(impl, atoi(str), PW_TYPE_INTERFACE_Endpoint)) != NULL)
|
2019-11-15 17:13:45 +01:00
|
|
|
inendpoint = (struct sm_endpoint*)obj;
|
2020-07-22 14:14:02 +02:00
|
|
|
|
2019-11-15 17:13:45 +01:00
|
|
|
if ((str = spa_dict_lookup(dict, PW_KEY_ENDPOINT_LINK_INPUT_STREAM)) != NULL &&
|
2020-07-22 14:14:02 +02:00
|
|
|
(obj = find_object(impl, atoi(str), PW_TYPE_INTERFACE_EndpointStream)) != NULL)
|
2019-11-15 17:13:45 +01:00
|
|
|
instream = (struct sm_endpoint_stream*)obj;
|
|
|
|
|
|
|
|
|
|
if (outendpoint != NULL && inendpoint != NULL) {
|
|
|
|
|
link = calloc(1, sizeof(struct endpoint_link));
|
|
|
|
|
if (link == NULL)
|
|
|
|
|
return -errno;
|
|
|
|
|
|
|
|
|
|
link->id = pw_map_insert_new(&impl->endpoint_links, link);
|
|
|
|
|
link->impl = impl;
|
|
|
|
|
spa_list_init(&link->link_list);
|
|
|
|
|
spa_list_append(&impl->endpoint_link_list, &link->link);
|
|
|
|
|
|
|
|
|
|
link->info.version = PW_VERSION_ENDPOINT_LINK_INFO;
|
|
|
|
|
link->info.id = link->id;
|
2019-11-20 16:18:46 +01:00
|
|
|
link->info.session_id = impl->this.session->obj.id;
|
2019-11-15 17:13:45 +01:00
|
|
|
link->info.output_endpoint_id = outendpoint->info->id;
|
|
|
|
|
link->info.output_stream_id = outstream ? outstream->info->id : SPA_ID_INVALID;
|
|
|
|
|
link->info.input_endpoint_id = inendpoint->info->id;
|
|
|
|
|
link->info.input_stream_id = instream ? instream->info->id : SPA_ID_INVALID;
|
|
|
|
|
link->info.change_mask =
|
|
|
|
|
PW_ENDPOINT_LINK_CHANGE_MASK_STATE |
|
|
|
|
|
PW_ENDPOINT_LINK_CHANGE_MASK_PROPS;
|
|
|
|
|
link->info.state = PW_ENDPOINT_LINK_STATE_ACTIVE;
|
|
|
|
|
link->info.props = (struct spa_dict*) dict;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* link the nodes, record the link proxies in the endpoint_link */
|
|
|
|
|
if (outnode != NULL && innode != NULL)
|
|
|
|
|
res = link_nodes(impl, link, outnode, innode);
|
|
|
|
|
else
|
|
|
|
|
res = 0;
|
|
|
|
|
|
|
|
|
|
if (link != NULL) {
|
|
|
|
|
/* now create the endpoint link */
|
2020-01-07 16:07:51 +01:00
|
|
|
pw_client_session_link_update(impl->this.client_session,
|
2019-11-15 17:13:45 +01:00
|
|
|
link->id,
|
|
|
|
|
PW_CLIENT_SESSION_UPDATE_INFO,
|
|
|
|
|
0, NULL,
|
|
|
|
|
&link->info);
|
|
|
|
|
}
|
2019-11-15 12:08:46 +01:00
|
|
|
return res;
|
|
|
|
|
}
|
2019-11-14 18:35:29 +01:00
|
|
|
|
2020-07-15 14:20:46 +02:00
|
|
|
int sm_media_session_remove_links(struct sm_media_session *sess,
|
|
|
|
|
const struct spa_dict *dict)
|
|
|
|
|
{
|
|
|
|
|
struct impl *impl = SPA_CONTAINER_OF(sess, struct impl, this);
|
|
|
|
|
struct sm_object *obj;
|
|
|
|
|
struct sm_node *outnode = NULL, *innode = NULL;
|
|
|
|
|
const char *str;
|
|
|
|
|
struct link *l, *t;
|
|
|
|
|
|
|
|
|
|
/* find output node */
|
|
|
|
|
if ((str = spa_dict_lookup(dict, PW_KEY_LINK_OUTPUT_NODE)) != NULL &&
|
2020-07-22 14:14:02 +02:00
|
|
|
(obj = find_object(impl, atoi(str), PW_TYPE_INTERFACE_Node)) != NULL)
|
2020-07-15 14:20:46 +02:00
|
|
|
outnode = (struct sm_node*)obj;
|
|
|
|
|
|
|
|
|
|
/* find input node */
|
|
|
|
|
if ((str = spa_dict_lookup(dict, PW_KEY_LINK_INPUT_NODE)) != NULL &&
|
2020-07-22 14:14:02 +02:00
|
|
|
(obj = find_object(impl, atoi(str), PW_TYPE_INTERFACE_Node)) != NULL)
|
2020-07-15 14:20:46 +02:00
|
|
|
innode = (struct sm_node*)obj;
|
2020-07-22 14:14:02 +02:00
|
|
|
|
2020-07-15 14:20:46 +02:00
|
|
|
if (innode == NULL || outnode == NULL)
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
|
|
spa_list_for_each_safe(l, t, &impl->link_list, link) {
|
|
|
|
|
if (l->output_node == outnode->obj.id && l->input_node == innode->obj.id) {
|
|
|
|
|
pw_proxy_destroy(l->proxy);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2020-12-30 13:12:48 +01:00
|
|
|
int sm_media_session_load_conf(struct sm_media_session *sess, const char *name,
|
|
|
|
|
struct pw_properties *conf)
|
|
|
|
|
{
|
2021-10-15 10:53:32 +10:00
|
|
|
struct impl *impl = SPA_CONTAINER_OF(sess, struct impl, this);
|
|
|
|
|
return pw_conf_load_conf(impl->config_dir, name, conf);
|
2020-08-13 11:31:57 +02:00
|
|
|
}
|
2020-11-25 16:13:20 +01:00
|
|
|
|
2020-08-13 11:31:57 +02:00
|
|
|
int sm_media_session_load_state(struct sm_media_session *sess,
|
2021-03-05 09:23:19 +01:00
|
|
|
const char *name, struct pw_properties *props)
|
2020-08-13 11:31:57 +02:00
|
|
|
{
|
2021-02-11 17:54:21 +01:00
|
|
|
return pw_conf_load_state(SESSION_PREFIX, name, props);
|
2020-08-13 11:31:57 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
int sm_media_session_save_state(struct sm_media_session *sess,
|
2021-03-05 09:23:19 +01:00
|
|
|
const char *name, const struct pw_properties *props)
|
2020-08-13 11:31:57 +02:00
|
|
|
{
|
2021-02-11 17:54:21 +01:00
|
|
|
return pw_conf_save_state(SESSION_PREFIX, name, props);
|
2020-08-13 11:31:57 +02:00
|
|
|
}
|
|
|
|
|
|
2021-02-12 20:47:41 +01:00
|
|
|
/* Format a printf-style string into @name (at most @size bytes) and replace
 * every character outside [0-9a-zA-Z._-] with @sub.
 *
 * Returns @name on success, NULL when the formatting itself failed.
 * The result may be truncated to @size-1 characters; it is always
 * NUL-terminated by vsnprintf when size > 0. */
char *sm_media_session_sanitize_name(char *name, int size, char sub, const char *fmt, ...)
{
	va_list args;
	char *p;
	int len;

	va_start(args, fmt);
	len = vsnprintf(name, size, fmt, args);
	va_end(args);

	if (len < 0)
		return NULL;

	for (p = name; *p != '\0'; p++) {
		char c = *p;
		int keep = (c >= '0' && c <= '9') ||
			   (c >= 'a' && c <= 'z') ||
			   (c >= 'A' && c <= 'Z') ||
			   c == '.' || c == '-' || c == '_';
		if (!keep)
			*p = sub;
	}
	return name;
}
|
|
|
|
|
|
|
|
|
|
/* Format a printf-style string into @name (at most @size bytes) and replace
 * every ':' with @sub. Unlike sanitize_name, descriptions allow any other
 * character.
 *
 * Returns @name on success, NULL when formatting failed. */
char *sm_media_session_sanitize_description(char *name, int size, char sub, const char *fmt, ...)
{
	va_list args;
	int len;
	char *p;

	va_start(args, fmt);
	len = vsnprintf(name, size, fmt, args);
	va_end(args);

	if (len < 0)
		return NULL;

	for (p = name; *p != '\0'; p++)
		if (*p == ':')
			*p = sub;
	return name;
}
|
|
|
|
|
|
2021-04-10 16:49:14 +03:00
|
|
|
/* Record a change of the seat-active state and notify listeners.
 * The seat_active event is only emitted on an actual transition. */
int sm_media_session_seat_active_changed(struct sm_media_session *sess, bool active)
{
	struct impl *impl = SPA_CONTAINER_OF(sess, struct impl, this);

	if (impl->seat_active == active)
		return 0;

	impl->seat_active = active;
	sm_media_session_emit_seat_active(impl, active);
	return 0;
}
|
|
|
|
|
|
2020-01-14 16:38:40 +01:00
|
|
|
/* Done event on the monitor core connection.
 *
 * Ordering matters here: postponed policy-registry events are flushed
 * first, so they are handled after the corresponding monitor proxy and
 * registry events (see the global-id race handling), and only then is
 * the paused policy core released when this was our own sync. */
static void monitor_core_done(void *data, uint32_t id, int seq)
{
	struct impl *impl = data;

	/* id 0 is the core itself; flush policy registry events queued
	 * until this monitor sync point */
	if (id == 0)
		handle_postponed_registry_events(impl, seq);

	if (seq == impl->monitor_seq) {
		pw_log_debug("%p: monitor sync stop %d", impl, seq);
		/* monitor caught up, let the policy core run again */
		pw_core_set_paused(impl->policy_core, false);
	}
}
|
|
|
|
|
|
|
|
|
|
/* The monitor core only needs the done event, used for sync bookkeeping
 * in monitor_core_done(). */
static const struct pw_core_events monitor_core_events = {
	PW_VERSION_CORE_EVENTS,
	.done = monitor_core_done,
};
|
|
|
|
|
|
2019-11-14 18:35:29 +01:00
|
|
|
static int start_session(struct impl *impl)
|
|
|
|
|
{
|
2019-12-10 18:19:56 +01:00
|
|
|
impl->monitor_core = pw_context_connect(impl->this.context, NULL, 0);
|
2019-12-06 11:48:40 +01:00
|
|
|
if (impl->monitor_core == NULL) {
|
|
|
|
|
pw_log_error("can't start monitor: %m");
|
|
|
|
|
return -errno;
|
|
|
|
|
}
|
2020-01-14 16:38:40 +01:00
|
|
|
|
|
|
|
|
pw_core_add_listener(impl->monitor_core,
|
|
|
|
|
&impl->monitor_listener,
|
|
|
|
|
&monitor_core_events, impl);
|
|
|
|
|
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still created if policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is a another approach.
2021-02-27 14:55:13 +02:00
|
|
|
impl->monitor_registry = pw_core_get_registry(impl->monitor_core,
|
|
|
|
|
PW_VERSION_REGISTRY, 0);
|
|
|
|
|
pw_registry_add_listener(impl->monitor_registry,
|
|
|
|
|
&impl->monitor_registry_listener,
|
|
|
|
|
&monitor_registry_events, impl);
|
|
|
|
|
|
2019-11-14 18:35:29 +01:00
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-09 15:52:53 +01:00
|
|
|
static void core_info(void *data, const struct pw_core_info *info)
|
|
|
|
|
{
|
|
|
|
|
struct impl *impl = data;
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: info", impl);
|
2021-09-03 13:26:15 +02:00
|
|
|
impl->this.info = pw_core_info_merge(impl->this.info, info, true);
|
2020-01-09 15:52:53 +01:00
|
|
|
|
|
|
|
|
if (impl->this.info->change_mask != 0)
|
|
|
|
|
sm_media_session_emit_info(impl, impl->this.info);
|
|
|
|
|
impl->this.info->change_mask = 0;
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-14 18:35:29 +01:00
|
|
|
/* Done event on the policy core: dispatch pending sync callbacks and,
 * when this completes a scheduled rescan, run the rescan and flush
 * accumulated object change notifications. */
static void core_done(void *data, uint32_t id, int seq)
{
	struct impl *impl = data;
	struct sync *s, *t;

	impl->last_seq = seq;

	/* fire and release every sync callback registered for this seq;
	 * safe iteration because callbacks are removed inside the loop */
	spa_list_for_each_safe(s, t, &impl->sync_list, link) {
		if (s->seq == seq) {
			spa_list_remove(&s->link);
			s->callback(s->data);
			free(s);
		}
	}
	if (impl->rescan_seq == seq) {
		struct sm_object *obj, *to;

		/* 'scanning' guards against re-entry: a rescan requested
		 * while one is running is deferred via rescan_pending */
		if (!impl->scanning) {
			pw_log_trace("%p: rescan %u %d", impl, id, seq);
			impl->scanning = true;
			sm_media_session_emit_rescan(impl, seq);
			impl->scanning = false;
			if (impl->rescan_pending) {
				impl->rescan_pending = false;
				sm_media_session_schedule_rescan(&impl->this);
			}
		}

		/* emit update for every changed object; objects still
		 * waiting for an id (SPA_ID_INVALID) are skipped */
		spa_list_for_each_safe(obj, to, &impl->object_list, link) {
			if (obj->id == SPA_ID_INVALID)
				continue;
			pw_log_trace("%p: obj %p %08x", impl, obj, obj->changed);
			if (obj->changed)
				sm_object_emit_update(obj);
			obj->changed = 0;
		}
	}
}
|
|
|
|
|
|
2019-12-06 11:48:40 +01:00
|
|
|
/* Error event on the policy core connection.
 *
 * -ENOENT/-EINVAL are common, recoverable errors and are logged at info
 * level only; everything else is a warning. A hung-up core connection
 * (-EPIPE on PW_ID_CORE) is fatal and stops the main loop. */
static void core_error(void *data, uint32_t id, int seq, int res, const char *message)
{
	struct impl *impl = data;
	enum spa_log_level level = (res == -ENOENT || res == -EINVAL) ?
			SPA_LOG_LEVEL_INFO : SPA_LOG_LEVEL_WARN;

	pw_log(level, "error id:%u seq:%d res:%d (%s): %s",
			id, seq, res, spa_strerror(res), message);

	if (id == PW_ID_CORE && res == -EPIPE)
		pw_main_loop_quit(impl->loop);
}
|
|
|
|
|
|
2018-08-02 11:25:27 +02:00
|
|
|
|
2020-01-14 16:38:40 +01:00
|
|
|
/* Events for the policy core connection: cached info updates, sync/rescan
 * completion and error handling. */
static const struct pw_core_events policy_core_events = {
	PW_VERSION_CORE_EVENTS,
	.info = core_info,
	.done = core_done,
	.error = core_error
};
|
|
|
|
|
|
2019-12-13 11:27:23 +01:00
|
|
|
static void policy_core_destroy(void *data)
|
|
|
|
|
{
|
|
|
|
|
struct impl *impl = data;
|
2021-09-21 18:25:54 +10:00
|
|
|
pw_log_debug("%p: policy core destroy", impl);
|
2019-12-13 11:27:23 +01:00
|
|
|
impl->policy_core = NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Proxy-level events for the policy core: only destroy, to drop the
 * cached pointer in policy_core_destroy(). */
static const struct pw_proxy_events proxy_core_events = {
	PW_VERSION_PROXY_EVENTS,
	.destroy = policy_core_destroy,
};
|
|
|
|
|
|
2019-12-06 11:48:40 +01:00
|
|
|
static int start_policy(struct impl *impl)
|
2018-08-02 11:25:27 +02:00
|
|
|
{
|
2019-12-10 18:19:56 +01:00
|
|
|
impl->policy_core = pw_context_connect(impl->this.context, NULL, 0);
|
2019-12-06 11:48:40 +01:00
|
|
|
if (impl->policy_core == NULL) {
|
|
|
|
|
pw_log_error("can't start policy: %m");
|
|
|
|
|
return -errno;
|
|
|
|
|
}
|
2018-09-26 13:22:21 +02:00
|
|
|
|
2019-12-11 07:46:59 +01:00
|
|
|
pw_core_add_listener(impl->policy_core,
|
2019-12-18 12:15:03 +01:00
|
|
|
&impl->policy_listener,
|
2020-01-14 16:38:40 +01:00
|
|
|
&policy_core_events, impl);
|
2019-12-13 11:27:23 +01:00
|
|
|
pw_proxy_add_listener((struct pw_proxy*)impl->policy_core,
|
2019-12-18 12:15:03 +01:00
|
|
|
&impl->proxy_policy_listener,
|
|
|
|
|
&proxy_core_events, impl);
|
|
|
|
|
|
2019-12-11 09:44:48 +01:00
|
|
|
impl->registry = pw_core_get_registry(impl->policy_core,
|
2019-12-18 12:15:03 +01:00
|
|
|
PW_VERSION_REGISTRY, 0);
|
2019-12-11 09:44:48 +01:00
|
|
|
pw_registry_add_listener(impl->registry,
|
2019-12-18 12:15:03 +01:00
|
|
|
&impl->registry_listener,
|
|
|
|
|
®istry_events, impl);
|
2018-08-02 11:25:27 +02:00
|
|
|
|
2019-12-18 12:15:03 +01:00
|
|
|
return 0;
|
2018-08-02 11:25:27 +02:00
|
|
|
}
|
|
|
|
|
|
2019-12-18 12:59:42 +01:00
|
|
|
/* Tear down the whole session: emit shutdown/destroy events, release all
 * tracked objects and pending registry events, then disconnect both the
 * policy and monitor cores.
 *
 * The teardown order is deliberate: shutdown event first (monitors may
 * still drop references), then object destruction, then destroy event,
 * then the actual free and core disconnects. */
static void session_shutdown(struct impl *impl)
{
	struct sm_object *obj;
	struct registry_event *re;
	struct spa_list free_list;

	pw_log_info("%p", impl);
	sm_media_session_emit_shutdown(impl);

	/*
	 * Monitors may still hold references to objects, which they
	 * drop in session destroy event, so don't free undiscarded
	 * objects yet. Destroy event handlers may remove any objects
	 * in the list, so iterate carefully.
	 */
	spa_list_init(&free_list);
	spa_list_consume(obj, &impl->object_list, link) {
		if (obj->destroyed) {
			/* already destroyed: park on free_list until after
			 * the destroy event below */
			spa_list_remove(&obj->link);
			spa_list_append(&free_list, &obj->link);
		} else {
			sm_object_destroy_maybe_free(obj);
		}
	}

	/* drop policy registry events still queued for postponed handling */
	spa_list_consume(re, &impl->registry_event_list, link)
		registry_event_free(re);

	impl->this.metadata = NULL;

	sm_media_session_emit_destroy(impl);

	spa_list_consume(obj, &free_list, link)
		sm_object_destroy(obj);
	spa_list_consume(obj, &impl->object_list, link)
		sm_object_destroy(obj); /* in case emit_destroy created new objects */

	if (impl->registry) {
		spa_hook_remove(&impl->registry_listener);
		pw_proxy_destroy((struct pw_proxy*)impl->registry);
	}
	if (impl->monitor_registry) {
		spa_hook_remove(&impl->monitor_registry_listener);
		pw_proxy_destroy((struct pw_proxy*)impl->monitor_registry);
	}
	if (impl->policy_core) {
		spa_hook_remove(&impl->policy_listener);
		spa_hook_remove(&impl->proxy_policy_listener);
		pw_core_disconnect(impl->policy_core);
	}
	if (impl->monitor_core) {
		spa_hook_remove(&impl->monitor_listener);
		pw_core_disconnect(impl->monitor_core);
	}
	if (impl->this.info)
		pw_core_info_free(impl->this.info);
}
|
|
|
|
|
|
2020-11-20 17:58:08 +01:00
|
|
|
static int sm_metadata_start(struct sm_media_session *sess)
|
|
|
|
|
{
|
|
|
|
|
sess->metadata = sm_media_session_export_metadata(sess, "default");
|
|
|
|
|
if (sess->metadata == NULL)
|
|
|
|
|
return -errno;
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2020-10-08 11:57:35 +02:00
|
|
|
static int sm_pulse_bridge_start(struct sm_media_session *sess)
|
|
|
|
|
{
|
|
|
|
|
if (pw_context_load_module(sess->context,
|
|
|
|
|
"libpipewire-module-protocol-pulse",
|
|
|
|
|
NULL, NULL) == NULL)
|
|
|
|
|
return -errno;
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2021-05-17 15:17:54 +02:00
|
|
|
/* DBus disconnect callback: log and broadcast the event to the
 * session's listeners. */
static void dbus_connection_disconnected(void *data)
{
	struct impl *impl = data;

	pw_log_info("DBus disconnected");
	sm_media_session_emit_dbus_disconnected(impl);
}
|
|
|
|
|
|
|
|
|
|
/* DBus connection hooks: forward a disconnect to the session hooks via
 * dbus_connection_disconnected(). */
static const struct spa_dbus_connection_events dbus_connection_events = {
	SPA_VERSION_DBUS_CONNECTION_EVENTS,
	.disconnected = dbus_connection_disconnected
};
|
|
|
|
|
|
2020-06-01 18:14:53 +02:00
|
|
|
static void do_quit(void *data, int signal_number)
|
|
|
|
|
{
|
|
|
|
|
struct impl *impl = data;
|
|
|
|
|
pw_main_loop_quit(impl->loop);
|
|
|
|
|
}
|
|
|
|
|
|
2020-12-30 13:12:48 +01:00
|
|
|
/* Parse the "session.modules" JSON object in @str and enable the listed
 * modules in impl->modules.
 *
 * Each key is enabled when it is already present in impl->modules (e.g.
 * "default") or when a config file with that name exists in the config
 * directory; its array value then names the modules/groups to turn on.
 * The whole object is parsed twice so that group names enabled in the
 * first pass resolve to their member modules in the second pass.
 *
 * Returns 0 on success, -ENOENT when no config dir could be determined,
 * -EINVAL when @str is not a JSON object. */
static int collect_modules(struct impl *impl, const char *str)
{
	struct spa_json it[3];
	char key[512], value[512];
	const char *dir, *prefix = NULL, *val;
	char check_path[PATH_MAX];
	struct stat statbuf;
	int count = 0;

	/* MEDIA_SESSION_CONFIG_DIR overrides; otherwise use the PipeWire
	 * config dir with the session prefix appended */
	dir = getenv("MEDIA_SESSION_CONFIG_DIR");
	if (dir == NULL) {
		prefix = SESSION_PREFIX;
		if ((dir = getenv("PIPEWIRE_CONFIG_DIR")) == NULL)
			dir = PIPEWIRE_CONFDATADIR;
	}
	if (dir == NULL)
		return -ENOENT;

again:
	spa_json_init(&it[0], str, strlen(str));
	if (spa_json_enter_object(&it[0], &it[1]) < 0)
		return -EINVAL;

	while (spa_json_get_string(&it[1], key, sizeof(key)-1) > 0) {
		bool add = false;

		if (pw_properties_get(impl->modules, key) != NULL) {
			/* already enabled (directly or via a group) */
			add = true;
		} else {
			/* enabled when a matching config file exists */
			snprintf(check_path, sizeof(check_path),
					"%s%s%s/%s", dir, prefix ? "/" : "", prefix ? prefix : "", key);
			add = (stat(check_path, &statbuf) == 0);
		}
		if (add) {
			if (spa_json_enter_array(&it[1], &it[2]) < 0)
				continue;

			while (spa_json_get_string(&it[2], value, sizeof(value)-1) > 0) {
				pw_properties_set(impl->modules, value, "true");
			}
		}
		/* skip the value of keys we don't enable */
		else if (spa_json_next(&it[1], &val) <= 0)
			break;
	}
	/* twice to resolve groups in module list */
	if (count++ == 0)
		goto again;

	return 0;
}
|
|
|
|
|
|
2020-03-30 14:09:44 +02:00
|
|
|
/* Table of available session modules: the name used in the config file
 * and on the command line, a one-line description for --help, and the
 * start function that activates the module. props is NULL for every
 * entry here (NOTE(review): appears unused in this table). */
static const struct {
	const char *name;	/* module name in config / --help */
	const char *desc;	/* human readable description */
	int (*start)(struct sm_media_session *sess);	/* activation hook */
	const char *props;	/* optional extra properties (all NULL) */
} modules[] = {
	{ "flatpak", "manage flatpak access", sm_access_flatpak_start, NULL },
	{ "portal", "manage portal permissions", sm_access_portal_start, NULL },
	{ "metadata", "export metadata API", sm_metadata_start, NULL },
	{ "default-nodes", "restore default nodes", sm_default_nodes_start, NULL },
	{ "default-profile", "restore default profiles", sm_default_profile_start, NULL },
	{ "default-routes", "restore default route", sm_default_routes_start, NULL },
	{ "restore-stream", "restore stream settings", sm_restore_stream_start, NULL },
	{ "streams-follow-default", "move streams when default changes", sm_streams_follow_default_start, NULL },
	{ "alsa-no-dsp", "do not configure audio nodes in DSP mode", sm_alsa_no_dsp_start, NULL },
	{ "alsa-seq", "alsa seq midi support", sm_alsa_midi_start, NULL },
	{ "alsa-monitor", "alsa card udev detection", sm_alsa_monitor_start, NULL },
	{ "v4l2", "video for linux udev detection", sm_v4l2_monitor_start, NULL },
	{ "libcamera", "libcamera udev detection", sm_libcamera_monitor_start, NULL },
	{ "bluez5", "bluetooth support", sm_bluez5_monitor_start, NULL },
	{ "bluez5-autoswitch", "switch bluetooth profiles automatically", sm_bluez5_autoswitch_start, NULL },
	{ "suspend-node", "suspend inactive nodes", sm_suspend_node_start, NULL },
	{ "policy-node", "configure and link nodes", sm_policy_node_start, NULL },
	{ "pulse-bridge", "accept pulseaudio clients", sm_pulse_bridge_start, NULL },
#ifdef HAVE_SYSTEMD
	{ "logind", "systemd-logind seat support", sm_logind_start, NULL },
#endif
};
|
|
|
|
|
|
2020-12-30 13:12:48 +01:00
|
|
|
static bool is_module_enabled(struct impl *impl, const char *val)
|
2020-10-30 09:54:18 +01:00
|
|
|
{
|
2021-10-12 14:17:16 +10:00
|
|
|
return pw_properties_get_bool(impl->modules, val, false);
|
2020-12-21 05:49:16 +02:00
|
|
|
}
|
|
|
|
|
|
2021-02-19 17:48:27 +01:00
|
|
|
/* Print usage for --help: the option summary followed by the module
 * table, marking currently enabled modules with '*'. */
static void show_help(const char *name, struct impl *impl, const char *config_name)
{
	size_t i;

	fprintf(stdout, "%s [options]\n"
		" -h, --help Show this help\n"
		" --version Show version\n"
		" -c, --config Load config (Default %s)\n",
		name, config_name);

	fprintf(stdout, "\noptions: (*=enabled)\n");
	for (i = 0; i < SPA_N_ELEMENTS(modules); i++) {
		fprintf(stdout, "\t %c %-15.15s: %s\n",
			is_module_enabled(impl, modules[i].name) ? '*' : ' ',
			modules[i].name, modules[i].desc);
	}
}
|
|
|
|
|
|
2018-08-02 11:25:27 +02:00
|
|
|
int main(int argc, char *argv[])
|
|
|
|
|
{
|
2021-04-10 16:49:14 +03:00
|
|
|
struct impl impl = { .seat_active = true };
|
2019-11-20 16:18:46 +01:00
|
|
|
const struct spa_support *support;
|
2021-02-19 17:48:27 +01:00
|
|
|
const char *str, *config_name = SESSION_CONF;
|
|
|
|
|
bool do_show_help = false;
|
2019-11-20 16:18:46 +01:00
|
|
|
uint32_t n_support;
|
2020-03-30 14:09:44 +02:00
|
|
|
int res = 0, c;
|
|
|
|
|
static const struct option long_options[] = {
|
2020-04-02 14:34:02 +02:00
|
|
|
{ "help", no_argument, NULL, 'h' },
|
|
|
|
|
{ "version", no_argument, NULL, 'V' },
|
2021-02-19 17:48:27 +01:00
|
|
|
{ "config", required_argument, NULL, 'c' },
|
2021-09-24 08:51:28 +10:00
|
|
|
{ "verbose", no_argument, NULL, 'v' },
|
2020-04-02 14:34:02 +02:00
|
|
|
{ NULL, 0, NULL, 0}
|
2020-03-30 14:09:44 +02:00
|
|
|
};
|
|
|
|
|
size_t i;
|
|
|
|
|
const struct spa_dict_item *item;
|
2021-09-24 08:51:28 +10:00
|
|
|
enum spa_log_level level = pw_log_level;
|
2021-10-14 13:52:28 +10:00
|
|
|
const char *config_dir;
|
2018-08-02 11:25:27 +02:00
|
|
|
|
|
|
|
|
pw_init(&argc, &argv);
|
|
|
|
|
|
2021-09-30 10:38:18 +10:00
|
|
|
PW_LOG_TOPIC_INIT(ms_topic);
|
2021-09-21 18:25:54 +10:00
|
|
|
|
2021-09-24 08:51:28 +10:00
|
|
|
while ((c = getopt_long(argc, argv, "hVc:v", long_options, NULL)) != -1) {
|
2021-02-19 17:48:27 +01:00
|
|
|
switch (c) {
|
2021-09-24 08:51:28 +10:00
|
|
|
case 'v':
|
|
|
|
|
if (level < SPA_LOG_LEVEL_TRACE)
|
2021-09-25 07:21:01 +10:00
|
|
|
pw_log_set_level(++level);
|
2021-09-24 08:51:28 +10:00
|
|
|
break;
|
2021-02-19 17:48:27 +01:00
|
|
|
case 'h':
|
|
|
|
|
do_show_help = true;
|
|
|
|
|
break;
|
|
|
|
|
case 'V':
|
|
|
|
|
fprintf(stdout, "%s\n"
|
|
|
|
|
"Compiled with libpipewire %s\n"
|
|
|
|
|
"Linked with libpipewire %s\n",
|
|
|
|
|
argv[0],
|
|
|
|
|
pw_get_headers_version(),
|
|
|
|
|
pw_get_library_version());
|
|
|
|
|
return 0;
|
|
|
|
|
case 'c':
|
|
|
|
|
config_name = optarg;
|
|
|
|
|
break;
|
|
|
|
|
default:
|
2021-09-30 10:41:26 +10:00
|
|
|
return 1;
|
2021-02-19 17:48:27 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-10-14 13:52:28 +10:00
|
|
|
config_dir = getenv("MEDIA_SESSION_CONFIG_DIR");
|
2021-10-15 10:53:32 +10:00
|
|
|
impl.config_dir = config_dir ? config_dir : SESSION_PREFIX;
|
2021-01-19 12:17:29 +01:00
|
|
|
impl.this.props = pw_properties_new(
|
2021-10-15 10:53:32 +10:00
|
|
|
PW_KEY_CONFIG_PREFIX, impl.config_dir,
|
2021-02-19 17:48:27 +01:00
|
|
|
PW_KEY_CONFIG_NAME, config_name,
|
2021-01-19 12:17:29 +01:00
|
|
|
NULL);
|
2020-12-31 13:06:17 +01:00
|
|
|
if (impl.this.props == NULL)
|
2021-09-30 10:41:26 +10:00
|
|
|
return 1;
|
2020-12-31 11:46:54 +01:00
|
|
|
|
2020-12-30 13:12:48 +01:00
|
|
|
if ((impl.conf = pw_properties_new(NULL, NULL)) == NULL)
|
2021-09-30 10:41:26 +10:00
|
|
|
return 1;
|
2021-02-10 19:57:35 +01:00
|
|
|
|
2021-10-18 11:18:22 +10:00
|
|
|
pw_conf_load_conf(impl.config_dir, config_name, impl.conf);
|
2021-02-10 19:57:35 +01:00
|
|
|
|
2021-02-12 17:28:52 +01:00
|
|
|
if ((str = pw_properties_get(impl.conf, "context.properties")) != NULL)
|
2020-12-31 13:06:17 +01:00
|
|
|
pw_properties_update_string(impl.this.props, str, strlen(str));
|
2020-12-21 05:49:16 +02:00
|
|
|
|
2020-12-30 13:12:48 +01:00
|
|
|
if ((impl.modules = pw_properties_new("default", "true", NULL)) == NULL)
|
2021-09-30 10:41:26 +10:00
|
|
|
return 1;
|
2021-02-11 17:54:21 +01:00
|
|
|
if ((str = pw_properties_get(impl.conf, "session.modules")) != NULL)
|
2020-12-30 13:12:48 +01:00
|
|
|
collect_modules(&impl, str);
|
2020-11-25 16:13:20 +01:00
|
|
|
|
2021-02-19 17:48:27 +01:00
|
|
|
if (do_show_help) {
|
|
|
|
|
show_help(argv[0], &impl, config_name);
|
|
|
|
|
return 0;
|
2020-03-30 14:09:44 +02:00
|
|
|
}
|
|
|
|
|
|
2021-09-30 08:36:01 +10:00
|
|
|
pw_log_info("media-session context properties:");
|
2020-07-06 12:51:38 +02:00
|
|
|
spa_dict_for_each(item, &impl.this.props->dict)
|
2020-03-30 14:09:44 +02:00
|
|
|
pw_log_info(" '%s' = '%s'", item->key, item->value);
|
|
|
|
|
|
2019-11-20 16:18:46 +01:00
|
|
|
impl.loop = pw_main_loop_new(NULL);
|
2020-01-16 16:19:09 +01:00
|
|
|
if (impl.loop == NULL)
|
2021-09-30 10:41:26 +10:00
|
|
|
return 1;
|
2019-11-20 16:18:46 +01:00
|
|
|
impl.this.loop = pw_main_loop_get_loop(impl.loop);
|
2020-06-01 18:14:53 +02:00
|
|
|
|
|
|
|
|
pw_loop_add_signal(impl.this.loop, SIGINT, do_quit, &impl);
|
|
|
|
|
pw_loop_add_signal(impl.this.loop, SIGTERM, do_quit, &impl);
|
|
|
|
|
|
2020-10-21 16:36:37 +02:00
|
|
|
impl.this.context = pw_context_new(impl.this.loop,
|
2021-01-19 12:17:29 +01:00
|
|
|
pw_properties_copy(impl.this.props),
|
2020-10-21 16:36:37 +02:00
|
|
|
0);
|
|
|
|
|
|
2020-01-16 16:19:09 +01:00
|
|
|
if (impl.this.context == NULL)
|
2021-09-30 10:41:26 +10:00
|
|
|
return 1;
|
2018-08-02 11:25:27 +02:00
|
|
|
|
2019-12-16 10:28:18 +01:00
|
|
|
pw_context_set_object(impl.this.context, SM_TYPE_MEDIA_SESSION, &impl);
|
|
|
|
|
|
2019-11-14 18:35:29 +01:00
|
|
|
pw_map_init(&impl.globals, 64, 64);
|
media-session: deal with global id race conditions
To resolve monitor and policy core global ids racing with each other,
use separate registry event handlers for both cores. Each handles only
their own objects, determined by where the object handle was created.
Postpone handling of policy core new global events after monitor sync,
which orders them after the corresponding monitor proxy and registry
events. Monitor core is then more up-to-date, so we resolve id clashes
in favor of monitor globals, which avoids duplicate objects.
Fix use-after-free by tracking whether a monitor holds references to
sm_object. Keep also objects pending for id in a list, so that they can
be cleaned up on session_shutdown (monitors may leak objects at
shutdown, because spa objectinfo events won't be handled then).
Caveats:
Zombie objects may still be created if the policy core is late by several
events, but in those cases the corresponding remove events are already
in the queue.
Also, there's a (theoretical) possibility that pw_registry_bind will
bind the wrong object, if the registry event is handled too late and an
id is reused by the server.
For details, see reverted 77e4fdb1e485681635b282579d1c8d26f828a0a6
for which this is another approach.
2021-02-27 14:55:13 +02:00
|
|
|
spa_list_init(&impl.object_list);
|
|
|
|
|
spa_list_init(&impl.registry_event_list);
|
2020-07-15 14:19:25 +02:00
|
|
|
spa_list_init(&impl.link_list);
|
2019-11-15 17:13:45 +01:00
|
|
|
pw_map_init(&impl.endpoint_links, 64, 64);
|
|
|
|
|
spa_list_init(&impl.endpoint_link_list);
|
2019-11-18 13:10:21 +01:00
|
|
|
spa_list_init(&impl.sync_list);
|
2019-11-14 18:35:29 +01:00
|
|
|
spa_hook_list_init(&impl.hooks);
|
2018-08-02 11:25:27 +02:00
|
|
|
|
2019-12-10 18:19:56 +01:00
|
|
|
support = pw_context_get_support(impl.this.context, &n_support);
|
2019-11-20 16:18:46 +01:00
|
|
|
|
2021-02-12 17:55:56 +01:00
|
|
|
impl.dbus = spa_support_find(support, n_support, SPA_TYPE_INTERFACE_DBus);
|
|
|
|
|
if (impl.dbus) {
|
|
|
|
|
impl.this.dbus_connection = spa_dbus_get_connection(impl.dbus, SPA_DBUS_TYPE_SESSION);
|
2021-02-09 17:48:01 +01:00
|
|
|
if (impl.this.dbus_connection == NULL)
|
|
|
|
|
pw_log_warn("no dbus connection");
|
2021-05-17 15:17:54 +02:00
|
|
|
else {
|
2021-02-09 17:48:01 +01:00
|
|
|
pw_log_debug("got dbus connection %p", impl.this.dbus_connection);
|
2021-05-17 15:17:54 +02:00
|
|
|
spa_dbus_connection_add_listener(impl.this.dbus_connection,
|
|
|
|
|
&impl.dbus_connection_listener,
|
|
|
|
|
&dbus_connection_events, &impl);
|
|
|
|
|
}
|
2021-02-12 17:55:56 +01:00
|
|
|
} else {
|
2021-04-28 20:29:44 +02:00
|
|
|
pw_log_info("dbus disabled");
|
2021-02-09 17:48:01 +01:00
|
|
|
}
|
2019-11-20 16:18:46 +01:00
|
|
|
|
2019-12-18 12:15:03 +01:00
|
|
|
if ((res = start_session(&impl)) < 0)
|
|
|
|
|
goto exit;
|
|
|
|
|
if ((res = start_policy(&impl)) < 0)
|
|
|
|
|
goto exit;
|
2019-11-03 10:31:14 +01:00
|
|
|
|
2020-03-30 14:09:44 +02:00
|
|
|
for (i = 0; i < SPA_N_ELEMENTS(modules); i++) {
|
|
|
|
|
const char *name = modules[i].name;
|
2020-12-30 13:12:48 +01:00
|
|
|
if (is_module_enabled(&impl, name)) {
|
2021-09-30 08:36:01 +10:00
|
|
|
pw_log_info("enabling media session module: %s", name);
|
2020-03-30 14:09:44 +02:00
|
|
|
modules[i].start(&impl.this);
|
|
|
|
|
}
|
|
|
|
|
}
|
2020-01-07 16:07:51 +01:00
|
|
|
|
|
|
|
|
// sm_session_manager_start(&impl.this);
|
|
|
|
|
|
2019-11-20 16:18:46 +01:00
|
|
|
pw_main_loop_run(impl.loop);
|
2019-12-18 12:59:42 +01:00
|
|
|
|
2019-12-18 12:15:03 +01:00
|
|
|
exit:
|
2019-12-18 12:59:42 +01:00
|
|
|
session_shutdown(&impl);
|
2018-08-02 11:25:27 +02:00
|
|
|
|
2019-12-10 18:19:56 +01:00
|
|
|
pw_context_destroy(impl.this.context);
|
2019-11-20 16:18:46 +01:00
|
|
|
pw_main_loop_destroy(impl.loop);
|
2018-08-02 11:25:27 +02:00
|
|
|
|
2019-12-18 12:15:03 +01:00
|
|
|
pw_map_clear(&impl.endpoint_links);
|
|
|
|
|
pw_map_clear(&impl.globals);
|
2020-06-04 17:41:01 +02:00
|
|
|
pw_properties_free(impl.this.props);
|
2020-12-30 13:12:48 +01:00
|
|
|
pw_properties_free(impl.conf);
|
|
|
|
|
pw_properties_free(impl.modules);
|
2019-12-18 12:15:03 +01:00
|
|
|
|
2020-06-02 17:37:45 +02:00
|
|
|
pw_deinit();
|
|
|
|
|
|
2019-12-18 12:15:03 +01:00
|
|
|
return res;
|
2018-08-02 11:25:27 +02:00
|
|
|
}
|