Mirror of https://gitlab.freedesktop.org/pipewire/pipewire.git (synced 2025-10-29 05:40:27 -04:00)

	buffer: don't use ringbuffer in chunk
We can't use a ringbuffer on the chunk because it implies the consumer would
write to it to update the read position, which we can't do because the chunk
is read-only and might even be shared.
Go back to offset/size pairs, which can sort of do the same thing later if we
keep a non-shared read pointer in the consumer.
Keep the alsa timestamp and filled state around for future use.
mmap the input port meta/data/chunk as read-only.
Only do clock update requests when asked.
parent 2ad722b579
commit 4288a634f4

25 changed files with 165 additions and 126 deletions

@@ -25,7 +25,6 @@ extern "C" {
 #endif
 
 #include <spa/utils/defs.h>
-#include <spa/utils/ringbuffer.h>
 #include <spa/buffer/meta.h>
 #include <spa/support/type-map.h>
 
@@ -65,8 +64,12 @@ static inline void spa_type_data_map(struct spa_type_map *map, struct spa_type_d
 
 /** Chunk of memory */
 struct spa_chunk {
-	struct spa_ringbuffer area;	/**< ringbuffer with valid memory */
-	int32_t stride;			/**< stride of ringbuffer increment */
+	uint32_t offset;		/**< offset of valid data. Should be taken
+					  *  modulo the data maxsize to get the offset
+					  *  in the data memory. */
+	uint32_t size;			/**< size of valid data. Should be clamped to
+					  *  maxsize. */
+	int32_t stride;			/**< stride of valid data */
 };
 
 /** Data for a buffer */

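For orientation before the per-file changes: a minimal consumer-side sketch (not part of the commit) of how the new offset/size convention is meant to be read, in the same spirit as the alsa change further down. It assumes struct spa_data and SPA_MIN from the SPA headers shown above; the helper name is made up for illustration.

#include <string.h>
#include <stdint.h>
#include <spa/utils/defs.h>	/* SPA_MIN */
#include <spa/buffer/buffer.h>	/* struct spa_data / struct spa_chunk, the header changed above */

/* hypothetical helper: copy the valid region of a spa_data into dst,
 * which must hold at least SPA_MIN(chunk->size, maxsize) bytes */
static void chunk_read(const struct spa_data *d, uint8_t *dst)
{
	/* offset is taken modulo maxsize, size is clamped to maxsize */
	uint32_t offs = d->chunk->offset % d->maxsize;
	uint32_t size = SPA_MIN(d->chunk->size, d->maxsize);
	/* the valid region may wrap around the end of the data memory,
	 * so copy in at most two pieces */
	uint32_t l0 = SPA_MIN(size, d->maxsize - offs);
	uint32_t l1 = size - l0;

	memcpy(dst, (uint8_t *) d->data + offs, l0);
	if (l1 > 0)
		memcpy(dst + l0, d->data, l1);
}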
@@ -25,7 +25,6 @@ extern "C" {
 #endif
 
 #include <spa/utils/defs.h>
-#include <spa/utils/ringbuffer.h>
 #include <spa/support/type-map.h>
 
 /** \page page_meta Metadata

@@ -102,8 +102,8 @@ int spa_debug_buffer(const struct spa_buffer *buffer)
 		fprintf(stderr, "   offset:  %d\n", d->mapoffset);
 		fprintf(stderr, "   maxsize: %u\n", d->maxsize);
 		fprintf(stderr, "   chunk:   %p\n", d->chunk);
-		fprintf(stderr, "    read:   %d\n", d->chunk->area.readindex);
-		fprintf(stderr, "    write:  %u\n", d->chunk->area.writeindex);
+		fprintf(stderr, "    offset: %d\n", d->chunk->offset);
+		fprintf(stderr, "    size:   %u\n", d->chunk->size);
 		fprintf(stderr, "    stride: %d\n", d->chunk->stride);
 	}
 	return 0;

@@ -316,12 +316,29 @@ static int set_swparams(struct state *state)
 	return 0;
 }
 
-static inline void try_pull(struct state *state, snd_pcm_uframes_t frames, bool do_pull)
+static inline void calc_timeout(size_t target, size_t current,
+				size_t rate, snd_htimestamp_t *now,
+				struct timespec *ts)
+{
+	ts->tv_sec = now->tv_sec;
+	ts->tv_nsec = now->tv_nsec;
+	if (target > current)
+		ts->tv_nsec += ((target - current) * SPA_NSEC_PER_SEC) / rate;
+
+	while (ts->tv_nsec >= SPA_NSEC_PER_SEC) {
+		ts->tv_sec++;
+		ts->tv_nsec -= SPA_NSEC_PER_SEC;
+	}
+}
+
+static inline void try_pull(struct state *state, snd_pcm_uframes_t frames,
+		snd_pcm_uframes_t written, bool do_pull)
 {
 	struct spa_port_io *io = state->io;
 
 	if (spa_list_is_empty(&state->ready) && do_pull) {
-		spa_log_trace(state->log, "alsa-util %p: %d", state, io->status);
+		spa_log_trace(state->log, "alsa-util %p: %d %lu", state, io->status,
+				state->filled + written);
 		io->status = SPA_STATUS_NEED_BUFFER;
 		io->range.offset = state->sample_count * state->frame_size;
 		io->range.min_size = state->threshold * state->frame_size;
@@ -340,47 +357,53 @@ pull_frames(struct state *state,
 	snd_pcm_uframes_t total_frames = 0, to_write = SPA_MIN(frames, state->props.max_latency);
 	bool underrun = false;
 
-	try_pull(state, frames, do_pull);
+	try_pull(state, frames, 0, do_pull);
 
 	while (!spa_list_is_empty(&state->ready) && to_write > 0) {
-		uint8_t *dst;
+		uint8_t *dst, *src;
 		size_t n_bytes, n_frames;
 		struct buffer *b;
 		struct spa_data *d;
-		struct spa_ringbuffer *ringbuffer;
-		uint32_t index;
-		int32_t avail;
+		uint32_t index, offs, avail, l0, l1;
 
 		b = spa_list_first(&state->ready, struct buffer, link);
 		d = b->outbuf->datas;
 
 		dst = SPA_MEMBER(my_areas[0].addr, offset * state->frame_size, uint8_t);
+		src = d[0].data;
 
-		ringbuffer = &d[0].chunk->area;
-
-		avail = spa_ringbuffer_get_read_index(ringbuffer, &index);
+		index = d[0].chunk->offset + state->ready_offset;
+		avail = d[0].chunk->size - state->ready_offset;
 		avail /= state->frame_size;
 
 		n_frames = SPA_MIN(avail, to_write);
 		n_bytes = n_frames * state->frame_size;
 
-		spa_ringbuffer_read_data(ringbuffer, d[0].data, d[0].maxsize,
-					 index % d[0].maxsize, dst, n_bytes);
-		spa_ringbuffer_read_update(ringbuffer, index + n_bytes);
+		offs = index % d[0].maxsize;
+		l0 = SPA_MIN(n_bytes, d[0].maxsize - offs);
+		l1 = n_bytes - l0;
 
-		if (avail == n_frames || state->n_buffers == 1) {
+		memcpy(dst, src + offs, l0);
+		if (l1 > 0)
+			memcpy(dst + l0, src, l1);
+
+		state->ready_offset += n_bytes;
+
+		if (state->ready_offset >= d[0].chunk->size) {
 			spa_list_remove(&b->link);
 			b->outstanding = true;
 			spa_log_trace(state->log, "alsa-util %p: reuse buffer %u", state, b->outbuf->id);
 			state->callbacks->reuse_buffer(state->callbacks_data, 0, b->outbuf->id);
+			state->ready_offset = 0;
 		}
 		total_frames += n_frames;
 		to_write -= n_frames;
 
-		spa_log_trace(state->log, "alsa-util %p: %u written %lu frames, left %ld", state, index, total_frames, to_write);
+		spa_log_trace(state->log, "alsa-util %p: written %lu frames, left %ld",
+				state, total_frames, to_write);
 	}
 
-	try_pull(state, frames, do_pull);
+	try_pull(state, frames, total_frames, do_pull);
 
 	if (total_frames == 0 && do_pull) {
 		total_frames = SPA_MIN(frames, state->threshold);
@@ -414,8 +437,7 @@ push_frames(struct state *state,
 		size_t n_bytes;
 		struct buffer *b;
 		struct spa_data *d;
-		uint32_t index, avail;
-		int32_t filled;
+		uint32_t index, offs, avail, l0, l1;
 
 		b = spa_list_first(&state->free, struct buffer, link);
 		spa_list_remove(&b->link);
@@ -430,15 +452,21 @@ push_frames(struct state *state,
 
 		src = SPA_MEMBER(my_areas[0].addr, offset * state->frame_size, uint8_t);
 
-		filled = spa_ringbuffer_get_write_index(&d[0].chunk->area, &index);
-		avail = (d[0].maxsize - filled) / state->frame_size;
+		avail = d[0].maxsize / state->frame_size;
+		index = 0;
 		total_frames = SPA_MIN(avail, frames);
 		n_bytes = total_frames * state->frame_size;
 
-		spa_ringbuffer_write_data(&d[0].chunk->area, d[0].data, d[0].maxsize,
-					index % d[0].maxsize, src, n_bytes);
+		offs = index % d[0].maxsize;
+		l0 = SPA_MIN(n_bytes, d[0].maxsize - offs);
+		l1 = n_bytes - l0;
 
-		spa_ringbuffer_write_update(&d[0].chunk->area, index + n_bytes);
+		memcpy(src, d[0].data + offs, l0);
+		if (l1 > 0)
+			memcpy(src + l0, d[0].data, l1);
+
+		d[0].chunk->offset = index;
+		d[0].chunk->size = n_bytes;
 		d[0].chunk->stride = state->frame_size;
 
 		b->outstanding = true;
@@ -464,21 +492,6 @@ static int alsa_try_resume(struct state *state)
 	return res;
 }
 
-static inline void calc_timeout(size_t target, size_t current,
-				size_t rate, snd_htimestamp_t *now,
-				struct timespec *ts)
-{
-	ts->tv_sec = now->tv_sec;
-	ts->tv_nsec = now->tv_nsec;
-	if (target > current)
-		ts->tv_nsec += ((target - current) * SPA_NSEC_PER_SEC) / rate;
-
-	while (ts->tv_nsec >= SPA_NSEC_PER_SEC) {
-		ts->tv_sec++;
-		ts->tv_nsec -= SPA_NSEC_PER_SEC;
-	}
-}
-
 static void alsa_on_playback_timeout_event(struct spa_source *source)
 {
 	uint64_t exp;
@@ -487,10 +500,9 @@ static void alsa_on_playback_timeout_event(struct spa_source *source)
 	snd_pcm_t *hndl = state->hndl;
 	snd_pcm_sframes_t avail;
 	struct itimerspec ts;
-	snd_pcm_uframes_t total_written = 0, filled;
+	snd_pcm_uframes_t total_written = 0;
 	const snd_pcm_channel_area_t *my_areas;
 	snd_pcm_status_t *status;
-	snd_htimestamp_t htstamp;
 
 	if (state->started && read(state->timerfd, &exp, sizeof(uint64_t)) != sizeof(uint64_t))
 		spa_log_warn(state->log, "error reading timerfd: %s", strerror(errno));
@@ -503,27 +515,27 @@ static void alsa_on_playback_timeout_event(struct spa_source *source)
 	}
 
 	avail = snd_pcm_status_get_avail(status);
-	snd_pcm_status_get_htstamp(status, &htstamp);
+	snd_pcm_status_get_htstamp(status, &state->now);
 
 	if (avail > state->buffer_frames)
 		avail = state->buffer_frames;
 
-	filled = state->buffer_frames - avail;
+	state->filled = state->buffer_frames - avail;
 
-	state->last_ticks = state->sample_count - filled;
-	state->last_monotonic = (int64_t) htstamp.tv_sec * SPA_NSEC_PER_SEC + (int64_t) htstamp.tv_nsec;
+	state->last_ticks = state->sample_count - state->filled;
+	state->last_monotonic = (int64_t) state->now.tv_sec * SPA_NSEC_PER_SEC + (int64_t) state->now.tv_nsec;
 
-	spa_log_trace(state->log, "timeout %ld %d %ld %ld %ld", filled, state->threshold,
-		      state->sample_count, htstamp.tv_sec, htstamp.tv_nsec);
+	spa_log_trace(state->log, "timeout %ld %d %ld %ld %ld", state->filled, state->threshold,
+		      state->sample_count, state->now.tv_sec, state->now.tv_nsec);
 
-	if (filled > state->threshold) {
+	if (state->filled > state->threshold) {
 		if (snd_pcm_state(hndl) == SND_PCM_STATE_SUSPENDED) {
 			spa_log_error(state->log, "suspended: try resume");
 			if ((res = alsa_try_resume(state)) < 0)
 				return;
 		}
 	} else {
-		snd_pcm_uframes_t to_write = state->buffer_frames - filled;
+		snd_pcm_uframes_t to_write = avail;
 		bool do_pull = true;
 
 		while (total_written < to_write) {
@@ -547,9 +559,10 @@ static void alsa_on_playback_timeout_event(struct spa_source *source)
 					return;
 			}
 			total_written += written;
+			state->sample_count += written;
+			state->filled += written;
 			do_pull = false;
 		}
-		state->sample_count += total_written;
 	}
 	if (!state->alsa_started && total_written > 0) {
 		spa_log_debug(state->log, "snd_pcm_start");
@@ -560,7 +573,7 @@ static void alsa_on_playback_timeout_event(struct spa_source *source)
 		state->alsa_started = true;
 	}
 
-	calc_timeout(total_written + filled, state->threshold, state->rate, &htstamp, &ts.it_value);
+	calc_timeout(state->filled, state->threshold, state->rate, &state->now, &ts.it_value);
 
 	ts.it_interval.tv_sec = 0;
 	ts.it_interval.tv_nsec = 0;

@@ -32,7 +32,6 @@ extern "C" {
 #include <spa/support/loop.h>
 #include <spa/support/log.h>
 #include <spa/utils/list.h>
-#include <spa/utils/ringbuffer.h>
 
 #include <spa/clock/clock.h>
 #include <spa/node/node.h>
@@ -150,13 +149,17 @@ struct state {
 	struct spa_list free;
 	struct spa_list ready;
 
+	size_t ready_offset;
+
 	bool started;
 	struct spa_source source;
 	int timerfd;
 	bool alsa_started;
 	int threshold;
 
+	snd_htimestamp_t now;
 	int64_t sample_count;
+	int64_t filled;
 	int64_t last_ticks;
 	int64_t last_monotonic;
 

@@ -688,20 +688,22 @@ add_port_data(struct impl *this, void *out, size_t outsize, size_t next, struct
 {
 	size_t insize;
 	struct buffer *b;
-	uint32_t index = 0, offset, len1, len2, maxsize;
+	uint32_t index, offset, len1, len2, maxsize;
 	mix_func_t mix = layer == 0 ? this->copy : this->add;
+	struct spa_data *d;
 	void *data;
-	struct spa_ringbuffer *rb;
 
 	b = spa_list_first(&port->queue, struct buffer, link);
 
-	maxsize = b->outbuf->datas[0].maxsize;
-	data = b->outbuf->datas[0].data;
-	rb = &b->outbuf->datas[0].chunk->area,
+	d = b->outbuf->datas;
 
-	insize = spa_ringbuffer_get_read_index(rb, &index);
+	maxsize = d[0].maxsize;
+	data = d[0].data;
+
+	insize = SPA_MIN(d[0].chunk->size, maxsize);
 	outsize = SPA_MIN(outsize, insize);
 
+	index = d[0].chunk->offset + (insize - port->queued_bytes);
 	offset = index % maxsize;
 
 	len1 = SPA_MIN(outsize, maxsize - offset);
@@ -709,17 +711,14 @@ add_port_data(struct impl *this, void *out, size_t outsize, size_t next, struct
 	if ((len2 = outsize - len1) > 0)
 		mix(out + len1, data, len2);
 
-	spa_ringbuffer_read_update(rb, index + outsize);
-
 	port->queued_bytes -= outsize;
 
-	if (outsize == insize || next == 0) {
+	if (port->queued_bytes == 0) {
 		spa_log_trace(this->log, NAME " %p: return buffer %d on port %p %zd",
 			      this, b->outbuf->id, port, outsize);
 		port->io->buffer_id = b->outbuf->id;
 		spa_list_remove(&b->link);
 		b->outstanding = true;
-		port->queued_bytes = 0;
 	} else {
 		spa_log_trace(this->log, NAME " %p: keeping buffer %d on port %p %zd %zd",
 			      this, b->outbuf->id, port, port->queued_bytes, outsize);
@@ -733,9 +732,7 @@ static int mix_output(struct impl *this, size_t n_bytes)
 	struct port *outport;
 	struct spa_port_io *outio;
 	struct spa_data *od;
-	int32_t filled, avail, maxsize;
-	uint32_t index = 0, len1, len2, offset;
-	struct spa_ringbuffer *rb;
+	uint32_t avail, index, maxsize, len1, len2, offset;
 
 	outport = GET_OUT_PORT(this, 0);
 	outio = outport->io;
@@ -752,10 +749,8 @@ static int mix_output(struct impl *this, size_t n_bytes)
 	od = outbuf->outbuf->datas;
 	maxsize = od[0].maxsize;
 
-	rb = &od[0].chunk->area;
-
-	filled = spa_ringbuffer_get_write_index(rb, &index);
-	avail = maxsize - filled;
+	avail = maxsize;
+	index = 0;
 	n_bytes = SPA_MIN(n_bytes, avail);
 
 	offset = index % maxsize;
@@ -782,7 +777,9 @@ static int mix_output(struct impl *this, size_t n_bytes)
 		layer++;
 	}
 
-	spa_ringbuffer_write_update(rb, index + n_bytes);
+	od[0].chunk->offset = index;
+	od[0].chunk->size = n_bytes;
+	od[0].chunk->stride = 0;
 
 	outio->buffer_id = outbuf->outbuf->id;
 	outio->status = SPA_STATUS_HAVE_BUFFER;
@@ -819,8 +816,7 @@ static int impl_node_process_input(struct spa_node *node)
 		if (inport->queued_bytes == 0 &&
 		    inio->status == SPA_STATUS_HAVE_BUFFER && inio->buffer_id < inport->n_buffers) {
 			struct buffer *b = &inport->buffers[inio->buffer_id];
-			uint32_t index;
-			struct spa_ringbuffer *rb;
+			struct spa_data *d = b->outbuf->datas;
 
 			if (!b->outstanding) {
 				spa_log_warn(this->log, NAME " %p: buffer %u in use", this,
@@ -835,8 +831,7 @@ static int impl_node_process_input(struct spa_node *node)
 
 			spa_list_append(&inport->queue, &b->link);
 
-			rb = &b->outbuf->datas[0].chunk->area;
-			inport->queued_bytes += spa_ringbuffer_get_read_index(rb, &index);
+			inport->queued_bytes = SPA_MIN(d[0].chunk->size, d[0].maxsize);
 
 			spa_log_trace(this->log, NAME " %p: queue buffer %d on port %d %zd %zd",
 				      this, b->outbuf->id, i, inport->queued_bytes, min_queued);

@@ -299,7 +299,6 @@ static int make_buffer(struct impl *this)
 	int n_bytes, n_samples;
 	uint32_t maxsize;
 	void *data;
-	struct spa_ringbuffer *rb;
 	struct spa_data *d;
 	int32_t filled, avail;
 	uint32_t index, offset, l0, l1;
@@ -329,9 +328,8 @@ static int make_buffer(struct impl *this)
 	spa_log_trace(this->log, NAME " %p: dequeue buffer %d %d %d", this, b->outbuf->id,
 		      maxsize, n_bytes);
 
-	rb = &d[0].chunk->area;
-
-	filled = spa_ringbuffer_get_write_index(rb, &index);
+	filled = 0;
+	index = 0;
 	avail = maxsize - filled;
 	n_bytes = SPA_MIN(avail, n_bytes);
 
@@ -346,7 +344,9 @@ static int make_buffer(struct impl *this)
 	if (l1 > 0)
 		this->render_func(this, data, l1);
 
-	spa_ringbuffer_write_update(rb, index + n_bytes);
+	d[0].chunk->offset = index;
+	d[0].chunk->size = n_bytes;
+	d[0].chunk->stride = this->bpf;
 
 	if (b->h) {
 		b->h->seq = this->sample_count;

@@ -267,7 +267,8 @@ static int consume_buffer(struct impl *this)
 
 	render_buffer(this, b);
 
-	spa_ringbuffer_set_avail(&b->outbuf->datas[0].chunk->area, n_bytes);
+	b->outbuf->datas[0].chunk->offset = 0;
+	b->outbuf->datas[0].chunk->size = n_bytes;
 	b->outbuf->datas[0].chunk->stride = n_bytes;
 
 	if (b->h) {

@@ -279,7 +279,8 @@ static int make_buffer(struct impl *this)
 
 	fill_buffer(this, b);
 
-	spa_ringbuffer_set_avail(&b->outbuf->datas[0].chunk->area, n_bytes);
+	b->outbuf->datas[0].chunk->offset = 0;
+	b->outbuf->datas[0].chunk->size = n_bytes;
 	b->outbuf->datas[0].chunk->stride = n_bytes;
 
 	if (b->h) {

@@ -930,7 +930,8 @@ static int mmap_read(struct impl *this)
 	}
 
 	d = b->outbuf->datas;
-	spa_ringbuffer_set_avail(&d[0].chunk->area, buf.bytesused);
+	d[0].chunk->offset = 0;
+	d[0].chunk->size = buf.bytesused;
 	d[0].chunk->stride = port->fmt.fmt.pix.bytesperline;
 
 	b->outstanding = true;
@@ -1090,7 +1091,8 @@ mmap_init(struct impl *this,
 		d = buffers[i]->datas;
 		d[0].mapoffset = 0;
 		d[0].maxsize = b->v4l2_buffer.length;
-		spa_ringbuffer_set_avail(&d[0].chunk->area, 0);
+		d[0].chunk->offset = 0;
+		d[0].chunk->size = 0;
 		d[0].chunk->stride = state->fmt.fmt.pix.bytesperline;
 
 		if (state->export_buf) {

@@ -294,7 +294,8 @@ static int make_buffer(struct impl *this)
 
 	fill_buffer(this, b);
 
-	spa_ringbuffer_set_avail(&b->outbuf->datas[0].chunk->area, n_bytes);
+	b->outbuf->datas[0].chunk->offset = 0;
+	b->outbuf->datas[0].chunk->size = n_bytes;
 	b->outbuf->datas[0].chunk->stride = this->stride;
 
 	if (b->h) {

@@ -690,7 +690,7 @@ static void do_volume(struct impl *this, struct spa_buffer *dbuf, struct spa_buf
 	struct spa_data *sd, *dd;
 	int16_t *src, *dst;
 	double volume;
-	uint32_t towrite, savail, davail;
+	uint32_t written, towrite, savail, davail;
 	uint32_t sindex, dindex;
 
 	volume = this->props.volume;
@@ -698,13 +698,16 @@ static void do_volume(struct impl *this, struct spa_buffer *dbuf, struct spa_buf
 	sd = sbuf->datas;
 	dd = dbuf->datas;
 
-	savail = spa_ringbuffer_get_read_index(&sd[0].chunk->area, &sindex);
-	davail = spa_ringbuffer_get_write_index(&dd[0].chunk->area, &dindex);
+	savail = SPA_MIN(sd[0].chunk->size, sd[0].maxsize);
+	sindex = sd[0].chunk->offset;
+	davail = 0;
+	dindex = 0;
 	davail = dd[0].maxsize - davail;
 
 	towrite = SPA_MIN(savail, davail);
+	written = 0;
 
-	while (towrite > 0) {
+	while (written < towrite) {
 		uint32_t soffset = sindex % sd[0].maxsize;
 		uint32_t doffset = dindex % dd[0].maxsize;
 
@@ -720,10 +723,11 @@ static void do_volume(struct impl *this, struct spa_buffer *dbuf, struct spa_buf
 
 		sindex += n_bytes;
 		dindex += n_bytes;
-		towrite -= n_bytes;
+		written += n_bytes;
 	}
-	spa_ringbuffer_read_update(&sd[0].chunk->area, sindex);
-	spa_ringbuffer_write_update(&dd[0].chunk->area, dindex);
+	dd[0].chunk->offset = 0;
+	dd[0].chunk->size = written;
+	dd[0].chunk->stride = 0;
 }
 
 static int impl_node_process_input(struct spa_node *node)

@@ -171,7 +171,8 @@ init_buffer(struct data *data, struct spa_buffer **bufs, struct buffer *ba, int
 		b->datas[0].maxsize = size;
 		b->datas[0].data = malloc(size);
 		b->datas[0].chunk = &b->chunks[0];
-		spa_ringbuffer_set_avail(&b->datas[0].chunk->area, 0);
+		b->datas[0].chunk->offset = 0;
+		b->datas[0].chunk->size = 0;
 		b->datas[0].chunk->stride = 0;
 	}
 }

@@ -182,7 +182,8 @@ init_buffer(struct data *data, struct spa_buffer **bufs, struct buffer *ba, int
 		b->datas[0].maxsize = size;
 		b->datas[0].data = malloc(size);
 		b->datas[0].chunk = &b->chunks[0];
-		spa_ringbuffer_init(&b->datas[0].chunk->area);
+		b->datas[0].chunk->offset = 0;
+		b->datas[0].chunk->size = 0;
 		b->datas[0].chunk->stride = 0;
 	}
 }

@@ -167,7 +167,8 @@ init_buffer(struct data *data, struct spa_buffer **bufs, struct buffer *ba, int
 		b->datas[0].maxsize = size;
 		b->datas[0].data = malloc(size);
 		b->datas[0].chunk = &b->chunks[0];
-		spa_ringbuffer_set_avail(&b->datas[0].chunk->area, size);
+		b->datas[0].chunk->offset = 0;
+		b->datas[0].chunk->size = size;
 		b->datas[0].chunk->stride = 0;
 	}
 }

@@ -149,7 +149,8 @@ init_buffer(struct data *data, struct spa_buffer **bufs, struct buffer *ba, int
 		b->datas[0].maxsize = size;
 		b->datas[0].data = malloc(size);
 		b->datas[0].chunk = &b->chunks[0];
-		spa_ringbuffer_set_avail(&b->datas[0].chunk->area, 0);
+		b->datas[0].chunk->offset = 0;
+		b->datas[0].chunk->size = 0;
 		b->datas[0].chunk->stride = 0;
 	}
 }

@@ -232,7 +232,8 @@ static void on_source_have_output(void *_data)
 		datas[0].mapoffset = 0;
 		datas[0].maxsize = sstride * 240;
 		datas[0].data = sdata;
-		spa_ringbuffer_set_avail(&datas[0].chunk->area, sstride * 240);
+		datas[0].chunk->offset = 0;
+		datas[0].chunk->size = sstride * 240;
 		datas[0].chunk->stride = sstride;
 	} else {
 		uint8_t *map;
@@ -369,7 +370,8 @@ static int setup_buffers(struct data *data)
 		b->datas[0].maxsize = 0;
 		b->datas[0].data = NULL;
 		b->datas[0].chunk = &b->chunks[0];
-		spa_ringbuffer_set_avail(&b->datas[0].chunk->area, 0);
+		b->datas[0].chunk->offset = 0;
+		b->datas[0].chunk->size = 0;
 		b->datas[0].chunk->stride = 0;
 	}
 	data->n_buffers = MAX_BUFFERS;
@@ -402,7 +404,8 @@ static int sdl_alloc_buffers(struct data *data)
 		b->datas[0].type = data->type.data.MemPtr;
 		b->datas[0].maxsize = stride * 240;
 		b->datas[0].data = ptr;
-		spa_ringbuffer_set_avail(&b->datas[0].chunk->area, stride * 240);
+		b->datas[0].chunk->offset = 0;
+		b->datas[0].chunk->size = stride * 240;
 		b->datas[0].chunk->stride = stride;
 	}
 	return 0;

@@ -364,8 +364,8 @@ static int impl_node_process_output(struct spa_node *node)
 	int16_t *dst;
         struct spa_port_io *io = d->io;
 	uint32_t maxsize, index = 0;
-	struct spa_ringbuffer *rb;
 	uint32_t filled, offset;
+	struct spa_data *od;
 
 	if (io->buffer_id < d->n_buffers) {
 		reuse_buffer(d, io->buffer_id);
@@ -378,10 +378,12 @@ static int impl_node_process_output(struct spa_node *node)
         b = spa_list_first(&d->empty, struct buffer, link);
         spa_list_remove(&b->link);
 
-	maxsize = b->buffer->datas[0].maxsize;
-	rb = &b->buffer->datas[0].chunk->area;
+	od = b->buffer->datas;
 
-	filled = spa_ringbuffer_get_write_index(rb, &index);
+	maxsize = od[0].maxsize;
+
+	filled = 0;
+	index = 0;
 	avail = maxsize - filled;
 	offset = index % maxsize;
 
@@ -404,7 +406,9 @@ static int impl_node_process_output(struct spa_node *node)
                         *dst++ = val;
         }
 
-	spa_ringbuffer_write_update(rb, index + avail);
+	od[0].chunk->offset = 0;
+	od[0].chunk->size = avail;
+	od[0].chunk->stride = 0;
 
 	io->buffer_id = b->buffer->id;
 	io->status = SPA_STATUS_HAVE_BUFFER;

@@ -526,8 +526,8 @@ do_send_buffer (GstPipeWireSink *pwsink)
   for (i = 0; i < data->buf->n_datas; i++) {
     struct spa_data *d = &data->buf->datas[i];
     GstMemory *mem = gst_buffer_peek_memory (buffer, i);
-    d->chunk->area.readindex = mem->offset - data->offset;
-    d->chunk->area.writeindex = d->chunk->area.readindex + mem->size;
+    d->chunk->offset = mem->offset - data->offset;
+    d->chunk->size = mem->size;
   }
 
   if (!(res = pw_stream_send_buffer (pwsink->stream, data->id))) {

@@ -464,11 +464,9 @@ on_new_buffer (void *_data,
   }
   for (i = 0; i < data->buf->n_datas; i++) {
     struct spa_data *d = &data->buf->datas[i];
-    uint32_t index;
     GstMemory *mem = gst_buffer_peek_memory (buf, i);
-    mem->size = spa_ringbuffer_get_read_index(&d->chunk->area, &index);
-    mem->offset = index % d->maxsize;
-    spa_ringbuffer_set_avail(&d->chunk->area, 0);
+    mem->offset = SPA_MIN(d->chunk->offset, d->maxsize);
+    mem->size = SPA_MIN(d->chunk->size, d->maxsize - mem->offset);
   }
 
   if (pwsrc->always_copy)
@@ -663,7 +661,7 @@ gst_pipewire_src_negotiate (GstBaseSrc * basesrc)
   pw_stream_connect (pwsrc->stream,
                      PW_DIRECTION_INPUT,
                      pwsrc->path,
-                     PW_STREAM_FLAG_AUTOCONNECT,
+                     PW_STREAM_FLAG_AUTOCONNECT | PW_STREAM_FLAG_CLOCK_UPDATE,
                      (const struct spa_pod **)possible->pdata,
                      possible->len);
   g_ptr_array_free (possible, TRUE);

@@ -21,6 +21,7 @@
 #include <errno.h>
 #include <sys/mman.h>
 
+#include <spa/utils/ringbuffer.h>
 #include <pipewire/log.h>
 #include <extensions/client-node.h>
 

@@ -305,7 +305,9 @@ static int driver_process_output(struct spa_node *node)
 		in_io->status = SPA_STATUS_NEED_BUFFER;
 	}
 
-	spa_ringbuffer_set_avail(&out->outbuf->datas[0].chunk->area, ctrl->buffer_size * sizeof(int16_t) * 2);
+	out->outbuf->datas[0].chunk->offset = 0;
+	out->outbuf->datas[0].chunk->size = ctrl->buffer_size * sizeof(int16_t) * 2;
+	out->outbuf->datas[0].chunk->stride = 0;
 
 	spa_hook_list_call(&nd->listener_list, struct pw_jack_node_events, push);
 	gn->ready[SPA_DIRECTION_INPUT] = gn->required[SPA_DIRECTION_OUTPUT] = 0;

@@ -305,7 +305,8 @@ static struct spa_pod *find_param(struct spa_pod **params, int n_params, uint32_
  *   || | ... <n_metas>                |
  *   || +------------------------------+
  *   +->| struct spa_chunk             | memory for n_datas chunks
- *    | |   struct spa_ringbuffer area |
+ *    | |   uint32_t offset            |
+ *    | |   uint32_t size              |
  *    | |   int32_t stride             |
  *    | | ... <n_datas> chunks         |
  *    | +------------------------------+
@@ -438,7 +439,8 @@ static struct spa_buffer **alloc_buffers(struct pw_link *this,
 				d->mapoffset = SPA_PTRDIFF(ddp, mem->ptr);
 				d->maxsize = data_sizes[j];
 				d->data = SPA_MEMBER(mem->ptr, d->mapoffset, void);
-				spa_ringbuffer_set_avail(&d->chunk->area, 0);
+				d->chunk->offset = 0;
+				d->chunk->size = 0;
 				d->chunk->stride = data_strides[j];
 				ddp += data_sizes[j];
 			} else {

@@ -955,8 +955,7 @@ client_node_port_use_buffers(void *object,
 
 		if (mid->ptr == NULL) {
 			mid->ptr =
-			    mmap(NULL, mid->size + mid->offset, prot, MAP_SHARED,
-				 mid->fd, 0);
+			    mmap(NULL, mid->size + mid->offset, prot, MAP_SHARED, mid->fd, 0);
 			if (mid->ptr == MAP_FAILED) {
 				mid->ptr = NULL;
 				pw_log_warn("Failed to mmap memory %d %p: %s", mid->size, mid,

@@ -661,10 +661,12 @@ static void handle_socket(struct pw_stream *stream, int rtreadfd, int rtwritefd)
 					       SPA_IO_ERR | SPA_IO_HUP,
 					       true, on_rtsocket_condition, stream);
 
-	impl->timeout_source = pw_loop_add_timer(stream->remote->core->main_loop, on_timeout, stream);
-	interval.tv_sec = 0;
-	interval.tv_nsec = 100000000;
-	pw_loop_update_timer(stream->remote->core->main_loop, impl->timeout_source, NULL, &interval, false);
+	if (impl->flags & PW_STREAM_FLAG_CLOCK_UPDATE) {
+		impl->timeout_source = pw_loop_add_timer(stream->remote->core->main_loop, on_timeout, stream);
+		interval.tv_sec = 0;
+		interval.tv_nsec = 100000000;
+		pw_loop_update_timer(stream->remote->core->main_loop, impl->timeout_source, NULL, &interval, false);
+	}
 	return;
 }
 

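Since the stream's clock-update timer above is now only armed when PW_STREAM_FLAG_CLOCK_UPDATE is set, a client that still wants periodic clock updates has to ask for them at connect time. A sketch of the call, mirroring the gst-pipewire change in this commit; stream, path, params and n_params stand in for the caller's own values:

/* opt in to clock updates explicitly when connecting the stream */
pw_stream_connect(stream,
                  PW_DIRECTION_INPUT,
                  path,
                  PW_STREAM_FLAG_AUTOCONNECT | PW_STREAM_FLAG_CLOCK_UPDATE,
                  (const struct spa_pod **) params, n_params);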
@@ -830,6 +832,9 @@ client_node_port_use_buffers(void *data,
 	struct buffer_id *bid;
 	uint32_t i, j, len;
 	struct spa_buffer *b;
+	int prot;
+
+	prot = PROT_READ | (direction == SPA_DIRECTION_OUTPUT ? PROT_WRITE : 0);
 
 	/* clear previous buffers */
 	clear_buffers(stream);
@@ -845,8 +850,7 @@ client_node_port_use_buffers(void *data,
 
 		if (mid->ptr == NULL) {
 			mid->ptr =
-			    mmap(NULL, mid->size + mid->offset, PROT_READ | PROT_WRITE, MAP_SHARED,
-				 mid->fd, 0);
+			    mmap(NULL, mid->size + mid->offset, prot, MAP_SHARED, mid->fd, 0);
 			if (mid->ptr == MAP_FAILED) {
 				mid->ptr = NULL;
 				pw_log_warn("Failed to mmap memory %d %p: %s", mid->size, mid,

Wim Taymans