Previously the pointer was determined as follows:
mm->this.ptr = SPA_PTROFF(m->ptr, range.start, void);
however, when `pw_map_range` is calculated, `pw_map_range::start` is the offset
from the beginning of the first mapped page, i.e. the page that starts at
`pw_map_range::offset`. This works correctly if `memblock_map()` runs, because that
maps the file at the expected offset, so using `range.start` is correct.
However, when a mapping is reused (i.e. `memblock_find_mapping()` finds something),
`range.start` is not necessarily correct. Consider the following example:
* page size is 10
* one memblock with size 20 (2 pages)
the application wants two mappings:
* (offset=5,size=10)
* (offset=15,size=5)
After the first request from the application, a `mapping` object is created
that covers the first two pages of the memblock: offset=0 and size=20. During
the second request, the calculated `pw_map_range` is as follows:
{ start = 5, offset = 10, size = 10 }
and the only previously created mapping is reused, since it covers the requested
page range (0 <= 10 and 10 + 10 <= 0 + 20). When the pointer of the mapping is
adjusted afterwards, it will be incorrect, since `m->ptr` points to byte 0 on page 0
(instead of byte 0 on page 1, as the adjustment assumes). Therefore the two mappings
will unexpectedly overlap.
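For reference, the `pw_map_range` values above follow from the usual
round-down/round-up alignment step; a minimal sketch of that step (mirroring the
`pw_map_range_init()` helper and the SPA rounding macros as found in the current
headers; treat it as an illustration, not the exact library code):

/* align the second request (offset=15, size=5) to a hypothetical page size of 10 */
range.offset = SPA_ROUND_DOWN_N(offset, page_size);           /* 10: first page to map */
range.start  = offset - range.offset;                         /* 5: byte offset within that page */
range.size   = SPA_ROUND_UP_N(range.start + size, page_size); /* 10: whole pages to map */
/* result: { start = 5, offset = 10, size = 10 }, matching the range above */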
Fix that by using `offset - m->offset` when adjusting the mapping's pointer. Also move
the `range` variable into a smaller scope because it only makes sense there. And add
a test that checks the previously incorrect case described above.
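Sketch of the intended change to the pointer adjustment (illustrative, not the
literal patch):

/* before: byte offset within the freshly calculated range */
mm->this.ptr = SPA_PTROFF(m->ptr, range.start, void);
/* after: byte offset of the request relative to the start of the reused mapping */
mm->this.ptr = SPA_PTROFF(m->ptr, offset - m->offset, void);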
Fixes: 2caf81c97c ("mem: improve memory handling")
Fixes #4884
/* PipeWire */
/* SPDX-FileCopyrightText: Copyright © 2025 PipeWire authors */
/* SPDX-License-Identifier: MIT */

#include <unistd.h>

#include <pipewire/mem.h>
#include <spa/buffer/buffer.h>

#include "pwtest.h"

PWTEST(mempool_issue4884)
{
	/*
	 * See https://gitlab.freedesktop.org/pipewire/pipewire/-/issues/4884. This
	 * test checks if the offset is correctly applied when a mapping is reused.
	 */

	long page_size = sysconf(_SC_PAGESIZE);
	pwtest_errno_ok(page_size);
	pwtest_int_ge(page_size, 8);

	struct pw_mempool *p = pw_mempool_new(NULL);
	pwtest_ptr_notnull(p);

	struct pw_memblock *b = pw_mempool_alloc(p, PW_MEMBLOCK_FLAG_READWRITE, SPA_DATA_MemFd, 2 * page_size);
	pwtest_ptr_notnull(b);

	struct pw_memmap *m1 = pw_mempool_map_id(p, b->id, PW_MEMMAP_FLAG_READWRITE, page_size / 2, page_size, NULL);
	pwtest_ptr_notnull(m1);
	pwtest_ptr_eq(m1->block, b);

	struct pw_memmap *m2 = pw_mempool_map_id(p, b->id, PW_MEMMAP_FLAG_READWRITE, 3 * page_size / 2, page_size / 2, NULL);
	pwtest_ptr_notnull(m2);
	pwtest_ptr_eq(m2->block, b);

	pwtest_int_eq(SPA_PTRDIFF(m2->ptr, m1->ptr), page_size);

	pw_mempool_destroy(p);

	return PWTEST_PASS;
}

PWTEST_SUITE(pw_mempool)
{
	pwtest_add(mempool_issue4884, PWTEST_NOARG);

	return PWTEST_PASS;
}