shm: rework alignment when punching memory

Lennart Poettering 2009-05-14 19:51:05 +02:00
parent 763954c467
commit 0921b1b4a3

src/pulsecore/shm.c

@@ -69,7 +69,10 @@
 #define SHM_MARKER ((int) 0xbeefcafe)
 
 /* We now put this SHM marker at the end of each segment. It's
- * optional, to not require a reboot when upgrading, though */
+ * optional, to not require a reboot when upgrading, though. Note that
+ * on multiarch systems 32bit and 64bit processes might access this
+ * region simultaneously. The header fields need to be independent
+ * from the process' word width */
 struct shm_marker {
     pa_atomic_t marker; /* 0xbeefcafe */
     pa_atomic_t pid;
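
The comment added above is the point of this hunk: on multiarch systems a 32-bit client and a 64-bit daemon may map the very same segment, so the marker must avoid long, pointers, or anything else whose size depends on the ABI. A minimal C11 sketch of that property, using a hypothetical shm_marker_demo with plain fixed-width fields in place of the real struct's pa_atomic_t:

    #include <stdint.h>

    /* Fixed-width fields give an identical layout no matter the word
     * width of the process mapping the segment. */
    struct shm_marker_demo {
        int32_t  marker;      /* 0xbeefcafe */
        int32_t  pid;
        uint64_t _reserved1;
        uint64_t _reserved2;
        uint64_t _reserved3;
        uint64_t _reserved4;
    } __attribute__((packed));

    /* Holds whether compiled with -m32 or -m64. */
    _Static_assert(sizeof(struct shm_marker_demo) == 40,
                   "marker layout must not depend on the process' word width");
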
@@ -79,6 +82,8 @@ struct shm_marker {
     uint64_t _reserved4;
 } PA_GCC_PACKED;
 
+#define SHM_MARKER_SIZE PA_ALIGN(sizeof(struct shm_marker))
+
 static char *segment_name(char *fn, size_t l, unsigned id) {
     pa_snprintf(fn, l, "/pulse-shm-%u", id);
     return fn;
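
The new SHM_MARKER_SIZE define replaces the PA_ALIGN(sizeof(struct shm_marker)) expression that the rest of the file previously spelled out at every use. For illustration only, an align-up macro of the usual shape, assuming an 8-byte quantum (whatever quantum PA_ALIGN actually uses, the rounding idiom is the same):

    #include <stddef.h>

    /* Hypothetical stand-in for PA_ALIGN: round l up to a multiple of 8. */
    #define ALIGN8(l) (((size_t) (l) + 7) & ~(size_t) 7)

    /* e.g. ALIGN8(40) == 40, ALIGN8(41) == 48 */
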
@@ -97,15 +102,15 @@ int pa_shm_create_rw(pa_shm *m, size_t size, pa_bool_t shared, mode_t mode) {
      * ones */
     pa_shm_cleanup();
 
-    /* Round up to make it aligned */
-    size = PA_ALIGN(size);
+    /* Round up to make it page aligned */
+    size = PA_PAGE_ALIGN(size);
 
     if (!shared) {
         m->id = 0;
         m->size = size;
 
 #ifdef MAP_ANONYMOUS
-        if ((m->ptr = mmap(NULL, PA_PAGE_ALIGN(m->size), PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, (off_t) 0)) == MAP_FAILED) {
+        if ((m->ptr = mmap(NULL, m->size, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, (off_t) 0)) == MAP_FAILED) {
             pa_log("mmap() failed: %s", pa_cstrerror(errno));
             goto fail;
         }
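
With size rounded up by PA_PAGE_ALIGN once at the top, the anonymous-mmap branch no longer needs to align it again at the call site; mmap() deals in whole pages anyway. A sketch of what such page rounding amounts to, using sysconf() instead of PulseAudio's macro:

    #include <stddef.h>
    #include <unistd.h>

    /* Round a length up to the runtime page size. */
    static size_t page_align(size_t l) {
        size_t ps = (size_t) sysconf(_SC_PAGESIZE);
        return ((l + ps - 1) / ps) * ps;
    }
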
@@ -136,7 +141,7 @@ int pa_shm_create_rw(pa_shm *m, size_t size, pa_bool_t shared, mode_t mode) {
             goto fail;
         }
 
-        m->size = size + PA_ALIGN(sizeof(struct shm_marker));
+        m->size = size + SHM_MARKER_SIZE;
 
         if (ftruncate(fd, (off_t) m->size) < 0) {
             pa_log("ftruncate() failed: %s", pa_cstrerror(errno));
@@ -150,7 +155,7 @@ int pa_shm_create_rw(pa_shm *m, size_t size, pa_bool_t shared, mode_t mode) {
 
         /* We store our PID at the end of the shm block, so that we
          * can check for dead shm segments later */
-        marker = (struct shm_marker*) ((uint8_t*) m->ptr + m->size - PA_ALIGN(sizeof(struct shm_marker)));
+        marker = (struct shm_marker*) ((uint8_t*) m->ptr + m->size - SHM_MARKER_SIZE);
         pa_atomic_store(&marker->pid, (int) getpid());
         pa_atomic_store(&marker->marker, SHM_MARKER);
 
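
Because the marker always occupies the final SHM_MARKER_SIZE bytes of the mapping, any process that knows a segment's size can find it without extra metadata; this is exactly how pa_shm_cleanup() locates it further down. A simplified sketch of the address computation (assumes the struct and define from the hunks above):

    #include <stdint.h>

    static inline struct shm_marker *marker_of(void *base, size_t size) {
        /* the marker sits in the last SHM_MARKER_SIZE bytes */
        return (struct shm_marker*) ((uint8_t*) base + size - SHM_MARKER_SIZE);
    }
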
@@ -197,7 +202,7 @@ void pa_shm_free(pa_shm *m) {
 #endif
     } else {
 #ifdef HAVE_SHM_OPEN
-        if (munmap(m->ptr, m->size) < 0)
+        if (munmap(m->ptr, PA_PAGE_ALIGN(m->size)) < 0)
             pa_log("munmap() failed: %s", pa_cstrerror(errno));
 
         if (m->do_unlink) {
@@ -214,12 +219,12 @@ void pa_shm_free(pa_shm *m) {
 #endif
     }
 
-    memset(m, 0, sizeof(*m));
+    pa_zero(*m);
 }
 
 void pa_shm_punch(pa_shm *m, size_t offset, size_t size) {
     void *ptr;
-    size_t o, ps;
+    size_t o;
 
     pa_assert(m);
     pa_assert(m->ptr);
@@ -233,16 +238,19 @@ void pa_shm_punch(pa_shm *m, size_t offset, size_t size) {
     /* You're welcome to implement this as NOOP on systems that don't
      * support it */
 
-    /* Align this to multiples of the page size */
+    /* Align the pointer up to multiples of the page size */
     ptr = (uint8_t*) m->ptr + offset;
     o = (size_t) ((uint8_t*) ptr - (uint8_t*) PA_PAGE_ALIGN_PTR(ptr));
 
     if (o > 0) {
-        ps = PA_PAGE_SIZE;
-        ptr = (uint8_t*) ptr + (ps - o);
-        size -= ps - o;
+        size_t delta = PA_PAGE_SIZE - o;
+        ptr = (uint8_t*) ptr + delta;
+        size -= delta;
     }
 
+    /* Align the size down to multiples of page size */
+    size = (size / PA_PAGE_SIZE) * PA_PAGE_SIZE;
+
 #ifdef MADV_REMOVE
     if (madvise(ptr, size, MADV_REMOVE) >= 0)
         return;
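
This is the rework the commit title refers to. The kernel handles madvise() at page granularity, so the punched range must be shrunk to whole pages: round the start up and the length down, yielding the largest page-aligned range fully inside the caller's request. The old code only rounded the start up; an unaligned length could let MADV_REMOVE discard data beyond the requested end. A self-contained sketch of the rule, with a fixed 4096-byte page standing in for PA_PAGE_SIZE and an underflow guard the original leaves to its callers:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SZ 4096u

    /* Shrink [start, start+len) to the largest page-aligned subrange. */
    static void punch_range(uintptr_t start, size_t len,
                            uintptr_t *astart, size_t *alen) {
        uintptr_t up = (start + PAGE_SZ - 1) & ~(uintptr_t)(PAGE_SZ - 1); /* round start up */
        size_t skipped = (size_t)(up - start);
        len = len > skipped ? len - skipped : 0;  /* guard not present upstream */
        *astart = up;
        *alen = (len / PAGE_SZ) * PAGE_SZ;        /* round length down */
    }

    int main(void) {
        uintptr_t s; size_t l;
        punch_range(0x1010, 0x3000, &s, &l);
        printf("punch %#zx bytes at %#lx\n", l, (unsigned long) s); /* 0x2000 at 0x2000 */
        return 0;
    }
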
@@ -254,9 +262,9 @@ void pa_shm_punch(pa_shm *m, size_t offset, size_t size) {
 #endif
 
 #ifdef MADV_DONTNEED
-    pa_assert_se(madvise(ptr, size, MADV_DONTNEED) == 0);
+    madvise(ptr, size, MADV_DONTNEED);
 #elif defined(POSIX_MADV_DONTNEED)
-    pa_assert_se(posix_madvise(ptr, size, POSIX_MADV_DONTNEED) == 0);
+    posix_madvise(ptr, size, POSIX_MADV_DONTNEED);
 #endif
 }
 
@@ -283,7 +291,7 @@ int pa_shm_attach_ro(pa_shm *m, unsigned id) {
     }
 
     if (st.st_size <= 0 ||
-        st.st_size > (off_t) (MAX_SHM_SIZE+PA_ALIGN(sizeof(struct shm_marker))) ||
+        st.st_size > (off_t) (MAX_SHM_SIZE+SHM_MARKER_SIZE) ||
         PA_ALIGN((size_t) st.st_size) != (size_t) st.st_size) {
         pa_log("Invalid shared memory segment size");
         goto fail;
@@ -346,12 +354,12 @@ int pa_shm_cleanup(void) {
         if (pa_shm_attach_ro(&seg, id) < 0)
             continue;
 
-        if (seg.size < PA_ALIGN(sizeof(struct shm_marker))) {
+        if (seg.size < SHM_MARKER_SIZE) {
            pa_shm_free(&seg);
            continue;
        }
 
-        m = (struct shm_marker*) ((uint8_t*) seg.ptr + seg.size - PA_ALIGN(sizeof(struct shm_marker)));
+        m = (struct shm_marker*) ((uint8_t*) seg.ptr + seg.size - SHM_MARKER_SIZE);
 
         if (pa_atomic_load(&m->marker) != SHM_MARKER) {
             pa_shm_free(&seg);
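
pa_shm_cleanup() then reads the owner's PID out of the marker and unlinks segments whose creator has died. The liveness test this relies on boils down to signal 0; a hedged sketch of the assumed logic:

    #include <errno.h>
    #include <signal.h>
    #include <stdbool.h>
    #include <sys/types.h>

    /* kill(pid, 0) delivers no signal; ESRCH means no such process,
     * while EPERM means it exists but belongs to someone else. */
    static bool pid_is_alive(pid_t pid) {
        return kill(pid, 0) >= 0 || errno != ESRCH;
    }
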