summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMatt Mackall <mpm@selenic.com>2008-10-08 14:51:57 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2008-10-09 12:18:27 -0700
commit70096a561d1e09120bae1f293f3632cedbfd5c68 (patch)
tree6c5f7474089ceeff06aa515a5b39cb38e7318efe
parent69849375d6b13e94d08cdc94b49b11fbab454a0e (diff)
downloadlinux-bcache-70096a561d1e09120bae1f293f3632cedbfd5c68.tar.zst
SLOB: fix bogus ksize calculation fix
This fixes the previous fix, which was completely wrong on closer inspection. This version has been manually tested with a user-space test harness and generates sane values. A nearly identical patch has been boot-tested. The problem arose from changing how kmalloc/kfree handled alignment padding without updating ksize to match. This brings it in sync. Signed-off-by: Matt Mackall <mpm@selenic.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--mm/slob.c8
1 file changed, 5 insertions, 3 deletions
diff --git a/mm/slob.c b/mm/slob.c
index 62b679dc660f..cb675d126791 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -514,9 +514,11 @@ size_t ksize(const void *block)
return 0;
sp = (struct slob_page *)virt_to_page(block);
- if (slob_page(sp))
- return (((slob_t *)block - 1)->units - 1) * SLOB_UNIT;
- else
+ if (slob_page(sp)) {
+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+ unsigned int *m = (unsigned int *)(block - align);
+ return SLOB_UNITS(*m) * SLOB_UNIT;
+ } else
return sp->page.private;
}