author     Richard Braun <rbraun@sceen.net>  2016-02-22 21:59:07 +0100
committer  Richard Braun <rbraun@sceen.net>  2016-02-22 21:59:07 +0100
commit     724326b4d590d94ff81eb2e5817cc79a9bade7e4 (patch)
tree       0536d17f1a5db9625a0a6561d91157e958942e7d /kern
parent     b325f426b367d813b23799aeb058d7d3ac81f13d (diff)
Fix slab allocator option handling
The slab allocator has grown to use multiple ways to allocate slabs
as well as track them, which got a little messy. One consequence is
the breaking of the KMEM_CF_VERIFY option. In order to make the code
less confusing, this change expresses all options as explicit cache
flags and clearly defines their relationships. The special kmem_slab
and vm_map_entry caches are initialized accordingly.

* kern/slab.c (KMEM_CF_DIRECTMAP): Rename to ...
(KMEM_CF_PHYSMEM): ... this new macro.
(KMEM_CF_DIRECT): Restore macro.
(KMEM_CF_USE_TREE, KMEM_CF_USE_PAGE): New macros.
(KMEM_CF_VERIFY): Update value.
(kmem_pagealloc_directmap): Rename to ...
(kmem_pagealloc_physmem): ... this new function.
(kmem_pagefree_directmap): Rename to ...
(kmem_pagefree_physmem): ... this new function.
(kmem_pagealloc, kmem_pagefree): Update macro names.
(kmem_slab_use_tree): Remove function.
(kmem_slab_create, kmem_slab_destroy): Update according to the new
cache flags.
(kmem_cache_compute_sizes): Rename to ...
(kmem_cache_compute_properties): ... this new function, and update
to properly set cache flags.
(kmem_cache_init): Update call to kmem_cache_compute_properties.
(kmem_cache_alloc_from_slab): Check KMEM_CF_USE_TREE instead of
calling the defunct kmem_slab_use_tree function.
(kmem_cache_free_to_slab): Update according to the new cache flags.
(kmem_cache_free_verify): Add assertion.
(slab_init): Update initialization of kmem_slab_cache.
* kern/slab.h (KMEM_CACHE_DIRECTMAP): Rename to ...
(KMEM_CACHE_PHYSMEM): ... this new macro.
* vm/vm_map.c (vm_map_init): Update initialization of
vm_map_entry_cache.
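Note: the vm/vm_map.c hunk mentioned above is not included below, since the
diffstat is limited to 'kern'. As a rough sketch of that change, assuming the
vm_map_entry cache keeps its existing KMEM_CACHE_NOOFFSLAB flag and previously
passed KMEM_CACHE_DIRECTMAP, the updated call in vm_map_init() would presumably
look like this (flag combination is an assumption based on the commit message,
not code shown in this diff):

    /* vm/vm_map.c, vm_map_init() -- sketch only; KMEM_CACHE_DIRECTMAP is
     * simply renamed to KMEM_CACHE_PHYSMEM by this commit. */
    kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
                    sizeof(struct vm_map_entry), 0, NULL,
                    KMEM_CACHE_NOOFFSLAB | KMEM_CACHE_PHYSMEM);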
Diffstat (limited to 'kern')
-rw-r--r--  kern/slab.c  114
-rw-r--r--  kern/slab.h    2
2 files changed, 66 insertions(+), 50 deletions(-)
diff --git a/kern/slab.c b/kern/slab.c
index cb720475..41ff01a9 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -195,9 +195,17 @@
*
* The flags don't change once set and can be tested without locking.
*/
-#define KMEM_CF_SLAB_EXTERNAL 0x1 /* Slab data is off slab */
-#define KMEM_CF_DIRECTMAP 0x2 /* Allocate from physical memory */
-#define KMEM_CF_VERIFY 0x4 /* Debugging facilities enabled */
+#define KMEM_CF_SLAB_EXTERNAL 0x01 /* Slab data is off slab */
+#define KMEM_CF_PHYSMEM 0x02 /* Allocate from physical memory */
+#define KMEM_CF_DIRECT 0x04 /* Direct buf-to-slab translation
+ (implies !KMEM_CF_SLAB_EXTERNAL) */
+#define KMEM_CF_USE_TREE 0x08 /* Use red-black tree to track slab
+ data */
+#define KMEM_CF_USE_PAGE 0x10 /* Use page private data to track slab
+ data (implies KMEM_CF_SLAB_EXTERNAL
+ and KMEM_CF_PHYSMEM) */
+#define KMEM_CF_VERIFY 0x20 /* Debugging facilities enabled
+ (implies KMEM_CF_USE_TREE) */
/*
* Options for kmem_cache_alloc_verify().
@@ -355,7 +363,7 @@ static inline void * kmem_bufctl_to_buf(union kmem_bufctl *bufctl,
}
static vm_offset_t
-kmem_pagealloc_directmap(vm_size_t size)
+kmem_pagealloc_physmem(vm_size_t size)
{
struct vm_page *page;
@@ -374,7 +382,7 @@ kmem_pagealloc_directmap(vm_size_t size)
}
static void
-kmem_pagefree_directmap(vm_offset_t addr, vm_size_t size)
+kmem_pagefree_physmem(vm_offset_t addr, vm_size_t size)
{
struct vm_page *page;
@@ -411,28 +419,19 @@ kmem_pagefree_virtual(vm_offset_t addr, vm_size_t size)
static vm_offset_t
kmem_pagealloc(vm_size_t size, int flags)
{
- return (flags & KMEM_CF_DIRECTMAP)
- ? kmem_pagealloc_directmap(size)
+ return (flags & KMEM_CF_PHYSMEM)
+ ? kmem_pagealloc_physmem(size)
: kmem_pagealloc_virtual(size);
}
static void
kmem_pagefree(vm_offset_t addr, vm_size_t size, int flags)
{
- return (flags & KMEM_CF_DIRECTMAP)
- ? kmem_pagefree_directmap(addr, size)
+ return (flags & KMEM_CF_PHYSMEM)
+ ? kmem_pagefree_physmem(addr, size)
: kmem_pagefree_virtual(addr, size);
}
-static inline int kmem_slab_use_tree(int flags)
-{
- if (flags & KMEM_CF_VERIFY)
- return 1;
-
- return ((flags & (KMEM_CF_SLAB_EXTERNAL | KMEM_CF_DIRECTMAP))
- == KMEM_CF_SLAB_EXTERNAL);
-}
-
static void kmem_slab_create_verify(struct kmem_slab *slab,
struct kmem_cache *cache)
{
@@ -480,7 +479,7 @@ static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache,
return NULL;
}
- if (cache->flags & KMEM_CF_DIRECTMAP) {
+ if (cache->flags & KMEM_CF_USE_PAGE) {
struct vm_page *page;
page = vm_page_lookup_pa(kvtophys(slab_buf));
@@ -556,7 +555,7 @@ static void kmem_slab_destroy(struct kmem_slab *slab, struct kmem_cache *cache)
slab_buf = (vm_offset_t)P2ALIGN((unsigned long)slab->addr, PAGE_SIZE);
if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
- if (cache->flags & KMEM_CF_DIRECTMAP) {
+ if (cache->flags & KMEM_CF_USE_PAGE) {
struct vm_page *page;
/* Not strictly needed, but let's increase safety */
@@ -726,13 +725,13 @@ static void kmem_cache_error(struct kmem_cache *cache, void *buf, int error,
}
/*
- * Compute an appropriate slab size for the given cache.
+ * Compute properties such as slab size for the given cache.
*
* Once the slab size is known, this function sets the related properties
- * (buffers per slab and maximum color). It can also set the KMEM_CF_DIRECTMAP
- * and/or KMEM_CF_SLAB_EXTERNAL flags depending on the resulting layout.
+ * (buffers per slab and maximum color). It can also set some KMEM_CF_xxx
+ * flags depending on the resulting layout.
*/
-static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
+static void kmem_cache_compute_properties(struct kmem_cache *cache, int flags)
{
size_t size, waste;
int embed;
@@ -770,8 +769,8 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
if (!embed)
cache->flags |= KMEM_CF_SLAB_EXTERNAL;
- if ((flags & KMEM_CACHE_DIRECTMAP) || (cache->slab_size == PAGE_SIZE)) {
- cache->flags |= KMEM_CF_DIRECTMAP;
+ if ((flags & KMEM_CACHE_PHYSMEM) || (cache->slab_size == PAGE_SIZE)) {
+ cache->flags |= KMEM_CF_PHYSMEM;
/*
* Avoid using larger-than-page slabs backed by the direct physical
@@ -781,6 +780,21 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
if (cache->slab_size != PAGE_SIZE)
panic("slab: invalid cache parameters");
}
+
+ if (cache->flags & KMEM_CF_VERIFY)
+ cache->flags |= KMEM_CF_USE_TREE;
+
+ if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
+ if (cache->flags & KMEM_CF_PHYSMEM)
+ cache->flags |= KMEM_CF_USE_PAGE;
+ else
+ cache->flags |= KMEM_CF_USE_TREE;
+ } else {
+ if (cache->slab_size == PAGE_SIZE)
+ cache->flags |= KMEM_CF_DIRECT;
+ else
+ cache->flags |= KMEM_CF_USE_TREE;
+ }
}
void kmem_cache_init(struct kmem_cache *cache, const char *name,
@@ -840,7 +854,7 @@ void kmem_cache_init(struct kmem_cache *cache, const char *name,
cache->buf_size = buf_size;
}
- kmem_cache_compute_sizes(cache, flags);
+ kmem_cache_compute_properties(cache, flags);
#if SLAB_USE_CPU_POOLS
for (cpu_pool_type = kmem_cpu_pool_types;
@@ -973,7 +987,7 @@ static void * kmem_cache_alloc_from_slab(struct kmem_cache *cache)
cache->nr_free_slabs--;
}
- if ((slab->nr_refs == 1) && kmem_slab_use_tree(cache->flags))
+ if ((slab->nr_refs == 1) && (cache->flags & KMEM_CF_USE_TREE))
rbtree_insert(&cache->active_slabs, &slab->tree_node,
kmem_slab_cmp_insert);
@@ -990,30 +1004,30 @@ static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
struct kmem_slab *slab;
union kmem_bufctl *bufctl;
- if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
- if (cache->flags & KMEM_CF_DIRECTMAP) {
- struct vm_page *page;
-
- page = vm_page_lookup_pa(kvtophys((vm_offset_t)buf));
- assert(page != NULL);
- slab = vm_page_get_priv(page);
- } else {
- struct rbtree_node *node;
-
- node = rbtree_lookup_nearest(&cache->active_slabs, buf,
- kmem_slab_cmp_lookup, RBTREE_LEFT);
- assert(node != NULL);
- slab = rbtree_entry(node, struct kmem_slab, tree_node);
- }
-
- assert((unsigned long)buf < (P2ALIGN((unsigned long)slab->addr
- + cache->slab_size, PAGE_SIZE)));
- } else {
+ if (cache->flags & KMEM_CF_DIRECT) {
assert(cache->slab_size == PAGE_SIZE);
slab = (struct kmem_slab *)P2END((unsigned long)buf, cache->slab_size)
- 1;
+ } else if (cache->flags & KMEM_CF_USE_PAGE) {
+ struct vm_page *page;
+
+ page = vm_page_lookup_pa(kvtophys((vm_offset_t)buf));
+ assert(page != NULL);
+ slab = vm_page_get_priv(page);
+ } else {
+ struct rbtree_node *node;
+
+ assert(cache->flags & KMEM_CF_USE_TREE);
+ node = rbtree_lookup_nearest(&cache->active_slabs, buf,
+ kmem_slab_cmp_lookup, RBTREE_LEFT);
+ assert(node != NULL);
+ slab = rbtree_entry(node, struct kmem_slab, tree_node);
}
+ assert((unsigned long)buf >= (unsigned long)slab->addr);
+ assert(((unsigned long)buf + cache->buf_size)
+ <= vm_page_trunc((unsigned long)slab->addr + cache->slab_size));
+
assert(slab->nr_refs >= 1);
assert(slab->nr_refs <= cache->bufs_per_slab);
bufctl = kmem_buf_to_bufctl(buf, cache);
@@ -1025,7 +1039,7 @@ static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
if (slab->nr_refs == 0) {
/* The slab has become free */
- if (kmem_slab_use_tree(cache->flags))
+ if (cache->flags & KMEM_CF_USE_TREE)
rbtree_remove(&cache->active_slabs, &slab->tree_node);
if (cache->bufs_per_slab > 1)
@@ -1146,6 +1160,8 @@ static void kmem_cache_free_verify(struct kmem_cache *cache, void *buf)
unsigned char *redzone_byte;
unsigned long slabend;
+ assert(cache->flags & KMEM_CF_USE_TREE);
+
simple_lock(&cache->lock);
node = rbtree_lookup_nearest(&cache->active_slabs, buf,
kmem_slab_cmp_lookup, RBTREE_LEFT);
@@ -1312,7 +1328,7 @@ void slab_init(void)
* Prevent off slab data for the slab cache to avoid infinite recursion.
*/
kmem_cache_init(&kmem_slab_cache, "kmem_slab", sizeof(struct kmem_slab),
- 0, NULL, KMEM_CACHE_NOOFFSLAB | KMEM_CACHE_DIRECTMAP);
+ 0, NULL, KMEM_CACHE_NOOFFSLAB);
}
void kalloc_init(void)
diff --git a/kern/slab.h b/kern/slab.h
index a9978fdb..8527c9db 100644
--- a/kern/slab.h
+++ b/kern/slab.h
@@ -196,7 +196,7 @@ typedef struct kmem_cache *kmem_cache_t;
* Cache initialization flags.
*/
#define KMEM_CACHE_NOOFFSLAB 0x1 /* Don't allocate external slab data */
-#define KMEM_CACHE_DIRECTMAP 0x2 /* Allocate from physical memory */
+#define KMEM_CACHE_PHYSMEM 0x2 /* Allocate from physical memory */
#define KMEM_CACHE_VERIFY 0x4 /* Use debugging facilities */
/*