summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRichard Braun <rbraun@sceen.net>2016-06-29 15:33:29 +0200
committerRichard Braun <rbraun@sceen.net>2016-06-29 15:33:41 +0200
commit09ddf29b02c5087e02f358c2e67e27d17d3a21a7 (patch)
treeb6e40614490bfa20480f5311299e57e7b41849a4
parent28048bc18e4d43bb15eddc82aa89685b346405f8 (diff)
Fix locking error in the slab allocator
* kern/slab.c (kmem_slab_create): Set `slab->cache` member.
(kmem_cache_reap): Return dead slabs instead of destroying in place.
(slab_collect): Destroy slabs outside of critical section.
* kern/slab.h (struct kmem_slab): New `cache` member.
-rw-r--r--kern/slab.c37
-rw-r--r--kern/slab.h3
2 files changed, 21 insertions, 19 deletions
diff --git a/kern/slab.c b/kern/slab.c
index eeb94f85..43962e7e 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -495,6 +495,7 @@ static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache,
slab = (struct kmem_slab *)(slab_buf + cache->slab_size) - 1;
}
+ slab->cache = cache;
list_node_init(&slab->list_node);
rbtree_node_init(&slab->tree_node);
slab->nr_refs = 0;
@@ -925,29 +926,17 @@ static int kmem_cache_grow(struct kmem_cache *cache)
return !empty;
}
-static void kmem_cache_reap(struct kmem_cache *cache)
+static void kmem_cache_reap(struct kmem_cache *cache, struct list *dead_slabs)
{
- struct kmem_slab *slab;
- struct list dead_slabs;
- unsigned long nr_free_slabs;
-
simple_lock(&cache->lock);
- list_set_head(&dead_slabs, &cache->free_slabs);
+
+ list_concat(dead_slabs, &cache->free_slabs);
list_init(&cache->free_slabs);
- nr_free_slabs = cache->nr_free_slabs;
- cache->nr_bufs -= cache->bufs_per_slab * nr_free_slabs;
- cache->nr_slabs -= nr_free_slabs;
+ cache->nr_bufs -= cache->bufs_per_slab * cache->nr_free_slabs;
+ cache->nr_slabs -= cache->nr_free_slabs;
cache->nr_free_slabs = 0;
- simple_unlock(&cache->lock);
-
- while (!list_empty(&dead_slabs)) {
- slab = list_first_entry(&dead_slabs, struct kmem_slab, list_node);
- list_remove(&slab->list_node);
- kmem_slab_destroy(slab, cache);
- nr_free_slabs--;
- }
- assert(nr_free_slabs == 0);
+ simple_unlock(&cache->lock);
}
/*
@@ -1286,18 +1275,28 @@ slab_free:
void slab_collect(void)
{
struct kmem_cache *cache;
+ struct kmem_slab *slab;
+ struct list dead_slabs;
if (elapsed_ticks <= (kmem_gc_last_tick + KMEM_GC_INTERVAL))
return;
kmem_gc_last_tick = elapsed_ticks;
+ list_init(&dead_slabs);
+
simple_lock(&kmem_cache_list_lock);
list_for_each_entry(&kmem_cache_list, cache, node)
- kmem_cache_reap(cache);
+ kmem_cache_reap(cache, &dead_slabs);
simple_unlock(&kmem_cache_list_lock);
+
+ while (!list_empty(&dead_slabs)) {
+ slab = list_first_entry(&dead_slabs, struct kmem_slab, list_node);
+ list_remove(&slab->list_node);
+ kmem_slab_destroy(slab, slab->cache);
+ }
}
void slab_bootstrap(void)
diff --git a/kern/slab.h b/kern/slab.h
index 8527c9db..9d8a1156 100644
--- a/kern/slab.h
+++ b/kern/slab.h
@@ -56,6 +56,8 @@
#include <sys/types.h>
#include <vm/vm_types.h>
+struct kmem_cache;
+
#if SLAB_USE_CPU_POOLS
/*
@@ -117,6 +119,7 @@ struct kmem_buftag {
* Page-aligned collection of unconstructed buffers.
*/
struct kmem_slab {
+ struct kmem_cache *cache;
struct list list_node;
struct rbtree_node tree_node;
unsigned long nr_refs;