author     Richard Braun <rbraun@sceen.net>    2016-02-22 21:59:07 +0100
committer  Richard Braun <rbraun@sceen.net>    2016-02-22 21:59:07 +0100
commit     b325f426b367d813b23799aeb058d7d3ac81f13d (patch)
tree       a17f7d46316faadd79958de3760c4ee744014e88
parent     9e7f22971be0f427601926b42b640426ab7da4db (diff)
Optimize slab lookup on the free path
Caches that use external slab data but allocate slabs from the direct
physical mapping can look up slab data in constant time by associating
the slab data directly with the underlying page.

* kern/slab.c (kmem_slab_use_tree): Take KMEM_CF_DIRECTMAP into account.
(kmem_slab_create): Set page private data if relevant.
(kmem_slab_destroy): Clear page private data if relevant.
(kmem_cache_free_to_slab): Use page private data if relevant.
* vm/vm_page.c (vm_page_init_pa): Set `priv' member to NULL.
* vm/vm_page.h (vm_page_set_priv, vm_page_get_priv): New functions.
-rw-r--r--  kern/slab.c    54
-rw-r--r--  vm/vm_page.c    1
-rw-r--r--  vm/vm_page.h   16
3 files changed, 60 insertions(+), 11 deletions(-)
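In short: on the free path, a buffer address must be mapped back to its slab
header. For KMEM_CF_SLAB_EXTERNAL caches this used to require an rbtree search
over the active slabs; when the cache also has KMEM_CF_DIRECTMAP, kvtophys()
and vm_page_lookup_pa() already resolve the address to its struct vm_page in
constant time, so the slab header can simply be stored in, and read back from,
the page's new `priv' field. Below is a minimal standalone sketch of that idea
(plain C, compilable on its own; mock_page, page_of and the other names are
illustrative stand-ins, not gnumach API):

/* Sketch only: models the constant-time slab lookup enabled by this
 * commit, using a fake direct map. Not gnumach code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define NR_PAGES   16

struct mock_page {
    void *priv;                 /* plays the role of vm_page.priv */
};

struct mock_slab {
    int nr_free;                /* stand-in for real slab bookkeeping */
};

static struct mock_page page_table[NR_PAGES];
static char direct_map[NR_PAGES * PAGE_SIZE];  /* fake direct physical mapping */

/* O(1) address-to-page translation, like vm_page_lookup_pa(kvtophys(addr)). */
static struct mock_page *page_of(const void *addr)
{
    uintptr_t off = (uintptr_t)addr - (uintptr_t)direct_map;
    return &page_table[off >> PAGE_SHIFT];
}

int main(void)
{
    struct mock_slab slab = { .nr_free = 0 };
    void *buf = &direct_map[3 * PAGE_SIZE];  /* buffer inside the slab's page */

    /* slab creation: associate the header with its page (cf. vm_page_set_priv) */
    page_of(buf)->priv = &slab;

    /* free path: recover the header with no tree search (cf. vm_page_get_priv) */
    struct mock_slab *found = page_of(buf)->priv;
    assert(found == &slab);
    found->nr_free++;
    printf("slab recovered in O(1), nr_free = %d\n", found->nr_free);

    /* slab destruction: clear the association, as kmem_slab_destroy now does */
    page_of(buf)->priv = NULL;
    return 0;
}

The rbtree path is kept for external-slab caches without KMEM_CF_DIRECTMAP,
where a buffer's virtual address cannot be turned into its page this cheaply;
that is exactly the distinction the reworked kmem_slab_use_tree() encodes.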
diff --git a/kern/slab.c b/kern/slab.c
index 375ad23b..cb720475 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -424,6 +424,15 @@ kmem_pagefree(vm_offset_t addr, vm_size_t size, int flags)
            : kmem_pagefree_virtual(addr, size);
 }
 
+static inline int kmem_slab_use_tree(int flags)
+{
+    if (flags & KMEM_CF_VERIFY)
+        return 1;
+
+    return ((flags & (KMEM_CF_SLAB_EXTERNAL | KMEM_CF_DIRECTMAP))
+            == KMEM_CF_SLAB_EXTERNAL);
+}
+
 static void kmem_slab_create_verify(struct kmem_slab *slab,
                                     struct kmem_cache *cache)
 {
@@ -470,6 +479,14 @@ static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache,
             kmem_pagefree(slab_buf, cache->slab_size, cache->flags);
             return NULL;
         }
+
+        if (cache->flags & KMEM_CF_DIRECTMAP) {
+            struct vm_page *page;
+
+            page = vm_page_lookup_pa(kvtophys(slab_buf));
+            assert(page != NULL);
+            vm_page_set_priv(page, slab);
+        }
     } else {
         slab = (struct kmem_slab *)(slab_buf + cache->slab_size) - 1;
     }
@@ -537,15 +554,21 @@ static void kmem_slab_destroy(struct kmem_slab *slab, struct kmem_cache *cache)
         kmem_slab_destroy_verify(slab, cache);
 
     slab_buf = (vm_offset_t)P2ALIGN((unsigned long)slab->addr, PAGE_SIZE);
-    kmem_pagefree(slab_buf, cache->slab_size, cache->flags);
 
-    if (cache->flags & KMEM_CF_SLAB_EXTERNAL)
+    if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
+        if (cache->flags & KMEM_CF_DIRECTMAP) {
+            struct vm_page *page;
+
+            /* Not strictly needed, but let's increase safety */
+            page = vm_page_lookup_pa(kvtophys(slab_buf));
+            assert(page != NULL);
+            vm_page_set_priv(page, NULL);
+        }
+
         kmem_cache_free(&kmem_slab_cache, (vm_offset_t)slab);
-}
+    }
 
-static inline int kmem_slab_use_tree(int flags)
-{
-    return (flags & KMEM_CF_SLAB_EXTERNAL) || (flags & KMEM_CF_VERIFY);
+    kmem_pagefree(slab_buf, cache->slab_size, cache->flags);
 }
 
 static inline int kmem_slab_cmp_lookup(const void *addr,
@@ -968,12 +991,21 @@ static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
     union kmem_bufctl *bufctl;
 
     if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
-        struct rbtree_node *node;
+        if (cache->flags & KMEM_CF_DIRECTMAP) {
+            struct vm_page *page;
+
+            page = vm_page_lookup_pa(kvtophys((vm_offset_t)buf));
+            assert(page != NULL);
+            slab = vm_page_get_priv(page);
+        } else {
+            struct rbtree_node *node;
+
+            node = rbtree_lookup_nearest(&cache->active_slabs, buf,
+                                         kmem_slab_cmp_lookup, RBTREE_LEFT);
+            assert(node != NULL);
+            slab = rbtree_entry(node, struct kmem_slab, tree_node);
+        }
 
-        node = rbtree_lookup_nearest(&cache->active_slabs, buf,
-                                     kmem_slab_cmp_lookup, RBTREE_LEFT);
-        assert(node != NULL);
-        slab = rbtree_entry(node, struct kmem_slab, tree_node);
         assert((unsigned long)buf < (P2ALIGN((unsigned long)slab->addr
                                              + cache->slab_size, PAGE_SIZE)));
     } else {
diff --git a/vm/vm_page.c b/vm/vm_page.c
index 48d70964..a868fce8 100644
--- a/vm/vm_page.c
+++ b/vm/vm_page.c
@@ -165,6 +165,7 @@ vm_page_init_pa(struct vm_page *page, unsigned short seg_index, phys_addr_t pa)
     page->type = VM_PT_RESERVED;
     page->seg_index = seg_index;
     page->order = VM_PAGE_ORDER_UNLISTED;
+    page->priv = NULL;
     page->phys_addr = pa;
 }
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 6f4f3c22..f2e20a78 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -82,6 +82,7 @@ struct vm_page {
     unsigned short type;
     unsigned short seg_index;
     unsigned short order;
+    void *priv;
 
     /*
      * This member is used throughout the code and may only change for
@@ -424,6 +425,21 @@ vm_page_direct_ptr(const struct vm_page *page)
 #endif
 
 /*
+ * Associate private data with a page.
+ */
+static inline void
+vm_page_set_priv(struct vm_page *page, void *priv)
+{
+    page->priv = priv;
+}
+
+static inline void *
+vm_page_get_priv(const struct vm_page *page)
+{
+    return page->priv;
+}
+
+/*
  * Load physical memory into the vm_page module at boot time.
  *
  * The avail_start and avail_end parameters are used to maintain a simple