author     Justus Winter <justus@gnupg.org>  2016-10-21 16:15:08 +0200
committer  Justus Winter <justus@gnupg.org>  2016-10-22 13:34:16 +0200
commit     3ff86d799822a0ea8bb3a213e79d399267a876b2 (patch)
tree       504b4a0bb2e56da6a974a0fb4d466904672a897d
parent     260d0254d3dd8b2cb0b062a255f1c4201566b5a8 (diff)
i386: Allocate page directories using the slab allocator.
* i386/intel/pmap.c (pd_cache): New variable.
(pdpt_cache): Likewise.
(pmap_init): Initialize new caches.
(pmap_create): Use the caches.
(pmap_destroy): Free to the caches.
-rw-r--r--  i386/intel/pmap.c  39
1 file changed, 28 insertions(+), 11 deletions(-)
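For context, a minimal illustrative sketch of the slab-cache pattern this patch switches to, assuming only the kmem_cache calls that appear verbatim in the hunks below (kmem_cache_init, kmem_cache_alloc, kmem_cache_free). The wrapper functions and their names are hypothetical and not part of the patch.

/* Illustrative only: a dedicated cache for one fixed-size object
   (the page directory), mirroring the "pd" cache added below. */
struct kmem_cache pd_cache;

void example_pd_cache_init(void)	/* hypothetical helper */
{
	/* Object size and alignment are both given at init time. */
	kmem_cache_init(&pd_cache, "pd",
			PDPNUM * INTEL_PGBYTES, INTEL_PGBYTES, NULL, 0);
}

pt_entry_t *example_pd_alloc(void)	/* hypothetical helper */
{
	/* Returns NULL on failure, letting the caller back out
	   instead of panicking as the old kmem_alloc_wired path did. */
	return (pt_entry_t *) kmem_cache_alloc(&pd_cache);
}

void example_pd_free(pt_entry_t *dirbase)	/* hypothetical helper */
{
	/* Objects return to the cache rather than to kernel_map. */
	kmem_cache_free(&pd_cache, (vm_offset_t) dirbase);
}

Compared with kmem_alloc_wired()/kmem_free() on kernel_map, per-object caches let the allocator recycle these fixed-size directories and enforce the requested alignment (INTEL_PGBYTES for the page directory, PDPNUM * sizeof(pt_entry_t) for the PAE pointer table).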
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 096e6fd0..b143dd72 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -388,6 +388,12 @@ struct pmap kernel_pmap_store;
pmap_t kernel_pmap;
struct kmem_cache pmap_cache; /* cache of pmap structures */
+struct kmem_cache pd_cache; /* cache of page directories */
+#if PAE
+struct kmem_cache pdpt_cache; /* cache of page
+ directory pointer
+ tables */
+#endif
boolean_t pmap_debug = FALSE; /* flag for debugging prints */
@@ -976,6 +982,13 @@ void pmap_init(void)
*/
s = (vm_size_t) sizeof(struct pmap);
kmem_cache_init(&pmap_cache, "pmap", s, 0, NULL, 0);
+ kmem_cache_init(&pd_cache, "pd",
+ PDPNUM * INTEL_PGBYTES, INTEL_PGBYTES, NULL, 0);
+#if PAE
+ kmem_cache_init(&pdpt_cache, "pdpt",
+ PDPNUM * sizeof(pt_entry_t),
+ PDPNUM * sizeof(pt_entry_t), NULL, 0);
+#endif
s = (vm_size_t) sizeof(struct pv_entry);
kmem_cache_init(&pv_list_cache, "pv_entry", s, 0, NULL, 0);
@@ -1152,12 +1165,13 @@ pmap_t pmap_create(vm_size_t size)
p = (pmap_t) kmem_cache_alloc(&pmap_cache);
if (p == PMAP_NULL)
- panic("pmap_create");
+ return PMAP_NULL;
- if (kmem_alloc_wired(kernel_map,
- (vm_offset_t *)&p->dirbase, PDPNUM * INTEL_PGBYTES)
- != KERN_SUCCESS)
- panic("pmap_create");
+ p->dirbase = (pt_entry_t *) kmem_cache_alloc(&pd_cache);
+ if (p->dirbase == NULL) {
+ kmem_cache_free(&pmap_cache, (vm_address_t) p);
+ return PMAP_NULL;
+ }
memcpy(p->dirbase, kernel_page_dir, PDPNUM * INTEL_PGBYTES);
#ifdef LINUX_DEV
@@ -1175,10 +1189,13 @@ pmap_t pmap_create(vm_size_t size)
#endif /* MACH_PV_PAGETABLES */
#if PAE
- if (kmem_alloc_wired(kernel_map,
- (vm_offset_t *)&p->pdpbase, INTEL_PGBYTES)
- != KERN_SUCCESS)
- panic("pmap_create");
+ p->pdpbase = (pt_entry_t *) kmem_cache_alloc(&pdpt_cache);
+ if (p->pdpbase == NULL) {
+ kmem_cache_free(&pd_cache, (vm_address_t) p->dirbase);
+ kmem_cache_free(&pmap_cache, (vm_address_t) p);
+ return PMAP_NULL;
+ }
+
{
int i;
for (i = 0; i < PDPNUM; i++)
@@ -1263,12 +1280,12 @@ void pmap_destroy(pmap_t p)
pmap_set_page_readwrite((void*) p->dirbase + i * INTEL_PGBYTES);
}
#endif /* MACH_PV_PAGETABLES */
- kmem_free(kernel_map, (vm_offset_t)p->dirbase, PDPNUM * INTEL_PGBYTES);
+ kmem_cache_free(&pd_cache, (vm_offset_t) p->dirbase);
#if PAE
#ifdef MACH_PV_PAGETABLES
pmap_set_page_readwrite(p->pdpbase);
#endif /* MACH_PV_PAGETABLES */
- kmem_free(kernel_map, (vm_offset_t)p->pdpbase, INTEL_PGBYTES);
+ kmem_cache_free(&pdpt_cache, (vm_offset_t) p->pdpbase);
#endif /* PAE */
kmem_cache_free(&pmap_cache, (vm_offset_t) p);
}