author    Samuel Thibault <samuel.thibault@ens-lyon.org>    2018-01-18 14:09:03 +0100
committer Samuel Thibault <samuel.thibault@ens-lyon.org>    2018-01-18 14:09:03 +0100
commit    9c527481190ed900f9bac550353988fdc7b2526c (patch)
tree      4df149892cf46098ee25d9e83a56f14d28985d29
parent    de813a9cd1351607ea8c183d2b64628328c358fd (diff)

Pushing what I came up with for now. Next issues are at least kernel stacks access and LDT access. (branch: meltdown)
-rw-r--r--  i386/i386/gdt.c            2
-rw-r--r--  i386/i386/idt.c            2
-rw-r--r--  i386/i386/idt_inittab.S    6
-rw-r--r--  i386/i386/ktss.c           4
-rw-r--r--  i386/i386/locore.S        32
-rw-r--r--  i386/i386at/model_dep.c    6
-rw-r--r--  i386/intel/pmap.c        186
-rw-r--r--  i386/intel/pmap.h         43
-rw-r--r--  i386/ldscript             16
9 files changed, 273 insertions, 24 deletions
diff --git a/i386/i386/gdt.c b/i386/i386/gdt.c
index c895eb3a..e8b8bd0c 100644
--- a/i386/i386/gdt.c
+++ b/i386/i386/gdt.c
@@ -43,7 +43,7 @@
/* It is actually defined in xen_boothdr.S */
extern
#endif /* MACH_PV_DESCRIPTORS */
-struct real_descriptor gdt[GDTSZ];
+struct real_descriptor gdt[GDTSZ] __section(".data.shared");
void
gdt_init(void)
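
The __section attribute used above is what ties the whole patch together:
objects tagged ".data.shared" are collected by the ldscript hunk at the end
of this diff into a page-aligned region that the KPTI code keeps mapped
while userland runs. A minimal sketch of the macro, assuming the usual
wrapper over GCC's section attribute (GNU Mach defines it in its own
support headers, so the exact spelling here is an assumption):

    /* Assumed definition: a thin wrapper over GCC's attribute. */
    #define __section(s) __attribute__((__section__(s)))

    /* Example: this object lands in .data.shared, which the linker
     * script gathers into its own page-aligned range. */
    static int example_table[16] __section(".data.shared");
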
diff --git a/i386/i386/idt.c b/i386/i386/idt.c
index d304ec3e..021beb56 100644
--- a/i386/i386/idt.c
+++ b/i386/i386/idt.c
@@ -26,7 +26,7 @@
#include <i386at/idt.h>
#include <i386/gdt.h>
-struct real_gate idt[IDTSZ];
+struct real_gate idt[IDTSZ] __section(".data.shared");
struct idt_init_entry
{
diff --git a/i386/i386/idt_inittab.S b/i386/i386/idt_inittab.S
index 8e92d805..6794400e 100644
--- a/i386/i386/idt_inittab.S
+++ b/i386/i386/idt_inittab.S
@@ -34,7 +34,7 @@
Here's the header that comes before everything else. */
.data 2
ENTRY(idt_inittab)
- .text
+ .section .text.shared
/*
* Interrupt descriptor table and code vectors for it.
@@ -46,14 +46,14 @@ ENTRY(idt_inittab)
.byte (((type)&ACC_PL)>>5)|((((type)&(ACC_TYPE|ACC_A))==ACC_INTR_GATE)<<2) ;\
.word KERNEL_CS ;\
.long entry ;\
- .text
+ .section .text.shared
#else /* MACH_PV_DESCRIPTORS */
#define IDT_ENTRY(n,entry,type) \
.data 2 ;\
.long entry ;\
.word n ;\
.word type ;\
- .text
+ .section .text.shared
#endif /* MACH_PV_DESCRIPTORS */
/*
diff --git a/i386/i386/ktss.c b/i386/i386/ktss.c
index 21d00300..57bbfdfe 100644
--- a/i386/i386/ktss.c
+++ b/i386/i386/ktss.c
@@ -37,13 +37,13 @@
#include "ktss.h"
/* A kernel TSS with a complete I/O bitmap. */
-struct task_tss ktss;
+struct task_tss ktss __section(".data.shared");
void
ktss_init(void)
{
/* XXX temporary exception stack */
- static int exception_stack[1024];
+ static int exception_stack[1024] __section(".data.shared");
#ifdef MACH_RING1
/* Xen won't allow us to do any I/O by default anyway, just register
diff --git a/i386/i386/locore.S b/i386/i386/locore.S
index ddba2245..1d213c94 100644
--- a/i386/i386/locore.S
+++ b/i386/i386/locore.S
@@ -53,6 +53,13 @@ DATA(recover_table) ;\
.text ;\
9:
+#define RECOVERSHARED(addr) \
+ .text 2 ;\
+ .long 9f ;\
+ .long addr ;\
+ .section .text.shared ;\
+9:
+
#define RECOVER_TABLE_END \
.text 2 ;\
.globl EXT(recover_table_end) ;\
@@ -80,6 +87,19 @@ DATA(retry_table) ;\
LEXT(retry_table_end) ;\
.text
+#ifdef MACH_HYP
+#define SWITCH_TO_KERNEL_PT
+#define SWITCH_TO_USER_PT
+#else
+#define SWITCH_TO_KERNEL_PT \
+ movl EXT(kernel_pt),%edx; \
+ movl %edx,%cr3 /* Switch to kernel pagetable */
+
+#define SWITCH_TO_USER_PT \
+ movl EXT(user_pt),%edx; \
+ /*movl %edx,%cr3 /* Switch to user pagetable */
+#endif
+
/*
* Allocate recovery and retry tables.
*/
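
SWITCH_TO_KERNEL_PT and SWITCH_TO_USER_PT are the heart of the mitigation:
reloading %cr3 switches the page-table root and flushes all non-global TLB
entries, so kernel-only mappings disappear while userland runs. Note that
the actual movl to %cr3 in SWITCH_TO_USER_PT is still commented out in this
commit; as the commit message says, the user table does not yet cover
everything the exit path needs (kernel stacks, LDT). A C-level sketch of
what the two macros do, using the set_cr3() helper and the kernel_pt/user_pt
globals this patch introduces:

    /* Sketch only; the real switch happens in assembly in locore.S. */
    static inline void switch_to_kernel_pt(void)
    {
            set_cr3(kernel_pt); /* reload root, flush non-global TLBs */
    }

    static inline void switch_to_user_pt(void)
    {
            /* Still disabled in this commit: */
            /* set_cr3(user_pt); */
    }
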
@@ -611,7 +631,7 @@ ENTRY(call_continuation)
#define INTERRUPT(n) \
.data 2 ;\
.long 0f ;\
- .text ;\
+ .section .text.shared ;\
P2ALIGN(TEXT_ALIGN) ;\
0: ;\
pushl %eax ;\
@@ -644,6 +664,7 @@ INTERRUPT(15)
* All interrupts enter here.
* old %eax on stack; interrupt number in %eax.
*/
+.section .text.shared
ENTRY(all_intrs)
pushl %ecx /* save registers */
pushl %edx
@@ -663,6 +684,7 @@ ENTRY(all_intrs)
mov %dx,%es
mov %dx,%fs
mov %dx,%gs
+ SWITCH_TO_KERNEL_PT
CPU_NUMBER(%edx)
@@ -701,6 +723,7 @@ LEXT(return_to_iret) /* ( label for kdb_kintr and hardclock) */
cmpl $0,CX(EXT(need_ast),%edx)
jnz ast_from_interrupt /* take it if so */
1:
+ SWITCH_TO_USER_PT
pop %gs /* restore segment regs */
pop %fs
pop %es
@@ -722,6 +745,7 @@ _return_to_iret_i: /* ( label for kdb_kintr) */
stack_overflowed:
ud2
+.text
/*
* Take an AST from an interrupt.
@@ -993,6 +1017,7 @@ ttd_from_iret_i: /* on interrupt stack */
*
* eax contains system call number.
*/
+.section .text.shared
ENTRY(syscall)
syscall_entry:
pushf /* save flags as soon as possible */
@@ -1088,7 +1113,7 @@ syscall_native:
movl %esp,%edx /* save kernel ESP for error recovery */
0: subl $4,%esi
- RECOVER(mach_call_addr_push)
+ RECOVERSHARED(mach_call_addr_push)
pushl %fs:(%esi) /* push argument on stack */
loop 0b /* loop for all arguments */
@@ -1178,9 +1203,10 @@ syscall_addr:
.data
DATA(cpu_features)
.long 0
- .text
+ .section .text.shared
END(syscall)
+ .text
/* Discover what kind of cpu we have; return the family number
(3, 4, 5, 6, for 386, 486, 586, 686 respectively). */
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
index feda8c33..1b991628 100644
--- a/i386/i386at/model_dep.c
+++ b/i386/i386at/model_dep.c
@@ -449,14 +449,16 @@ i386at_init(void)
#endif /* PAE */
#endif /* MACH_PV_PAGETABLES */
#if PAE
- set_cr3((unsigned)_kvtophys(kernel_pmap->pdpbase));
+ user_pt = kernel_pt = (unsigned)_kvtophys(kernel_pmap->pdpbase);
+ set_cr3(kernel_pt);
#ifndef MACH_HYP
if (!CPU_HAS_FEATURE(CPU_FEATURE_PAE))
panic("CPU doesn't have support for PAE.");
set_cr4(get_cr4() | CR4_PAE);
#endif /* MACH_HYP */
#else
- set_cr3((unsigned)_kvtophys(kernel_page_dir));
+ user_pt = kernel_pt = (unsigned)_kvtophys(kernel_page_dir);
+ set_cr3(kernel_pt);
#endif /* PAE */
#ifndef MACH_HYP
/* Turn paging on.
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index ffbd2ae4..3feb745d 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -92,6 +92,21 @@
#define WRITE_PTE(pte_p, pte_entry) *(pte_p) = (pte_entry);
#endif /* MACH_PSEUDO_PHYS */
+#ifdef KPTI
+static inline void write_pde(pt_entry_t *pde_p, pt_entry_t *pdeu_p, pt_entry_t pde_entry)
+{
+ if (pde_entry & INTEL_PTE_USER)
+ /* User page, show it in shadow page table too */
+ *pdeu_p = pde_entry;
+ *pde_p = pde_entry;
+}
+#else
+static inline void write_pde(pt_entry_t *pde_p, pt_entry_t *pdeu_p, pt_entry_t pde_entry)
+{
+ *pde_p = pde_entry;
+}
+#endif
+
/*
* Private data structures.
*/
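
write_pde() is how the shadow directory tracks the real one: any PDE
carrying INTEL_PTE_USER is mirrored into the user page table, while
kernel-only PDEs are written to the kernel tree alone. A sketch of the call
pattern, mirroring the pmap_enter() change further down in this diff (the
helper name is hypothetical):

    static void example_install_ptp(pmap_t map, vm_offset_t v,
                                    vm_offset_t ptp)
    {
            pt_entry_t *pdp  = pmap_pde(map, v);      /* kernel dir */
            pt_entry_t *pdpu = pmap_user_pde(map, v); /* shadow dir */

            write_pde(pdp, pdpu, pa_to_pte(kvtophys(ptp))
                      | INTEL_PTE_VALID
                      | INTEL_PTE_USER
                      | INTEL_PTE_WRITE);
    }
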
@@ -413,6 +428,19 @@ unsigned int inuse_ptepages_count = 0; /* debugging */
pt_entry_t *kernel_page_dir;
/*
+ * Pointer to the template for user page table.
+ * Initialized by pmap_bootstrap().
+ */
+pt_entry_t *user_page_dir;
+
+
+/*
+ * Current kernel and user page table, used by the user/kernel switch assembly
+ * routine
+ */
+vm_offset_t kernel_pt __section(".data.shared"), user_pt __section(".data.shared");
+
+/*
* Two slots for temporary physical page mapping, to allow for
* physical-to-physical transfers.
*/
@@ -432,6 +460,20 @@ pmap_pde(const pmap_t pmap, vm_offset_t addr)
return &page_dir[lin2pdenum(addr)];
}
+static inline pt_entry_t *
+pmap_user_pde(const pmap_t pmap, vm_offset_t addr)
+{
+ pt_entry_t *page_dir;
+ if (pmap == kernel_pmap)
+ addr = kvtolin(addr);
+#if PAE
+ page_dir = (pt_entry_t *) ptetokv(pmap->pdpbase_user[lin2pdpnum(addr)]);
+#else
+ page_dir = pmap->dirbase_user;
+#endif
+ return &page_dir[lin2pdenum(addr)];
+}
+
/*
* Given an offset and a map, compute the address of the
* pte. If the address is invalid with respect to the map
@@ -576,6 +618,40 @@ vm_offset_t pmap_map_bd(
return(virt);
}
+#ifdef KPTI
+/* Set this virtual address in the user page table, thus shared between the
+ * kernel and the user page tables. */
+static void pmap_bootstrap_shared_kernel_page(vm_offset_t va, int pteflags)
+{
+ pt_entry_t *pdp = user_page_dir + lin2pdenum_cont(kvtolin(va));
+ pt_entry_t pde = *pdp;
+ pt_entry_t *ptp;
+
+ if (pde & INTEL_PTE_VALID) {
+ ptp = (pt_entry_t *) ptetokv(pde);
+ } else {
+ unsigned i;
+
+ ptp = (pt_entry_t *) phystokv(pmap_grab_page());
+ for (i = 0; i < NPTES; i++)
+ ptp[i] = 0;
+
+ WRITE_PTE(pdp, pa_to_pte((vm_offset_t)_kvtophys(ptp))
+ | (pteflags & ~INTEL_PTE_GLOBAL));
+ }
+
+ WRITE_PTE(ptp + ptenum(kvtolin(va)),
+ pa_to_pte(_kvtophys(va)) | pteflags);
+}
+
+
+void pmap_share_kernel_page(pmap_t pmap, vm_offset_t va, int pteflags)
+{
+ pt_entry_t *pgp = pmap_user_pde(pmap, va);
+}
+
+#endif
+
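
Two notes on the functions above. pmap_bootstrap_shared_kernel_page()
strips INTEL_PTE_GLOBAL from the PDE it writes because the global bit is
only meaningful in leaf entries; the PTE itself keeps it.
pmap_share_kernel_page() is plainly unfinished in this commit: it computes
pgp and never uses it. A sketch of what it presumably grows into, modeled
on the bootstrap variant (an assumption; a runtime version would also need
a real page-table-page allocator such as pmap_page_table_page_alloc()
rather than the bootstrap-only pmap_grab_page()):

    void pmap_share_kernel_page(pmap_t pmap, vm_offset_t va, int pteflags)
    {
            pt_entry_t *pdp = pmap_user_pde(pmap, va);
            pt_entry_t *ptp;

            if (*pdp & INTEL_PTE_VALID) {
                    ptp = (pt_entry_t *) ptetokv(*pdp);
            } else {
                    unsigned i;

                    /* Assumed allocator, see note above. */
                    ptp = (pt_entry_t *) phystokv(pmap_page_table_page_alloc());
                    for (i = 0; i < NPTES; i++)
                            ptp[i] = 0;
                    WRITE_PTE(pdp, pa_to_pte((vm_offset_t)_kvtophys(ptp))
                              | (pteflags & ~INTEL_PTE_GLOBAL));
            }

            WRITE_PTE(ptp + ptenum(kvtolin(va)),
                      pa_to_pte(_kvtophys(va)) | pteflags);
    }
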
/*
* Bootstrap the system enough to run with virtual memory.
* Allocate the kernel page directory and page tables,
@@ -635,7 +711,14 @@ void pmap_bootstrap(void)
vm_offset_t addr;
init_alloc_aligned(PDPNUM * INTEL_PGBYTES, &addr);
kernel_page_dir = (pt_entry_t*)phystokv(addr);
+#ifdef KPTI
+ init_alloc_aligned(PDPNUM * INTEL_PGBYTES, &addr);
+ user_page_dir = (pt_entry_t*)phystokv(addr);
+#endif
}
+#ifdef KPTI
+ kernel_pmap->pdpbase_user =
+#endif
kernel_pmap->pdpbase = (pt_entry_t*)phystokv(pmap_grab_page());
{
int i;
@@ -646,12 +729,22 @@ void pmap_bootstrap(void)
| INTEL_PTE_VALID);
}
#else /* PAE */
+#ifdef KPTI
+ kernel_pmap->dirbase_user =
+#endif
kernel_pmap->dirbase = kernel_page_dir = (pt_entry_t*)phystokv(pmap_grab_page());
+#ifdef KPTI
+ user_page_dir = (pt_entry_t*)phystokv(pmap_grab_page());
+#endif
#endif /* PAE */
{
unsigned i;
for (i = 0; i < NPDES; i++)
kernel_page_dir[i] = 0;
+#ifdef KPTI
+ for (i = 0; i < NPDES; i++)
+ user_page_dir[i] = 0;
+#endif
}
#ifdef MACH_PV_PAGETABLES
@@ -800,6 +893,17 @@ void pmap_bootstrap(void)
panic("couldn't pin page %p(%p)\n", ptable, (vm_offset_t) kv_to_ma (ptable));
#endif /* MACH_PV_PAGETABLES */
}
+
+#ifdef KPTI
+ for (va = (vm_offset_t) _sharedtext_start;
+ va < (vm_offset_t) _sharedtext_end;
+ va += INTEL_PGBYTES)
+ pmap_bootstrap_shared_kernel_page(va, INTEL_PTE_VALID | global);
+ for (va = (vm_offset_t) _shareddata_start;
+ va < (vm_offset_t) _shareddata_end;
+ va += INTEL_PGBYTES)
+ pmap_bootstrap_shared_kernel_page(va, INTEL_PTE_VALID | INTEL_PTE_WRITE | global);
+#endif
}
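
This loop is the payoff of the two ldscript hunks below: everything the CPU
must be able to reach the instant a trap or interrupt arrives while the
user page table is live (the .text.shared entry code, plus the GDT/IDT/TSS
data tagged earlier) is entered into the user page table page by page.
Shared text is mapped read-only, shared data writable; the pre-existing
`global` variable presumably holds INTEL_PTE_GLOBAL when the CPU supports
PGE, letting these mappings survive %cr3 reloads. One could factor the two
loops into a helper (sketch):

    static void pmap_bootstrap_share_range(char *start, char *end,
                                           int pteflags)
    {
            vm_offset_t va;

            for (va = (vm_offset_t) start; va < (vm_offset_t) end;
                 va += INTEL_PGBYTES)
                    pmap_bootstrap_shared_kernel_page(va, pteflags);
    }
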
/* Architecture-specific code will turn on paging
@@ -1001,8 +1105,7 @@ void pmap_init(void)
KMEM_CACHE_PHYSMEM);
#if PAE
kmem_cache_init(&pdpt_cache, "pdpt",
- PDPNUM * sizeof(pt_entry_t),
- PDPNUM * sizeof(pt_entry_t), NULL,
+ INTEL_PGBYTES, INTEL_PGBYTES, NULL,
KMEM_CACHE_PHYSMEM);
#endif
s = (vm_size_t) sizeof(struct pv_entry);
@@ -1164,6 +1267,9 @@ pmap_page_table_page_dealloc(vm_offset_t pa)
pmap_t pmap_create(vm_size_t size)
{
pt_entry_t *page_dir[PDPNUM];
+#ifdef KPTI
+ pt_entry_t *user_page_dir[PDPNUM];
+#endif
int i;
pmap_t p;
pmap_statistics_t stats;
@@ -1192,14 +1298,40 @@ pmap_t pmap_create(vm_size_t size)
while (i >= 0) {
kmem_cache_free(&pd_cache,
(vm_address_t) page_dir[i]);
+#ifdef KPTI
+ kmem_cache_free(&pd_cache,
+ (vm_address_t) user_page_dir[i]);
+#endif
+ i -= 1;
+ }
+ kmem_cache_free(&pmap_cache, (vm_address_t) p);
+ return PMAP_NULL;
+ }
+#ifdef KPTI
+ user_page_dir[i] = (pt_entry_t *) kmem_cache_alloc(&pd_cache);
+ if (user_page_dir[i] == NULL) {
+ kmem_cache_free(&pd_cache,
+ (vm_address_t) page_dir[i]);
+ i -= 1;
+ while (i >= 0) {
+ kmem_cache_free(&pd_cache,
+ (vm_address_t) page_dir[i]);
+ kmem_cache_free(&pd_cache,
+ (vm_address_t) user_page_dir[i]);
i -= 1;
}
kmem_cache_free(&pmap_cache, (vm_address_t) p);
return PMAP_NULL;
}
+#endif
memcpy(page_dir[i],
(void *) kernel_page_dir + i * INTEL_PGBYTES,
INTEL_PGBYTES);
+#ifdef KPTI
+ memcpy(user_page_dir[i],
+ (void *) user_page_dir + i * INTEL_PGBYTES,
+ INTEL_PGBYTES);
+#endif
}
#ifdef LINUX_DEV
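
Caution on the hunk above: inside pmap_create() the local array
user_page_dir[PDPNUM] shadows the global template of the same name, so the
second memcpy() copies from the local pointer array rather than from the
template directory, unlike the kernel copy, which reads the global
kernel_page_dir. The intended code is presumably the following, with one of
the two objects renamed (names here are hypothetical):

    /* Global template, as allocated in pmap_bootstrap(): */
    extern pt_entry_t *user_page_dir_template;

    /* In pmap_create(), with the local array renamed accordingly: */
    memcpy(user_page_dir_local[i],
           (void *) user_page_dir_template + i * INTEL_PGBYTES,
           INTEL_PGBYTES);
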
@@ -1226,22 +1358,51 @@ pmap_t pmap_create(vm_size_t size)
p->pdpbase = (pt_entry_t *) kmem_cache_alloc(&pdpt_cache);
if (p->pdpbase == NULL) {
for (i = 0; i < PDPNUM; i++)
+ {
kmem_cache_free(&pd_cache, (vm_address_t) page_dir[i]);
+#ifdef KPTI
+ kmem_cache_free(&pd_cache, (vm_address_t) user_page_dir[i]);
+#endif
+ }
+ kmem_cache_free(&pmap_cache, (vm_address_t) p);
+ return PMAP_NULL;
+ }
+
+#ifdef KPTI
+ p->pdpbase_user = (pt_entry_t *) kmem_cache_alloc(&pdpt_cache);
+ if (p->pdpbase_user == NULL) {
+ kmem_cache_free(&pdpt_cache, (vm_address_t) p->pdpbase);
+ for (i = 0; i < PDPNUM; i++)
+ {
+ kmem_cache_free(&pd_cache, (vm_address_t) page_dir[i]);
+ kmem_cache_free(&pd_cache, (vm_address_t) user_page_dir[i]);
+ }
kmem_cache_free(&pmap_cache, (vm_address_t) p);
return PMAP_NULL;
}
+#endif
{
for (i = 0; i < PDPNUM; i++)
+ {
WRITE_PTE(&p->pdpbase[i],
pa_to_pte(kvtophys((vm_offset_t) page_dir[i]))
| INTEL_PTE_VALID);
+#ifdef KPTI
+ WRITE_PTE(&p->pdpbase_user[i],
+ pa_to_pte(kvtophys((vm_offset_t) user_page_dir[i]))
+ | INTEL_PTE_VALID);
+#endif
+ }
}
#ifdef MACH_PV_PAGETABLES
pmap_set_page_readonly(p->pdpbase);
#endif /* MACH_PV_PAGETABLES */
#else /* PAE */
p->dirbase = page_dir[0];
+#ifdef KPTI
+ p->dirbase_user = user_page_dir[0];
+#endif
#endif /* PAE */
p->ref_count = 1;
@@ -1273,6 +1434,9 @@ void pmap_destroy(pmap_t p)
#endif
boolean_t free_all;
pt_entry_t *page_dir;
+#ifdef KPTI
+ pt_entry_t *user_page_dir;
+#endif
pt_entry_t *pdep;
phys_addr_t pa;
int c, s;
@@ -1295,9 +1459,15 @@ void pmap_destroy(pmap_t p)
for (i = 0; i <= lin2pdpnum(LINEAR_MIN_KERNEL_ADDRESS); i++) {
free_all = i < lin2pdpnum(LINEAR_MIN_KERNEL_ADDRESS);
page_dir = (pt_entry_t *) ptetokv(p->pdpbase[i]);
+#ifdef KPTI
+ user_page_dir = (pt_entry_t *) ptetokv(p->pdpbase_user[i]);
+#endif
#else
free_all = FALSE;
page_dir = p->dirbase;
+#ifdef KPTI
+ user_page_dir = p->dirbase_user;
+#endif
#endif
/*
@@ -1331,6 +1501,9 @@ void pmap_destroy(pmap_t p)
pmap_set_page_readwrite((void*) page_dir);
#endif /* MACH_PV_PAGETABLES */
kmem_cache_free(&pd_cache, (vm_offset_t) page_dir);
+#ifdef KPTI
+ kmem_cache_free(&pd_cache, (vm_offset_t) user_page_dir);
+#endif
#if PAE
}
@@ -1912,7 +2085,7 @@ Retry:
* Need to allocate a new page-table page.
*/
vm_offset_t ptp;
- pt_entry_t *pdp;
+ pt_entry_t *pdp, *pdpu;
int i;
if (pmap == kernel_pmap) {
@@ -1953,6 +2126,7 @@ Retry:
*/
i = ptes_per_vm_page;
pdp = pmap_pde(pmap, v);
+ pdpu = pmap_user_pde(pmap, v);
do {
#ifdef MACH_PV_PAGETABLES
pmap_set_page_readonly((void *) ptp);
@@ -1964,9 +2138,9 @@ Retry:
| INTEL_PTE_WRITE))
panic("%s:%d could not set pde %p(%p,%p) to %p(%p,%p) %p\n",__FILE__,__LINE__, pdp, kvtophys((vm_offset_t)pdp), (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)pdp)), ptp, kvtophys(ptp), (vm_offset_t) pa_to_ma(kvtophys(ptp)), (vm_offset_t) pa_to_pte(kv_to_ma(ptp)));
#else /* MACH_PV_PAGETABLES */
- *pdp = pa_to_pte(kvtophys(ptp)) | INTEL_PTE_VALID
- | INTEL_PTE_USER
- | INTEL_PTE_WRITE;
+ write_pde(pdp, pdpu, pa_to_pte(kvtophys(ptp)) | INTEL_PTE_VALID
+ | INTEL_PTE_USER
+ | INTEL_PTE_WRITE);
#endif /* MACH_PV_PAGETABLES */
pdp++; /* Note: This is safe b/c we stay in one page. */
ptp += INTEL_PGBYTES;
diff --git a/i386/intel/pmap.h b/i386/intel/pmap.h
index 5fa2a0c4..45a88ea1 100644
--- a/i386/intel/pmap.h
+++ b/i386/intel/pmap.h
@@ -86,6 +86,16 @@ typedef phys_addr_t pt_entry_t;
#define PTEMASK 0x3ff /* mask for page table index */
#endif /* PAE */
+#ifndef MACH_HYP
+#define KPTI 1
+#endif
+
+extern char _sharedtext_start[], _sharedtext_end[];
+extern char _shareddata_start[], _shareddata_end[];
+
+/* These are used by user/kernel switch assembler code to switch page table. */
+extern vm_offset_t kernel_pt, user_pt;
+
/*
* Convert linear offset to page descriptor index
*/
@@ -164,8 +174,14 @@ typedef volatile long cpu_set; /* set of CPUs - must be <= 32 */
struct pmap {
#if ! PAE
pt_entry_t *dirbase; /* page directory table */
+#ifdef KPTI
+ pt_entry_t *dirbase_user; /* page directory table for user */
+#endif
#else
pt_entry_t *pdpbase; /* page directory pointer table */
+#ifdef KPTI
+ pt_entry_t *pdpbase_user; /* page directory pointer table for user */
+#endif
#endif /* ! PAE */
int ref_count; /* reference count */
decl_simple_lock_data(,lock)
@@ -186,12 +202,6 @@ extern void pmap_map_mfn(void *addr, unsigned long mfn);
extern void pmap_clear_bootstrap_pagetable(pt_entry_t *addr);
#endif /* MACH_PV_PAGETABLES */
-#if PAE
-#define set_pmap(pmap) set_cr3(kvtophys((vm_offset_t)(pmap)->pdpbase))
-#else /* PAE */
-#define set_pmap(pmap) set_cr3(kvtophys((vm_offset_t)(pmap)->dirbase))
-#endif /* PAE */
-
typedef struct {
pt_entry_t *entry;
vm_offset_t vaddr;
@@ -455,12 +465,33 @@ extern void pmap_zero_page (phys_addr_t);
extern void pmap_copy_page (phys_addr_t, phys_addr_t);
/*
+ * pmap_share_kernel_page marks a kernel page as shared, entering it
+ * into the pmap's user page table so it stays mapped while userland runs.
+ */
+#ifdef KPTI
+extern void pmap_share_kernel_page(pmap_t pmap, vm_offset_t va, int pteflags);
+#else
+#define pmap_share_kernel_page(pmap, va, pteflags) ((void)0)
+#endif
+
+/*
* kvtophys(addr)
*
* Convert a kernel virtual address to a physical address
*/
extern phys_addr_t kvtophys (vm_offset_t);
+static inline void set_pmap(pmap_t pmap) {
+#if PAE
+ kernel_pt = kvtophys((vm_offset_t)(pmap)->pdpbase);
+ user_pt = kvtophys((vm_offset_t)(pmap)->pdpbase_user);
+#else /* PAE */
+ kernel_pt = kvtophys((vm_offset_t)(pmap)->dirbase);
+ user_pt = kvtophys((vm_offset_t)(pmap)->dirbase_user);
+#endif /* PAE */
+ /* We are using the kernel page table for now. */
+ set_cr3(kernel_pt);
+}
+
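
set_pmap() used to load %cr3 directly; it now records both roots in the
kernel_pt/user_pt globals consumed by locore.S and keeps running on the
kernel table. Note that as written it references pdpbase_user and
dirbase_user unconditionally, which would seem not to compile when KPTI is
undefined (MACH_HYP builds), since those struct pmap fields only exist
under #ifdef KPTI. A sketch of the intended flow (the call site is an
assumption, not part of this patch):

    void example_activate(pmap_t next)
    {
            set_pmap(next); /* record kernel_pt/user_pt, load kernel_pt */
            /*
             * Every kernel entry then loads kernel_pt via
             * SWITCH_TO_KERNEL_PT, and the exit path is meant to load
             * user_pt via SWITCH_TO_USER_PT (still disabled here).
             */
    }
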
#if NCPUS > 1
void signal_cpus(
cpu_set use_list,
diff --git a/i386/ldscript b/i386/ldscript
index ddbbf910..79f25ea5 100644
--- a/i386/ldscript
+++ b/i386/ldscript
@@ -16,6 +16,14 @@ SECTIONS
AT (_START_MAP)
{
*(.text.start)
+
+ /* Separate pages for what should be mapped along userland */
+ . = ALIGN (CONSTANT(MAXPAGESIZE));
+ PROVIDE (_sharedtext_start = .);
+ *(.text.shared)
+ . = ALIGN (CONSTANT(MAXPAGESIZE));
+ PROVIDE (_sharedtext_end = .);
+
*(.text .stub .text.* .gnu.linkonce.t.*)
*(.text.unlikely .text.*_unlikely)
KEEP (*(.text.*personality*))
@@ -138,6 +146,14 @@ SECTIONS
.got.plt : { *(.got.plt) *(.igot.plt) }
.data :
{
+
+ /* Separate pages for what should be mapped along userland */
+ . = ALIGN (CONSTANT(MAXPAGESIZE));
+ PROVIDE (_shareddata_start = .);
+ *(.data.shared)
+ . = ALIGN (CONSTANT(MAXPAGESIZE));
+ PROVIDE (_shareddata_end = .);
+
*(.data .data.* .gnu.linkonce.d.*)
SORT(CONSTRUCTORS)
}
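
The ldscript changes create the page-aligned windows everything above
refers to: aligning to MAXPAGESIZE before and after each region guarantees
that sharing those pages exposes nothing that merely happens to sit in an
adjacent page. On the C side the PROVIDE()d markers are consumed as the
extern char arrays declared in pmap.h; a small usage sketch (INTEL_PGBYTES
is 4096 on i386):

    extern char _sharedtext_start[], _sharedtext_end[];
    extern char _shareddata_start[], _shareddata_end[];

    static unsigned long
    shared_pages(char *start, char *end)
    {
            /* Both markers are page-aligned by the ldscript. */
            return (unsigned long) (end - start) / 4096;
    }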