}
+struct mmu_context {		/* Per-process software MMU state */
+ struct memblk mmu;		/* Head of this process's memory block chain (walked by vmmu_dup/vmmu_free) */
+ uint8_t *base;		/* Base address of the process's memory — presumably within membase..memtop; TODO confirm */
+};
+
/* NOTE(review): presumably a cache of the most recently activated context so
   a switch back to the same process can skip MMU reload — confirm at call sites */
static uint16_t last_context;
static struct mmu_context *last_mmu;
/* One software MMU context per process table slot.
   Fix: the array declaration was missing its terminating semicolon. */
+static struct mmu_context mmu_context[PTABSIZE];
/* For the moment. If we later add separate virtual-address info we can use
   that; if not, we use the map table. */
}
if (err)
*err = 0;
+#ifdef MISALIGNED
if (MISALIGNED(uaddr, 4)) {
*err = -1;
ssig(udata.u_proc, SIGBUS);
return -1;
}
+#endif
return *(uint32_t *)uaddr;
}
{
if (!valaddr(uaddr, 4))
return -1;
+#ifdef MISALIGNED
if (MISALIGNED(uaddr, 4)) {
ssig(udata.u_proc, SIGBUS);
return -1;
}
+#endif
return *(uint32_t *)uaddr;
}
return freemem >> 10;
}
+/* Duplicate the MMU block chain of process p into the current (udata) slot
+   at fork() time. p presumably identifies the parent whose map is copied —
+   TODO confirm against the caller. Returns vmmu_dup's result (non-zero on
+   failure — presumably; confirm vmmu_dup's contract). */
+int pagemap_fork(ptptr p)
+{
+ struct mmu_context *parent = mmu_context + p->page;
+ struct mmu_context *child = mmu_context + udata.u_page;
+ /* Fix: child is a pointer, so member access needs '->' not '.' —
+    the original '&child.mmu' did not compile. */
+ return vmmu_dup(parent->mmu.next, &child->mmu);
+}
+
/* Syscall argument accessors. NOTE(review): lowercase object-like macro names
   'size' and 'flags' will silently rewrite any local identifiers of the same
   name below this point — a shadowing hazard worth renaming eventually. */
#define size (uint32_t)udata.u_argn
#define flags (uint32_t)udata.u_argn1
/* Virtual-MMU block-chain helpers, defined elsewhere. */
extern void vmmu_free(struct memblk *list);	/* Release an entire block chain */
extern int vmmu_dup(struct memblk *list, struct memblk *dest);	/* Copy a chain into dest; presumably 0 on success — confirm */
+extern int pagemap_fork(ptptr p);
+
/* Platform provided. Must be suitably aligned */
extern uint8_t *membase;
extern uint8_t *memtop;