Compare commits

...

21 Commits

Author SHA1 Message Date
Ben Gras
7be1d47d24 don't always copy for i/o instructions. 2009-06-08 15:59:29 +00:00
Ben Gras
3765538c76 merging in memory freeer. 2009-06-08 15:31:45 +00:00
Ben Gras
01732ffb5e quick hack (sorry) for making *sdevio* work to other
processes than the caller..

also disable kernel sanity checks
2009-06-08 14:33:15 +00:00
Ben Gras
8a0ab8630c rebase from trunk 2009-06-08 12:25:24 +00:00
Ben Gras
a236a39e33 .. 2009-06-08 07:33:42 +00:00
Ben Gras
a2358ad071 2009-06-08 06:35:18 +00:00
Ben Gras
de1b5e0076 cleanup, sanity checking 2009-06-08 06:08:11 +00:00
Ben Gras
782133423e allow empty senda 2009-06-08 04:39:26 +00:00
Ben Gras
9e72241374 minor cleanup 2009-06-08 04:30:16 +00:00
Ben Gras
ac86f5bb49 keep some processes mapped in always; direct message copying
where possible (no buffering); no more explicit vm checkranges
in kernel; new allocator for vm using avl tree without needing
remapping
2009-06-08 04:02:22 +00:00
Ben Gras
e2a7535c55 minor cleanup 2009-06-07 16:25:37 +00:00
Ben Gras
0702c826a2 pde cache check works
no more silly vm checkranges
2009-06-07 15:55:44 +00:00
Ben Gras
4dae6c4bbc my state.
trying to get some memory optimisation (less pagetable reloading,
less tlb purging) features working smoothly.

to be documented when committing to trunk :)
2009-06-06 23:27:10 +00:00
Ben Gras
9d56ac3fc9 only switch pagetable if necessary and it's different for copying messages 2009-06-03 15:28:13 +00:00
Ben Gras
37cd6bc06c move field offset from vir region from phys_block to phys_region, so
the same blocks of physical memory can be mapped in in different offsets
within regions.
2009-06-03 12:18:13 +00:00
Ben Gras
eb2959a560 state 2009-06-03 11:22:49 +00:00
Ben Gras
f16eb59bbf further messing with page fault handling 2009-05-29 18:47:31 +00:00
Ben Gras
85881e9995 no vm_setbuf any more (kernel doesn't create its page table any more),
no relocking field (locks not checked)
2009-05-28 14:23:38 +00:00
Ben Gras
9b73964f6d beng work in progress, to be explained in a future commit message :). 2009-05-28 13:47:20 +00:00
Ben Gras
78e5d6d4eb unused confusing malloc/free functions in sysutil 2009-05-28 12:14:37 +00:00
Ben Gras
6579bb3656 copy of beng's working copy 2009-05-20 16:54:58 +00:00
92 changed files with 3754 additions and 2204 deletions

View File

@ -493,7 +493,8 @@ PRIVATE void init_params()
dma_buf = mmap(0, ATA_DMA_BUF_SIZE, PROT_READ|PROT_WRITE,
MAP_PREALLOC | MAP_CONTIG | MAP_ANON, -1, 0);
prdt = mmap(0, PRDT_BYTES,
PROT_READ|PROT_WRITE, MAP_CONTIG | MAP_ANON, -1, 0);
PROT_READ|PROT_WRITE,
MAP_PREALLOC | MAP_CONTIG | MAP_ANON, -1, 0);
if(dma_buf == MAP_FAILED || prdt == MAP_FAILED) {
disable_dma = 1;
printf("at_wini%d: no dma\n", w_instance);

View File

@ -522,6 +522,7 @@ pci_init();
}
else
{
printf("lance buf: 0x%lx\n", vir2phys(lance_buf));
report( "LANCE", "DMA denied because address out of range", NO_NUM );
}

View File

@ -403,7 +403,8 @@ int safe;
#endif
/* Try to allocate a piece of memory for the RAM disk. */
if((mem = mmap(0, ramdev_size, PROT_READ|PROT_WRITE, MAP_ANON, -1, 0)) == MAP_FAILED) {
if((mem = mmap(0, ramdev_size, PROT_READ|PROT_WRITE,
MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
printf("MEM: failed to get memory for ramdisk\n");
return(ENOMEM);
}

View File

@ -33,8 +33,8 @@ Created: Jan 2000 by Philip Homburg <philip@cs.vu.nl>
#include <string.h>
#include <minix/sysutil.h>
#define NR_PCIBUS 10
#define NR_PCIDEV 40
#define NR_PCIBUS 40
#define NR_PCIDEV 50
#define PBT_INTEL_HOST 1
#define PBT_PCIBRIDGE 2
@ -1135,16 +1135,24 @@ int bar_nr;
{
int reg, prefetch, type, dev_bar_nr;
u32_t bar, bar2;
u16_t cmd;
reg= PCI_BAR+4*bar_nr;
bar= pci_attr_r32_u(devind, reg);
if (bar & PCI_BAR_IO)
{
/* Size register */
/* Disable I/O access before probing for BAR's size */
cmd = pci_attr_r16(devind, PCI_CR);
pci_attr_w16(devind, PCI_CR, cmd & ~PCI_CR_IO_EN);
/* Probe BAR's size */
pci_attr_w32(devind, reg, 0xffffffff);
bar2= pci_attr_r32_u(devind, reg);
/* Restore original state */
pci_attr_w32(devind, reg, bar);
pci_attr_w16(devind, PCI_CR, cmd);
bar &= ~(u32_t)3; /* Clear non-address bits */
bar2 &= ~(u32_t)3;
@ -1168,10 +1176,17 @@ int bar_nr;
}
else
{
/* Size register */
/* Disable mem access before probing for BAR's size */
cmd = pci_attr_r16(devind, PCI_CR);
pci_attr_w16(devind, PCI_CR, cmd & ~PCI_CR_MEM_EN);
/* Probe BAR's size */
pci_attr_w32(devind, reg, 0xffffffff);
bar2= pci_attr_r32_u(devind, reg);
/* Restore original values */
pci_attr_w32(devind, reg, bar);
pci_attr_w16(devind, PCI_CR, cmd);
if (bar2 == 0)
return; /* Reg. is not implemented */
@ -2398,7 +2413,7 @@ u32_t value;
#if 0
printf("pcii_wreg32(%d, %d, 0x%X, 0x%X): %d.%d.%d\n",
busind, devind, port, value,
pcibus[busind].pb_bus, pcidev[devind].pd_dev,
pcibus[busind].pb_busnr, pcidev[devind].pd_dev,
pcidev[devind].pd_func);
#endif
PCII_WREG32_(pcibus[busind].pb_busnr,

View File

@ -305,7 +305,7 @@ message *m_ptr; /* pointer to alarm message */
random_update(RND_TIMING, &r, 1);
/* Schedule new alarm for next m_random call. */
if (OK != (s=sys_setalarm(KRANDOM_PERIOD, 0)))
if (OK != (s=sys_setalarm(random_isseeded() ? KRANDOM_PERIOD : sys_hz(), 0)))
report("RANDOM", "sys_setalarm failed", s);
}

View File

@ -52,7 +52,6 @@ PRIVATE unsigned font_lines; /* font lines per character */
PRIVATE unsigned scr_width; /* # characters on a line */
PRIVATE unsigned scr_lines; /* # lines on the screen */
PRIVATE unsigned scr_size; /* # characters on the screen */
PUBLIC unsigned info_location; /* location in video memory of struct */
/* tells mem_vid_copy() to blank the screen */
#define BLANK_MEM ((vir_bytes) 0)
@ -66,9 +65,6 @@ PRIVATE int disabled_sm; /* Scroll mode to be restored when re-enabling
char *console_memory = NULL;
/* boot_tty_info we use to communicate with the boot code. */
struct boot_tty_info boot_tty_info;
/* Per console data. */
typedef struct console {
tty_t *c_tty; /* associated TTY struct */
@ -90,24 +86,14 @@ typedef struct console {
int c_line; /* line no */
} console_t;
#define UPDATEBOOTINFO(ccons, infofield, value) { \
if(ccons->c_line == 0) { \
boot_tty_info.infofield = value; \
mem_vid_copy((vir_bytes) &boot_tty_info, \
info_location/2, sizeof(boot_tty_info)/2); \
} \
}
#define UPDATE_CURSOR(ccons, cursor) { \
ccons->c_cur = cursor; \
UPDATEBOOTINFO(ccons, conscursor, ccons->c_cur); \
if(curcons && ccons == curcons) \
set_6845(CURSOR, ccons->c_cur); \
}
#define UPDATE_ORIGIN(ccons, origin) { \
ccons->c_org = origin; \
UPDATEBOOTINFO(ccons, consorigin, ccons->c_org); \
if (curcons && ccons == curcons) \
set_6845(VID_ORG, ccons->c_org); \
}
@ -1001,13 +987,15 @@ tty_t *tp;
}
if (machine.vdu_ega) vid_size = EGA_SIZE;
wrap = ! machine.vdu_ega;
info_location = vid_size - sizeof(struct boot_tty_info);
console_memory = vm_map_phys(SELF, (void *) vid_base, vid_size);
if(console_memory == MAP_FAILED)
panic("TTY","Console couldn't map video memory", NO_NUM);
printf("TTY: vm_map_phys of 0x%lx OK, result 0x%lx",
vid_base, console_memory);
vid_size >>= 1; /* word count */
vid_mask = vid_size - 1;
@ -1015,7 +1003,7 @@ tty_t *tp;
scr_size = scr_lines * scr_width;
/* There can be as many consoles as video memory allows. */
nr_cons = (vid_size - sizeof(struct boot_tty_info)/2) / scr_size;
nr_cons = vid_size / scr_size;
if (nr_cons > NR_CONS) nr_cons = NR_CONS;
if (nr_cons > 1) wrap = 0;
@ -1038,12 +1026,6 @@ tty_t *tp;
scroll_screen(cons, SCROLL_UP);
cons->c_row = scr_lines - 1;
cons->c_column = 0;
memset(&boot_tty_info, 0, sizeof(boot_tty_info));
UPDATE_CURSOR(cons, cons->c_cur);
boot_tty_info.flags = BTIF_CONSCURSOR | BTIF_CONSORIGIN;
boot_tty_info.magic = TTYMAGIC;
UPDATE_ORIGIN(cons, cons->c_org);
}
select_console(0);
cons_ioctl(tp, 0);

View File

@ -766,8 +766,22 @@ int scode; /* scan code of key just struck or released */
return -1;
if(ch)
return ch;
printf("tty: ignoring unrecognized %s scancode 0x%x\n",
escape ? "escaped" : "straight", scode);
{
static char seen[2][NR_SCAN_CODES];
int notseen = 0, ei;
ei = escape ? 1 : 0;
if(scode >= 0 && scode < NR_SCAN_CODES) {
notseen = !seen[ei][scode];
seen[ei][scode] = 1;
} else {
printf("tty: scode %d makes no sense\n", scode);
}
if(notseen) {
printf("tty: ignoring unrecognized %s "
"scancode 0x%x\n",
escape ? "escaped" : "straight", scode);
}
}
return -1;
}

View File

@ -9,6 +9,7 @@ Created: Jan 2000 by Philip Homburg <philip@cs.vu.nl>
#define PCI_DID 0x02 /* Device ID, 16-bit */
#define PCI_CR 0x04 /* Command Register, 16-bit */
#define PCI_CR_MAST_EN 0x0004 /* Enable Busmaster Access */
#define PCI_CR_MEM_EN 0x0002 /* Enable Mem Cycles */
#define PCI_CR_IO_EN 0x0001 /* Enable I/O Cycles */
#define PCI_SR 0x06 /* PCI status, 16-bit */
#define PSR_SSE 0x4000 /* Signaled System Error */

View File

@ -311,7 +311,6 @@
# define SYS_GETINFO (KERNEL_CALL + 26) /* sys_getinfo() */
# define SYS_ABORT (KERNEL_CALL + 27) /* sys_abort() */
# define SYS_IOPENABLE (KERNEL_CALL + 28) /* sys_enable_iop() */
# define SYS_VM_SETBUF (KERNEL_CALL + 29) /* sys_vm_setbuf() */
# define SYS_SAFECOPYFROM (KERNEL_CALL + 31) /* sys_safecopyfrom() */
# define SYS_SAFECOPYTO (KERNEL_CALL + 32) /* sys_safecopyto() */
# define SYS_VSAFECOPY (KERNEL_CALL + 33) /* sys_vsafecopy() */
@ -526,6 +525,7 @@
* and sys_fork
*/
#define PR_FORK_FLAGS m1_i3
#define PR_FORK_MSGADDR m1_p1
/* Field names for SYS_INT86 */
#define INT86_REG86 m1_p1 /* pointer to registers */
@ -585,6 +585,7 @@
#define SVMCTL_MRG_LEN m1_i1 /* MEMREQ_GET reply: length */
#define SVMCTL_MRG_WRITE m1_i2 /* MEMREQ_GET reply: writeflag */
#define SVMCTL_MRG_EP m1_i3 /* MEMREQ_GET reply: process */
#define SVMCTL_MRG_REQUESTOR m1_p2 /* MEMREQ_GET reply: requestor */
/* Codes and field names for SYS_SYSCTL. */
#define SYSCTL_CODE m1_i1 /* SYSCTL_CODE_* below */
@ -603,6 +604,11 @@
#define VMCTL_MEMREQ_REPLY 15
#define VMCTL_INCSP 16
#define VMCTL_NOPAGEZERO 18
#define VMCTL_I386_KERNELLIMIT 19
#define VMCTL_I386_PAGEDIRS 20
#define VMCTL_I386_FREEPDE 23
#define VMCTL_ENABLE_PAGING 24
#define VMCTL_I386_INVLPG 25
/*===========================================================================*
* Messages for the Reincarnation Server *

View File

@ -95,7 +95,4 @@
#define SPROFILE 1 /* statistical profiling */
#define CPROFILE 0 /* call profiling */
/* Compile kernel so that first page of code and data can be unmapped. */
#define VM_KERN_NOPAGEZERO 1
#endif /* _CONFIG_H */

13
include/minix/debug.h Normal file
View File

@ -0,0 +1,13 @@
#ifndef _MINIX_DEBUG_H
#define _MINIX_DEBUG_H 1
/* For reminders of things to be fixed. */
#define FIXME(str) { static int fixme_warned = 0; \
if(!fixme_warned) { \
printf("FIXME: %s:%d: %s\n", __FILE__, __LINE__, str);\
fixme_warned = 1; \
} \
}
#endif /* _MINIX_DEBUG_H */

View File

@ -35,7 +35,7 @@ _PROTOTYPE( int sys_enable_iop, (endpoint_t proc));
_PROTOTYPE( int sys_exec, (endpoint_t proc, char *ptr,
char *aout, vir_bytes initpc));
_PROTOTYPE( int sys_fork, (endpoint_t parent, endpoint_t child, int *,
struct mem_map *ptr, u32_t vm));
struct mem_map *ptr, u32_t vm, vir_bytes *));
_PROTOTYPE( int sys_newmap, (endpoint_t proc, struct mem_map *ptr));
_PROTOTYPE( int sys_exit, (endpoint_t proc));
_PROTOTYPE( int sys_trace, (int req, endpoint_t proc, long addr, long *data_p));
@ -53,9 +53,8 @@ _PROTOTYPE( int sys_vmctl, (endpoint_t who, int param, u32_t value));
_PROTOTYPE( int sys_vmctl_get_pagefault_i386, (endpoint_t *who, u32_t *cr2, u32_t *err));
_PROTOTYPE( int sys_vmctl_get_cr3_i386, (endpoint_t who, u32_t *cr3) );
_PROTOTYPE( int sys_vmctl_get_memreq, (endpoint_t *who, vir_bytes *mem,
vir_bytes *len, int *wrflag) );
vir_bytes *len, int *wrflag, endpoint_t *) );
_PROTOTYPE( int sys_vmctl_enable_paging, (struct mem_map *));
_PROTOTYPE( int sys_readbios, (phys_bytes address, void *buf, size_t size));
_PROTOTYPE( int sys_stime, (time_t boottime));

View File

@ -97,9 +97,6 @@ struct kinfo {
int nr_tasks; /* number of kernel tasks */
char release[6]; /* kernel release number */
char version[6]; /* kernel version number */
#if DEBUG_LOCK_CHECK
int relocking; /* interrupt locking depth (should be 0) */
#endif
};
/* Load data accounted every this no. of seconds. */

View File

@ -14,6 +14,7 @@ sys/vm_i386.h
#define I386_VM_ACC 0x020 /* Accessed */
#define I386_VM_ADDR_MASK 0xFFFFF000 /* physical address */
#define I386_VM_ADDR_MASK_4MB 0xFFC00000 /* physical address */
#define I386_VM_OFFSET_MASK_4MB 0x003FFFFF /* offset within 4MB page */
/* Page directory specific flags. */
#define I386_VM_BIGPAGE 0x080 /* 4MB page */
@ -35,6 +36,12 @@ sys/vm_i386.h
#define I386_VM_PFA_SHIFT 22 /* Page frame address shift */
/* CR0 bits */
#define I386_CR0_PE 0x00000001 /* Protected mode */
#define I386_CR0_MP 0x00000002 /* Monitor Coprocessor */
#define I386_CR0_EM 0x00000004 /* Emulate */
#define I386_CR0_TS 0x00000008 /* Task Switched */
#define I386_CR0_ET 0x00000010 /* Extension Type */
#define I386_CR0_WP 0x00010000 /* Write protect (supervisor honors R/O pages) */
#define I386_CR0_PG 0x80000000 /* Enable paging */
/* some CR4 bits */

View File

@ -10,7 +10,9 @@
#include "../../system.h"
#include <minix/type.h>
extern u32_t kernel_cr3;
#include "proto.h"
extern u32_t *vm_pagedirs;
/*===========================================================================*
* arch_do_vmctl *
@ -30,7 +32,7 @@ struct proc *p;
p->p_seg.p_cr3 = m_ptr->SVMCTL_VALUE;
p->p_misc_flags |= MF_FULLVM;
} else {
p->p_seg.p_cr3 = kernel_cr3;
p->p_seg.p_cr3 = 0;
p->p_misc_flags &= ~MF_FULLVM;
}
RTS_LOCK_UNSET(p, VMINHIBIT);
@ -53,8 +55,28 @@ struct proc *p;
m_ptr->SVMCTL_PF_I386_ERR = rp->p_pagefault.pf_flags;
return OK;
}
case VMCTL_I386_KERNELLIMIT:
{
int r;
/* VM wants kernel to increase its segment. */
r = prot_set_kern_seg_limit(m_ptr->SVMCTL_VALUE);
return r;
}
case VMCTL_I386_PAGEDIRS:
{
int pde;
vm_pagedirs = (u32_t *) m_ptr->SVMCTL_VALUE;
return OK;
}
case VMCTL_I386_FREEPDE:
{
i386_freepde(m_ptr->SVMCTL_VALUE);
return OK;
}
}
kprintf("arch_do_vmctl: strange param %d\n", m_ptr->SVMCTL_PARAM);
return EINVAL;
}

View File

@ -24,6 +24,8 @@
PUBLIC int do_sdevio(m_ptr)
register message *m_ptr; /* pointer to request message */
{
vir_bytes newoffset;
endpoint_t newep;
int proc_nr, proc_nr_e = m_ptr->DIO_VEC_ENDPT;
int count = m_ptr->DIO_VEC_SIZE;
long port = m_ptr->DIO_PORT;
@ -32,6 +34,9 @@ register message *m_ptr; /* pointer to request message */
struct proc *rp;
struct priv *privp;
struct io_range *iorp;
int rem;
vir_bytes addr;
struct proc *destproc;
/* Allow safe copies and accesses to SELF */
if ((m_ptr->DIO_REQUEST & _DIO_SAFEMASK) != _DIO_SAFE &&
@ -64,11 +69,23 @@ register message *m_ptr; /* pointer to request message */
/* Check for 'safe' variants. */
if((m_ptr->DIO_REQUEST & _DIO_SAFEMASK) == _DIO_SAFE) {
/* Map grant address to physical address. */
if ((phys_buf = umap_verify_grant(proc_addr(proc_nr), who_e,
if(verify_grant(proc_nr_e, who_e,
(vir_bytes) m_ptr->DIO_VEC_ADDR,
(vir_bytes) m_ptr->DIO_OFFSET, count,
req_dir == _DIO_INPUT ? CPF_WRITE : CPF_READ)) == 0)
return(EPERM);
count,
req_dir == _DIO_INPUT ? CPF_WRITE : CPF_READ,
(vir_bytes) m_ptr->DIO_OFFSET,
&newoffset, &newep) != OK) {
printf("do_sdevio: verify_grant failed\n");
return EPERM;
}
if(!isokendpt(newep, &proc_nr))
return(EINVAL);
destproc = proc_addr(proc_nr);
if ((phys_buf = umap_local(destproc, D,
(vir_bytes) newoffset, count)) == 0) {
printf("do_sdevio: umap_local failed\n");
return(EFAULT);
}
} else {
if(proc_nr != who_p)
{
@ -77,10 +94,14 @@ register message *m_ptr; /* pointer to request message */
return EPERM;
}
/* Get and check physical address. */
if ((phys_buf = umap_virtual(proc_addr(proc_nr), D,
if ((phys_buf = umap_local(proc_addr(proc_nr), D,
(vir_bytes) m_ptr->DIO_VEC_ADDR, count)) == 0)
return(EFAULT);
destproc = proc_addr(proc_nr);
}
/* current process must be target for phys_* to be OK */
vm_set_cr3(destproc);
switch (io_type)
{

View File

@ -10,72 +10,130 @@
#include <string.h>
#include <minix/sysutil.h>
#include "../../proc.h"
#include "../../proto.h"
#include "../../vm.h"
extern int vm_copy_in_progress;
extern int vm_copy_in_progress, catch_pagefaults;
extern struct proc *vm_copy_from, *vm_copy_to;
extern u32_t vm_copy_from_v, vm_copy_to_v;
extern u32_t vm_copy_from_p, vm_copy_to_p, vm_copy_cr3;
u32_t pagefault_cr2, pagefault_count = 0;
void pagefault(struct proc *pr, int trap_errno)
void pagefault(vir_bytes old_eip, struct proc *pr, int trap_errno,
u32_t *old_eipptr, u32_t *old_eaxptr, u32_t pagefaultcr2)
{
int s;
vir_bytes ph;
u32_t pte;
int procok = 0, pcok = 0, rangeok = 0;
int in_physcopy = 0;
vir_bytes test_eip;
if(pagefault_count != 1)
minix_panic("recursive pagefault", pagefault_count);
vmassert(old_eipptr);
vmassert(old_eaxptr);
/* Don't schedule this process until pagefault is handled. */
if(RTS_ISSET(pr, PAGEFAULT))
minix_panic("PAGEFAULT set", pr->p_endpoint);
RTS_LOCK_SET(pr, PAGEFAULT);
vmassert(*old_eipptr == old_eip);
vmassert(old_eipptr != &old_eip);
if(pr->p_endpoint <= INIT_PROC_NR && !(pr->p_misc_flags & MF_FULLVM)) {
#if 0
printf("kernel: pagefault in pr %d, addr 0x%lx, his cr3 0x%lx, actual cr3 0x%lx\n",
pr->p_endpoint, pagefaultcr2, pr->p_seg.p_cr3, read_cr3());
#endif
if(pr->p_seg.p_cr3) {
#if 0
vm_print(pr->p_seg.p_cr3);
#endif
vmassert(pr->p_seg.p_cr3 == read_cr3());
} else {
u32_t cr3;
int u = 0;
if(!intr_disabled()) {
lock;
u = 1;
}
cr3 = read_cr3();
vmassert(ptproc);
if(ptproc->p_seg.p_cr3 != cr3) {
util_stacktrace();
printf("cr3 wrong in pagefault; value 0x%lx, ptproc %s / %d, his cr3 0x%lx, pr %s / %d\n",
cr3,
ptproc->p_name, ptproc->p_endpoint,
ptproc->p_seg.p_cr3,
pr->p_name, pr->p_endpoint);
ser_dump_proc();
vm_print(cr3);
vm_print(ptproc->p_seg.p_cr3);
}
if(u) {
unlock;
}
}
test_eip = k_reenter ? old_eip : pr->p_reg.pc;
in_physcopy = (test_eip > (vir_bytes) phys_copy) &&
(test_eip < (vir_bytes) phys_copy_fault);
if((k_reenter || iskernelp(pr)) &&
catch_pagefaults && in_physcopy) {
#if 0
printf("pf caught! addr 0x%lx\n", pagefaultcr2);
#endif
*old_eipptr = phys_copy_fault;
*old_eaxptr = pagefaultcr2;
return;
}
/* System processes that don't have their own page table can't
* have page faults. VM does have its own page table but also
* can't have page faults (because VM has to handle them).
*/
if(k_reenter || (pr->p_endpoint <= INIT_PROC_NR &&
!(pr->p_misc_flags & MF_FULLVM)) || pr->p_endpoint == VM_PROC_NR) {
/* Page fault we can't / don't want to
* handle.
*/
kprintf("pagefault for process %d ('%s'), pc = 0x%x, addr = 0x%x, flags = 0x%x\n",
kprintf("pagefault for process %d ('%s'), pc = 0x%x, addr = 0x%x, flags = 0x%x, k_reenter %d\n",
pr->p_endpoint, pr->p_name, pr->p_reg.pc,
pagefault_cr2, trap_errno);
pagefaultcr2, trap_errno, k_reenter);
proc_stacktrace(pr);
minix_panic("page fault in system process", pr->p_endpoint);
return;
}
/* Don't schedule this process until pagefault is handled. */
vmassert(pr->p_seg.p_cr3 == read_cr3());
vmassert(!RTS_ISSET(pr, PAGEFAULT));
RTS_LOCK_SET(pr, PAGEFAULT);
/* Save pagefault details, suspend process,
* add process to pagefault chain,
* and tell VM there is a pagefault to be
* handled.
*/
pr->p_pagefault.pf_virtual = pagefault_cr2;
pr->p_pagefault.pf_virtual = pagefaultcr2;
pr->p_pagefault.pf_flags = trap_errno;
pr->p_nextpagefault = pagefaults;
pagefaults = pr;
lock_notify(HARDWARE, VM_PROC_NR);
pagefault_count = 0;
#if 0
kprintf("pagefault for process %d ('%s'), pc = 0x%x\n",
pr->p_endpoint, pr->p_name, pr->p_reg.pc);
proc_stacktrace(pr);
#endif
return;
}
/*===========================================================================*
* exception *
*===========================================================================*/
PUBLIC void exception(vec_nr, trap_errno, old_eip, old_cs, old_eflags)
PUBLIC void exception(vec_nr, trap_errno, old_eip, old_cs, old_eflags,
old_eipptr, old_eaxptr, pagefaultcr2)
unsigned vec_nr;
u32_t trap_errno;
u32_t old_eip;
U16_t old_cs;
u32_t old_eflags;
u32_t *old_eipptr;
u32_t *old_eaxptr;
u32_t pagefaultcr2;
{
/* An exception or unexpected interrupt has occurred. */
@ -108,16 +166,9 @@ struct proc *t;
register struct ex_s *ep;
struct proc *saved_proc;
#if DEBUG_SCHED_CHECK
for (t = BEG_PROC_ADDR; t < END_PROC_ADDR; ++t) {
if(t->p_magic != PMAGIC)
kprintf("entry %d broken\n", t->p_nr);
}
#endif
/* Save proc_ptr, because it may be changed by debug statements. */
saved_proc = proc_ptr;
ep = &ex_data[vec_nr];
if (vec_nr == 2) { /* spurious NMI on some machines */
@ -126,8 +177,9 @@ struct proc *t;
}
if(vec_nr == PAGE_FAULT_VECTOR) {
pagefault(saved_proc, trap_errno);
return;
pagefault(old_eip, saved_proc, trap_errno,
old_eipptr, old_eaxptr, pagefaultcr2);
return;
}
/* If an exception occurs while running a process, the k_reenter variable
@ -184,18 +236,21 @@ PUBLIC void proc_stacktrace(struct proc *proc)
v_bp = proc->p_reg.fp;
kprintf("%8.8s %6d 0x%lx ",
kprintf("%-8.8s %6d 0x%lx ",
proc->p_name, proc->p_endpoint, proc->p_reg.pc);
while(v_bp) {
if(data_copy(proc->p_endpoint, v_bp,
SYSTEM, (vir_bytes) &v_hbp, sizeof(v_hbp)) != OK) {
#define PRCOPY(pr, pv, v, n) \
(iskernelp(pr) ? (memcpy((char *) v, (char *) pv, n), OK) : \
data_copy(pr->p_endpoint, pv, SYSTEM, (vir_bytes) (v), n))
if(PRCOPY(proc, v_bp, &v_hbp, sizeof(v_hbp)) != OK) {
kprintf("(v_bp 0x%lx ?)", v_bp);
break;
}
if(data_copy(proc->p_endpoint, v_bp + sizeof(v_pc),
SYSTEM, (vir_bytes) &v_pc, sizeof(v_pc)) != OK) {
kprintf("(v_pc 0x%lx ?)", v_pc);
if(PRCOPY(proc, v_bp + sizeof(v_pc), &v_pc, sizeof(v_pc)) != OK) {
kprintf("(v_pc 0x%lx ?)", v_bp + sizeof(v_pc));
break;
}
kprintf("0x%lx ", (unsigned long) v_pc);

View File

@ -136,5 +136,6 @@
#define IOPL_MASK 0x003000
#define vir2phys(vir) (kinfo.data_base + (vir_bytes) (vir))
#define phys2vir(ph) ((vir_bytes) (ph) - kinfo.data_base)
#endif /* _I386_ACONST_H */

View File

@ -68,5 +68,7 @@ struct pagefault
u32_t pf_flags; /* Pagefault flags on stack. */
};
#define INMEMORY(p) (!p->p_seg.p_cr3 || ptproc == p)
#endif /* #ifndef _I386_TYPES_H */

View File

@ -8,7 +8,6 @@
#include <ibm/interrupt.h>
#include <archconst.h>
#include "../../const.h"
#include "vm.h"
#include "sconst.h"
! This file contains a number of assembly code utility routines needed by the
@ -28,6 +27,7 @@
.define _intr_unmask ! enable an irq at the 8259 controller
.define _intr_mask ! disable an irq
.define _phys_copy ! copy data from anywhere to anywhere in memory
.define _phys_copy_fault! phys_copy pagefault
.define _phys_memset ! write pattern anywhere in memory
.define _mem_rdw ! copy one word from [segment:offset]
.define _reset ! reset the system
@ -35,13 +35,12 @@
.define _level0 ! call a function at level 0
.define _read_cpu_flags ! read the cpu flags
.define _read_cr0 ! read cr0
.define _write_cr3 ! write cr3
.define _last_cr3
.define _getcr3val
.define _write_cr0 ! write a value in cr0
.define _read_cr4
.define _thecr3
.define _write_cr4
.define _kernel_cr3
.define _catch_pagefaults
! The routines only guarantee to preserve the registers the C compiler
! expects to be preserved (ebx, esi, edi, ebp, esp, segment registers, and
@ -156,55 +155,6 @@ csinit: mov eax, DS_SELECTOR
ret
!*===========================================================================*
!* cp_mess *
!*===========================================================================*
! PUBLIC void cp_mess(int src, phys_clicks src_clicks, vir_bytes src_offset,
! phys_clicks dst_clicks, vir_bytes dst_offset);
! This routine makes a fast copy of a message from anywhere in the address
! space to anywhere else. It also copies the source address provided as a
! parameter to the call into the first word of the destination message.
!
! Note that the message size, "Msize" is in DWORDS (not bytes) and must be set
! correctly. Changing the definition of message in the type file and not
! changing it here will lead to total disaster.
!
!CM_ARGS = 4 + 4 + 4 + 4 + 4 ! 4 + 4 + 4 + 4 + 4
!! es ds edi esi eip proc scl sof dcl dof
!
! .align 16
!_cp_mess:
! cld
! push esi
! push edi
! push ds
! push es
!
! mov eax, FLAT_DS_SELECTOR
! mov ds, ax
! mov es, ax
!
! mov esi, CM_ARGS+4(esp) ! src clicks
! shl esi, CLICK_SHIFT
! add esi, CM_ARGS+4+4(esp) ! src offset
! mov edi, CM_ARGS+4+4+4(esp) ! dst clicks
! shl edi, CLICK_SHIFT
! add edi, CM_ARGS+4+4+4+4(esp) ! dst offset
!
! mov eax, CM_ARGS(esp) ! process number of sender
! stos ! copy number of sender to dest message
! add esi, 4 ! do not copy first word
! mov ecx, Msize - 1 ! remember, first word does not count
! rep
! movs ! copy the message
!
! pop es
! pop ds
! pop edi
! pop esi
! ret ! that is all folks!
!
!*===========================================================================*
!* exit *
!*===========================================================================*
@ -236,8 +186,6 @@ _phys_insw:
push edi
push es
LOADKERNELCR3
mov ecx, FLAT_DS_SELECTOR
mov es, cx
mov edx, 8(ebp) ! port to read from
@ -264,8 +212,6 @@ _phys_insb:
push edi
push es
LOADKERNELCR3
mov ecx, FLAT_DS_SELECTOR
mov es, cx
mov edx, 8(ebp) ! port to read from
@ -293,8 +239,6 @@ _phys_outsw:
push esi
push ds
LOADKERNELCR3
mov ecx, FLAT_DS_SELECTOR
mov ds, cx
mov edx, 8(ebp) ! port to write to
@ -322,8 +266,6 @@ _phys_outsb:
push esi
push ds
LOADKERNELCR3
mov ecx, FLAT_DS_SELECTOR
mov ds, cx
mov edx, 8(ebp) ! port to write to
@ -416,7 +358,7 @@ dis_already:
!*===========================================================================*
!* phys_copy *
!*===========================================================================*
! PUBLIC void phys_copy(phys_bytes source, phys_bytes destination,
! PUBLIC phys_bytes phys_copy(phys_bytes source, phys_bytes destination,
! phys_bytes bytecount);
! Copy a block of physical memory.
@ -430,8 +372,6 @@ _phys_copy:
push edi
push es
LOADKERNELCR3
mov eax, FLAT_DS_SELECTOR
mov es, ax
@ -457,6 +397,8 @@ pc_small:
rep
eseg movsb
mov eax, 0 ! 0 means: no fault
_phys_copy_fault: ! kernel can send us here
pop es
pop edi
pop esi
@ -477,8 +419,6 @@ _phys_memset:
push ebx
push ds
LOADKERNELCR3
mov esi, 8(ebp)
mov eax, 16(ebp)
mov ebx, FLAT_DS_SELECTOR
@ -633,14 +573,13 @@ _write_cr4:
pop ebp
ret
!*===========================================================================*
!* write_cr3 *
!* getcr3val *
!*===========================================================================*
! PUBLIC void write_cr3(unsigned long value);
_write_cr3:
push ebp
mov ebp, esp
LOADCR3WITHEAX(0x22, 8(ebp))
pop ebp
! PUBLIC unsigned long getcr3val(void);
_getcr3val:
mov eax, cr3
mov (_thecr3), eax
ret

File diff suppressed because it is too large Load Diff

View File

@ -60,7 +60,6 @@ begbss:
#include <ibm/interrupt.h>
#include <archconst.h>
#include "../../const.h"
#include "vm.h"
#include "sconst.h"
/* Selected 386 tss offsets. */
@ -74,9 +73,8 @@ begbss:
.define _restart
.define save
.define _kernel_cr3
.define _pagefault_cr2
.define _pagefault_count
.define _reload_cr3
.define _write_cr3 ! write cr3
.define errexception
.define exception1
@ -101,6 +99,8 @@ begbss:
.define _params_size
.define _params_offset
.define _mon_ds
.define _schedcheck
.define _dirtypde
.define _hwint00 ! handlers for hardware interrupts
.define _hwint01
@ -218,12 +218,6 @@ csinit:
ltr ax
push 0 ! set flags to known good state
popf ! esp, clear nested task and int enable
#if VM_KERN_NOPAGEZERO
jmp laststep
.align I386_PAGE_SIZE
laststep:
#endif
jmp _main ! main()
@ -239,7 +233,6 @@ laststep:
#define hwint_master(irq) \
call save /* save interrupted process state */;\
push (_irq_handlers+4*irq) /* irq_handlers[irq] */;\
LOADCR3WITHEAX(irq, (_kernel_cr3)) /* switch to kernel page table */;\
call _intr_handle /* intr_handle(irq_handlers[irq]) */;\
pop ecx ;\
cmp (_irq_actids+4*irq), 0 /* interrupt still active? */;\
@ -291,7 +284,6 @@ _hwint07: ! Interrupt routine for irq 7 (printer)
#define hwint_slave(irq) \
call save /* save interrupted process state */;\
push (_irq_handlers+4*irq) /* irq_handlers[irq] */;\
LOADCR3WITHEAX(irq, (_kernel_cr3)) /* switch to kernel page table */;\
call _intr_handle /* intr_handle(irq_handlers[irq]) */;\
pop ecx ;\
cmp (_irq_actids+4*irq), 0 /* interrupt still active? */;\
@ -398,11 +390,9 @@ _p_s_call:
push eax ! source / destination
push ecx ! call number (ipc primitive to use)
! LOADCR3WITHEAX(0x20, (_kernel_cr3))
call _sys_call ! sys_call(call_nr, src_dst, m_ptr, bit_map)
! caller is now explicitly in proc_ptr
mov AXREG(esi), eax ! sys_call MUST PRESERVE si
mov AXREG(esi), eax
! Fall into code to restart proc/task running.
@ -413,14 +403,21 @@ _restart:
! Restart the current process or the next process if it is set.
cmp (_next_ptr), 0 ! see if another process is scheduled
jz 0f
mov eax, (_next_ptr)
mov (_proc_ptr), eax ! schedule new process
mov (_next_ptr), 0
0: mov esp, (_proc_ptr) ! will assume P_STACKBASE == 0
cli
call _schedcheck ! ask C function who we're running
mov esp, (_proc_ptr) ! will assume P_STACKBASE == 0
lldt P_LDT_SEL(esp) ! enable process' segment descriptors
LOADCR3WITHEAX(0x21, P_CR3(esp)) ! switch to process page table
cmp P_CR3(esp), 0 ! process does not have its own PT
jz 0f
mov eax, P_CR3(esp)
cmp eax, (loadedcr3)
jz 0f
mov cr3, eax
mov (loadedcr3), eax
mov eax, (_proc_ptr)
mov (_ptproc), eax
mov (_dirtypde), 0
0:
lea eax, P_STACKTOP(esp) ! arrange for next interrupt
mov (_tss+TSS3_S_SP0), eax ! to save state in process table
restart1:
@ -496,8 +493,7 @@ _page_fault:
push PAGE_FAULT_VECTOR
push eax
mov eax, cr2
sseg mov (_pagefault_cr2), eax
sseg inc (_pagefault_count)
sseg mov (pagefaultcr2), eax
pop eax
jmp errexception
@ -526,19 +522,26 @@ errexception:
sseg pop (ex_number)
sseg pop (trap_errno)
exception1: ! Common for all exceptions.
sseg mov (old_eax_ptr), esp ! where will eax be saved?
sseg sub (old_eax_ptr), PCREG-AXREG ! here
push eax ! eax is scratch register
mov eax, 0+4(esp) ! old eip
sseg mov (old_eip), eax
mov eax, esp
add eax, 4
sseg mov (old_eip_ptr), eax
movzx eax, 4+4(esp) ! old cs
sseg mov (old_cs), eax
mov eax, 8+4(esp) ! old eflags
sseg mov (old_eflags), eax
LOADCR3WITHEAX(0x24, (_kernel_cr3))
pop eax
call save
push (pagefaultcr2)
push (old_eax_ptr)
push (old_eip_ptr)
push (old_eflags)
push (old_cs)
push (old_eip)
@ -546,7 +549,38 @@ exception1: ! Common for all exceptions.
push (ex_number)
call _exception ! (ex_number, trap_errno, old_eip,
! old_cs, old_eflags)
add esp, 5*4
add esp, 8*4
ret
!*===========================================================================*
!* write_cr3 *
!*===========================================================================*
! PUBLIC void write_cr3(unsigned long value);
_write_cr3:
push ebp
mov ebp, esp
mov eax, 8(ebp)
cmp eax, (loadedcr3)
jz 0f
mov cr3, eax
mov (loadedcr3), eax
mov (_dirtypde), 0
0:
pop ebp
ret
!*===========================================================================*
!* reload_cr3 *
!*===========================================================================*
! PUBLIC void reload_cr3(void);
_reload_cr3:
push ebp
mov ebp, esp
mov (_dirtypde), 0
mov eax, cr3
mov cr3, eax
pop ebp
ret
!*===========================================================================*
@ -556,24 +590,12 @@ _level0_call:
call save
jmp (_level0_func)
!*===========================================================================*
!* load_kernel_cr3 *
!*===========================================================================*
.align 16
_load_kernel_cr3:
mov eax, (_kernel_cr3)
mov cr3, eax
ret
!*===========================================================================*
!* data *
!*===========================================================================*
.sect .rom ! Before the string table please
.data2 0x526F ! this must be the first data entry (magic #)
#if VM_KERN_NOPAGEZERO
.align I386_PAGE_SIZE
#endif
.sect .bss
k_stack:
@ -581,7 +603,11 @@ k_stack:
k_stktop: ! top of kernel stack
.comm ex_number, 4
.comm trap_errno, 4
.comm old_eip_ptr, 4
.comm old_eax_ptr, 4
.comm old_eip, 4
.comm old_cs, 4
.comm old_eflags, 4
.comm pagefaultcr2, 4
.comm loadedcr3, 4

View File

@ -189,6 +189,11 @@ PUBLIC void prot_init(void)
{ level0_call, LEVEL0_VECTOR, TASK_PRIVILEGE },
};
/* Click-round kernel. */
if(kinfo.data_base % CLICK_SIZE)
minix_panic("kinfo.data_base not aligned", NO_NUM);
kinfo.data_size = ((kinfo.data_size+CLICK_SIZE-1)/CLICK_SIZE) * CLICK_SIZE;
/* Build gdt and idt pointers in GDT where the BIOS expects them. */
dtp= (struct desctableptr_s *) &gdt[GDT_INDEX];
* (u16_t *) dtp->limit = (sizeof gdt) - 1;
@ -325,3 +330,36 @@ PUBLIC void alloc_segments(register struct proc *rp)
rp->p_reg.ds = (DS_LDT_INDEX*DESC_SIZE) | TI | privilege;
}
/*===========================================================================*
 *			prot_set_kern_seg_limit				     *
 *===========================================================================*/
/* Grow the kernel data segment so that it ends at linear address 'limit'.
 * Rebuilds the descriptor tables (prot_init) and widens the stack-segment
 * length of every kernel process to match.
 *
 * Returns OK on success, EINVAL if 'limit' does not lie above the kernel
 * data base.
 *
 * Fix: removed the unused local 'prev'.
 */
PUBLIC int prot_set_kern_seg_limit(vir_bytes limit)
{
  struct proc *rp;
  int orig_click;
  int incr_clicks;

  if(limit <= kinfo.data_base) {
	kprintf("prot_set_kern_seg_limit: limit bogus\n");
	return EINVAL;
  }

  /* Do actual increase. */
  orig_click = kinfo.data_size / CLICK_SIZE;
  kinfo.data_size = limit - kinfo.data_base;
  /* NOTE(review): prot_init() click-rounds data_size up, but incr_clicks is
   * computed from the unrounded size (rounding down) — confirm intended. */
  incr_clicks = kinfo.data_size / CLICK_SIZE - orig_click;

  prot_init();

  /* Increase kernel processes too. */
  for (rp = BEG_PROC_ADDR; rp < END_PROC_ADDR; ++rp) {
	if (RTS_ISSET(rp, SLOT_FREE) || !iskernelp(rp))
		continue;
	rp->p_memmap[S].mem_len += incr_clicks;
	alloc_segments(rp);
  }

  return OK;
}

View File

@ -49,11 +49,17 @@ _PROTOTYPE( void vir_insb, (u16_t port, struct proc *proc, u32_t vir, size_t cou
_PROTOTYPE( void vir_outsb, (u16_t port, struct proc *proc, u32_t vir, size_t count));
_PROTOTYPE( void vir_insw, (u16_t port, struct proc *proc, u32_t vir, size_t count));
_PROTOTYPE( void vir_outsw, (u16_t port, struct proc *proc, u32_t vir, size_t count));
_PROTOTYPE( void i386_updatepde, (int pde, u32_t val));
_PROTOTYPE( void i386_freepde, (int pde));
_PROTOTYPE( void getcr3val, (void));
_PROTOTYPE( void switchedcr3, (void));
_PROTOTYPE( void vm_set_cr3, (struct proc *));
/* exception.c */
_PROTOTYPE( void exception, (unsigned vec_nr, u32_t trap_errno,
u32_t old_eip, U16_t old_cs, u32_t old_eflags) );
u32_t old_eip, U16_t old_cs, u32_t old_eflags,
u32_t *old_eip_ptr, u32_t *old_eax_ptr, u32_t pagefaultcr2) );
/* klib386.s */
_PROTOTYPE( void level0, (void (*func)(void)) );
@ -70,7 +76,12 @@ _PROTOTYPE( void phys_insb, (U16_t port, phys_bytes buf, size_t count) );
_PROTOTYPE( void phys_insw, (U16_t port, phys_bytes buf, size_t count) );
_PROTOTYPE( void phys_outsb, (U16_t port, phys_bytes buf, size_t count) );
_PROTOTYPE( void phys_outsw, (U16_t port, phys_bytes buf, size_t count) );
_PROTOTYPE( void i386_invlpg, (U32_t addr) );
_PROTOTYPE( void i386_invlpg_level0, (void) );
_PROTOTYPE( int _memcpy_k, (void *dst, void *src, size_t n) );
_PROTOTYPE( int _memcpy_k_fault, (void) );
_PROTOTYPE( u32_t read_cr3, (void) );
_PROTOTYPE( void reload_cr3, (void) );
_PROTOTYPE( void phys_memset, (phys_bytes ph, u32_t c, phys_bytes bytes) );
/* protect.c */
_PROTOTYPE( void prot_init, (void) );
@ -79,6 +90,7 @@ _PROTOTYPE( void init_codeseg, (struct segdesc_s *segdp, phys_bytes base,
_PROTOTYPE( void init_dataseg, (struct segdesc_s *segdp, phys_bytes base,
vir_bytes size, int privilege) );
_PROTOTYPE( void enable_iop, (struct proc *pp) );
_PROTOTYPE( int prot_set_kern_seg_limit, (vir_bytes limit) );
/* functions defined in architecture-independent kernel source. */
#include "../../proto.h"

View File

@ -14,11 +14,11 @@
#include "proto.h"
#include "../../proc.h"
#include "../../debug.h"
#define CR0_EM 0x0004 /* set to enable trap on any FP instruction */
FORWARD _PROTOTYPE( void ser_debug, (int c));
FORWARD _PROTOTYPE( void ser_dump_stats, (void));
PUBLIC void arch_shutdown(int how)
{
@ -137,82 +137,123 @@ PUBLIC void do_ser_debug()
ser_debug(c);
}
/* Dump the scheduling queues to the serial line: one line per non-empty
 * priority queue, listing "name / endpoint" for each ready process.
 *
 * Fix: the original printed the trailing newline unconditionally, emitting
 * a spurious blank line for every empty queue; now the header, entries and
 * newline are all skipped when the queue is empty.
 */
PRIVATE void ser_dump_queues(void)
{
  int q;

  for(q = 0; q < NR_SCHED_QUEUES; q++) {
	struct proc *p;
	if(!rdy_head[q])
		continue;	/* empty queue: print nothing at all */
	printf("%2d: ", q);
	for(p = rdy_head[q]; p; p = p->p_nextready) {
		printf("%s / %d ", p->p_name, p->p_endpoint);
	}
	printf("\n");
  }
}
PRIVATE void ser_debug(int c)
{
/* Handle one character received on the debug serial line: dispatch on the
 * character to the matching kernel dump or debug-flag toggle. */
int u = 0;
do_serial_debug++;
kprintf("ser_debug: %d\n", c);
/* Disable interrupts so that we get a consistent state. */
if(!intr_disabled()) { lock; u = 1; };
switch(c)
{
case '1':
ser_dump_proc();
break;
case '2':
ser_dump_stats();
ser_dump_queues();
break;
#if DEBUG_TRACE
/* Toggle one bit in 'verboseflags' and report its new state. */
#define TOGGLECASE(ch, flag) \
case ch: { \
if(verboseflags & flag) { \
verboseflags &= ~flag; \
printf("%s disabled\n", #flag); \
} else { \
verboseflags |= flag; \
printf("%s enabled\n", #flag); \
} \
break; \
}
TOGGLECASE('8', VF_SCHEDULING)
TOGGLECASE('9', VF_PICKPROC)
#endif
}
do_serial_debug--;
/* Re-enable interrupts only if we were the ones who disabled them. */
if(u) { unlock; }
}
PRIVATE void printslot(struct proc *pp, int level)
{
/* Recursively print one process-table slot and, if that process is blocked
 * sending to or receiving from another process, the slot it depends on.
 * 'level' is the recursion depth, used both for indentation and as a
 * dependency-cycle cutoff. */
struct proc *depproc = NULL;
int dep = NONE;
#define COL { int i; for(i = 0; i < level; i++) printf("> "); }
/* A dependency chain longer than the process table implies a cycle. */
if(level >= NR_PROCS) {
kprintf("loop??\n");
return;
}
COL
kprintf("%d: %s %d prio %d/%d time %d/%d cr3 0x%lx rts %s misc %s",
proc_nr(pp), pp->p_name, pp->p_endpoint,
pp->p_priority, pp->p_max_priority, pp->p_user_time,
pp->p_sys_time, pp->p_seg.p_cr3,
rtsflagstr(pp->p_rts_flags), miscflagstr(pp->p_misc_flags));
/* Determine which endpoint, if any, this process is blocked on. */
if(pp->p_rts_flags & SENDING) {
dep = pp->p_sendto_e;
kprintf(" to: ");
} else if(pp->p_rts_flags & RECEIVING) {
dep = pp->p_getfrom_e;
kprintf(" from: ");
}
if(dep != NONE) {
if(dep == ANY) {
kprintf(" ANY\n");
} else {
int procno;
if(!isokendpt(dep, &procno)) {
kprintf(" ??? %d\n", dep);
} else {
depproc = proc_addr(procno);
if(depproc->p_rts_flags & SLOT_FREE) {
kprintf(" empty slot %d???\n", procno);
depproc = NULL;	/* do not recurse into a freed slot */
} else {
kprintf(" %s\n", depproc->p_name);
}
}
}
} else {
kprintf("\n");
}
COL
proc_stacktrace(pp);
/* Follow the dependency chain one level deeper. */
if(depproc)
printslot(depproc, level+1);
}
PUBLIC void ser_dump_proc()
{
/* Dump every in-use process-table slot to the serial line, including its
 * blocking dependencies (printslot) and a stack trace. */
struct proc *pp;
int u = 0;
/* Disable interrupts so that we get a consistent state. */
if(!intr_disabled()) { lock; u = 1; };
for (pp= BEG_PROC_ADDR; pp < END_PROC_ADDR; pp++)
{
if (pp->p_rts_flags & SLOT_FREE)
continue;
kprintf(
"%d: 0x%02x %s e %d src %d dst %d prio %d/%d time %d/%d EIP 0x%x\n",
proc_nr(pp),
pp->p_rts_flags, pp->p_name,
pp->p_endpoint, pp->p_getfrom_e, pp->p_sendto_e,
pp->p_priority, pp->p_max_priority,
pp->p_user_time, pp->p_sys_time,
pp->p_reg.pc);
proc_stacktrace(pp);
printslot(pp, 0);
}
/* Re-enable interrupts only if we were the ones who disabled them. */
if(u) { unlock; }
}
/* Print the IPC and system-call statistics counters to the serial line.
 * The plain counters are emitted from a label/value table; the 64-bit
 * totals need special formatting and are handled separately. */
PRIVATE void ser_dump_stats()
{
  static struct {
	char *label;
	unsigned long *count;
  } ipctab[] = {
	{ "deadproc",		&ipc_stats.deadproc },
	{ "bad_endpoint",	&ipc_stats.bad_endpoint },
	{ "dst_not_allowed",	&ipc_stats.dst_not_allowed },
	{ "bad_call",		&ipc_stats.bad_call },
	{ "call_not_allowed",	&ipc_stats.call_not_allowed },
	{ "bad_buffer",		&ipc_stats.bad_buffer },
	{ "deadlock",		&ipc_stats.deadlock },
	{ "not_ready",		&ipc_stats.not_ready },
	{ "src_died",		&ipc_stats.src_died },
	{ "dst_died",		&ipc_stats.dst_died },
	{ "no_priv",		&ipc_stats.no_priv },
	{ "bad_size",		&ipc_stats.bad_size },
	{ "bad_senda",		&ipc_stats.bad_senda },
  };
  int i;

  kprintf("ipc_stats:\n");
  for(i = 0; i < sizeof(ipctab)/sizeof(ipctab[0]); i++)
	kprintf("%s: %d\n", ipctab[i].label, *ipctab[i].count);

  if (ex64hi(ipc_stats.total))
  {
	kprintf("total: %x:%08x\n", ex64hi(ipc_stats.total),
		ex64lo(ipc_stats.total));
  }
  else
	kprintf("total: %u\n", ex64lo(ipc_stats.total));

  kprintf("sys_stats:\n");
  kprintf("bad_req: %d\n", sys_stats.bad_req);
  kprintf("not_allowed: %d\n", sys_stats.not_allowed);
  if (ex64hi(sys_stats.total))
  {
	kprintf("total: %x:%08x\n", ex64hi(sys_stats.total),
		ex64lo(sys_stats.total));
  }
  else
	kprintf("total: %u\n", ex64lo(sys_stats.total));
}
#if SPROFILE

View File

@ -1,27 +0,0 @@
.define _load_kernel_cr3
.define _last_cr3
#define LOADKERNELCR3 ;\
inc (_cr3switch) ;\
mov eax, (_kernel_cr3) ;\
cmp (_last_cr3), eax ;\
jz 9f ;\
push _load_kernel_cr3 ;\
call _level0 ;\
pop eax ;\
mov eax, (_kernel_cr3) ;\
mov (_last_cr3), eax ;\
inc (_cr3reload) ;\
9:
#define LOADCR3WITHEAX(type, newcr3) ;\
sseg inc (_cr3switch) ;\
sseg mov eax, newcr3 ;\
sseg cmp (_last_cr3), eax ;\
jz 8f ;\
mov cr3, eax ;\
sseg inc (_cr3reload) ;\
sseg mov (_last_cr3), eax ;\
8:

View File

@ -99,6 +99,7 @@ message *m_ptr; /* pointer to request message */
/* Despite its name, this routine is not called on every clock tick. It
* is called on those clock ticks when a lot of work needs to be done.
*/
vmassert(!vm_running || (read_cr3() == ptproc->p_seg.p_cr3));
/* A process used up a full quantum. The interrupt handler stored this
* process in 'prev_ptr'. First make sure that the process is not on the
@ -120,12 +121,17 @@ message *m_ptr; /* pointer to request message */
}
}
vmassert(!vm_running || (read_cr3() == ptproc->p_seg.p_cr3));
/* Check if a clock timer expired and run its watchdog function. */
if (next_timeout <= realtime) {
vmassert(!vm_running || (read_cr3() == ptproc->p_seg.p_cr3));
tmrs_exptimers(&clock_timers, realtime, NULL);
vmassert(!vm_running || (read_cr3() == ptproc->p_seg.p_cr3));
next_timeout = (clock_timers == NULL) ?
TMR_NEVER : clock_timers->tmr_exp_time;
vmassert(!vm_running || (read_cr3() == ptproc->p_seg.p_cr3));
}
vmassert(!vm_running || (read_cr3() == ptproc->p_seg.p_cr3));
return;
}
@ -188,6 +194,8 @@ irq_hook_t *hook;
if(minix_panicing) return;
vmassert(!vm_running || (read_cr3() == ptproc->p_seg.p_cr3));
/* Get number of ticks and update realtime. */
ticks = lost_ticks + 1;
lost_ticks = 0;
@ -223,6 +231,7 @@ irq_hook_t *hook;
if (do_serial_debug)
do_ser_debug();
vmassert(!vm_running || (read_cr3() == ptproc->p_seg.p_cr3));
return(1); /* reenable interrupts */
}

View File

@ -25,6 +25,8 @@ check_runqueues_f(char *file, int line)
minix_panic("check_runqueues called with interrupts enabled", NO_NUM);
}
FIXME("check_runqueues being done");
#define MYPANIC(msg) { \
kprintf("check_runqueues:%s:%d: %s\n", file, line, msg); \
minix_panic("check_runqueues failed", NO_NUM); \
@ -94,7 +96,9 @@ check_runqueues_f(char *file, int line)
for (xp = BEG_PROC_ADDR; xp < END_PROC_ADDR; ++xp) {
if(xp->p_magic != PMAGIC)
MYPANIC("p_magic wrong in proc table");
if (! isemptyp(xp) && xp->p_ready && ! xp->p_found) {
if (isemptyp(xp))
continue;
if(xp->p_ready && ! xp->p_found) {
kprintf("sched error: ready proc %d not on queue\n", xp->p_nr);
MYPANIC("ready proc not on scheduling queue");
if (l++ > MAX_LOOP) { MYPANIC("loop in debug.c?"); }
@ -103,3 +107,43 @@ check_runqueues_f(char *file, int line)
}
#endif /* DEBUG_SCHED_CHECK */
/* Return a static, space-separated string naming every RTS (run-time state)
 * flag set in 'flags'. Not reentrant: callers share one static buffer.
 *
 * Fix: the buffer was 100 bytes, but the concatenation of all flag names
 * plus separators is about 130 bytes, so strcat could overflow it when
 * many flags were set at once. Enlarged to 256 bytes.
 */
PUBLIC char *
rtsflagstr(int flags)
{
	static char str[256];
	str[0] = '\0';

#define FLAG(n) if(flags & n) { strcat(str, #n " "); }

	FLAG(SLOT_FREE);
	FLAG(NO_PRIORITY);
	FLAG(SENDING);
	FLAG(RECEIVING);
	FLAG(SIGNALED);
	FLAG(SIG_PENDING);
	FLAG(P_STOP);
	FLAG(NO_PRIV);
	FLAG(NO_ENDPOINT);
	FLAG(VMINHIBIT);
	FLAG(PAGEFAULT);
	FLAG(VMREQUEST);
	FLAG(VMREQTARGET);

	return str;
}
/* Return a static, space-separated string naming every miscellaneous
 * process flag set in 'flags'. Not reentrant: callers share one static
 * buffer. (The FLAG() macro used elsewhere in this file expands to exactly
 * the explicit tests below; written out here for clarity.) */
PUBLIC char *
miscflagstr(int flags)
{
	static char buf[100];

	buf[0] = '\0';
	if(flags & MF_REPLY_PEND) strcat(buf, "MF_REPLY_PEND ");
	if(flags & MF_ASYNMSG) strcat(buf, "MF_ASYNMSG ");
	if(flags & MF_FULLVM) strcat(buf, "MF_FULLVM ");
	if(flags & MF_DELIVERMSG) strcat(buf, "MF_DELIVERMSG ");

	return buf;
}

View File

@ -8,6 +8,7 @@
*/
#include <ansi.h>
#include <minix/debug.h>
#include "config.h"
/* Enable prints such as
@ -24,7 +25,46 @@
#define DEBUG_TIME_LOCKS 1
/* Runtime sanity checking. */
#define DEBUG_VMASSERT 1
#define DEBUG_VMASSERT 0
#define DEBUG_SCHED_CHECK 0
#define DEBUG_STACK_CHECK 0
#define DEBUG_TRACE 0
#if DEBUG_TRACE
#define VF_SCHEDULING (1L << 1)
#define VF_PICKPROC (1L << 2)
#define TRACE(code, statement) if(verboseflags & code) { printf("%s:%d: ", __FILE__, __LINE__); statement }
#else
#define TRACE(code, statement)
#endif
/* Guard the enclosing function against recursive entry and against running
 * with interrupts enabled: takes the lock if needed, panics if re-entered
 * while 'varname' is still set. Must be paired with NOREC_RETURN; declares
 * 'mustunlock' used by it. */
#define NOREC_ENTER(varname) \
static int varname = 0; \
int mustunlock = 0; \
if(!intr_disabled()) { lock; mustunlock = 1; } \
if(varname) { \
minix_panic(#varname " recursive enter", __LINE__); \
} \
varname = 1;
/* Leave a NOREC_ENTER-guarded function: panics if the guard was already
 * cleared or interrupts got re-enabled, clears the guard, unlocks if
 * NOREC_ENTER locked, and returns 'v' (which may be empty for void). */
#define NOREC_RETURN(varname, v) do { \
if(!varname) \
minix_panic(#varname " flag off", __LINE__); \
if(!intr_disabled()) \
minix_panic(#varname " interrupts on", __LINE__); \
varname = 0; \
if(mustunlock) { unlock; } \
return v; \
} while(0)
/* vmassert(t): panic when VM sanity check 't' fails; compiles to nothing
 * when DEBUG_VMASSERT is 0. */
#if DEBUG_VMASSERT
#define vmassert(t) { \
if(!(t)) { minix_panic("vm: assert " #t " failed\n", __LINE__); } }
#else
#define vmassert(t) { }
#endif
#endif /* DEBUG_H */

View File

@ -16,6 +16,7 @@
#include <minix/config.h>
#include <archtypes.h>
#include "config.h"
#include "debug.h"
/* Variables relating to shutting down MINIX. */
EXTERN char kernel_exception; /* TRUE after system exceptions */
@ -29,14 +30,13 @@ EXTERN struct k_randomness krandom; /* gather kernel random information */
EXTERN struct loadinfo kloadinfo; /* status of load average */
/* Process scheduling information and the kernel reentry count. */
EXTERN struct proc *prev_ptr; /* previously running process */
EXTERN struct proc *proc_ptr; /* pointer to currently running process */
EXTERN struct proc *next_ptr; /* next process to run after restart() */
EXTERN struct proc *prev_ptr;
EXTERN struct proc *bill_ptr; /* process to bill for clock ticks */
EXTERN struct proc *vmrestart; /* first process on vmrestart queue */
EXTERN struct proc *vmrequest; /* first process on vmrequest queue */
EXTERN struct proc *pagefaults; /* first process on pagefault queue */
EXTERN struct proc *softnotify; /* first process on softnotify queue */
EXTERN char k_reenter; /* kernel reentry count (entry count less 1) */
EXTERN unsigned lost_ticks; /* clock ticks counted outside clock task */
@ -47,32 +47,6 @@ EXTERN int irq_actids[NR_IRQ_VECTORS]; /* IRQ ID bits active */
EXTERN int irq_use; /* map of all in-use irq's */
EXTERN u32_t system_hz; /* HZ value */
EXTERN struct ipc_stats
{
unsigned long deadproc;
unsigned long bad_endpoint;
unsigned long dst_not_allowed;
unsigned long bad_call;
unsigned long call_not_allowed;
unsigned long bad_buffer;
unsigned long deadlock;
unsigned long not_ready;
unsigned long src_died;
unsigned long dst_died;
unsigned long no_priv;
unsigned long bad_size;
unsigned long bad_senda;
u64_t total;
} ipc_stats;
extern endpoint_t ipc_stats_target;
EXTERN struct system_stats
{
unsigned long bad_req;
unsigned long not_allowed;
u64_t total;
} sys_stats;
/* Miscellaneous. */
EXTERN reg_t mon_ss, mon_sp; /* boot monitor stack */
EXTERN int mon_return; /* true if we can return to monitor */
@ -85,18 +59,14 @@ EXTERN char params_buffer[512]; /* boot monitor parameters */
EXTERN int minix_panicing;
EXTERN int locklevel;
EXTERN unsigned long cr3switch;
EXTERN unsigned long cr3reload;
#if DEBUG_TRACE
EXTERN int verboseflags;
#endif
/* VM */
EXTERN phys_bytes vm_base;
EXTERN phys_bytes vm_size;
EXTERN phys_bytes vm_mem_high;
EXTERN int vm_running;
EXTERN int must_notify_vm;
/* Verbose flags (debugging). */
EXTERN int verbose_vm;
EXTERN int catch_pagefaults;
EXTERN struct proc *ptproc;
/* Timing */
EXTERN util_timingdata_t timingdata[TIMING_CATEGORIES];

View File

@ -17,6 +17,7 @@
#include <minix/com.h>
#include <minix/endpoint.h>
#include "proc.h"
#include "debug.h"
/* Prototype declarations for PRIVATE functions. */
FORWARD _PROTOTYPE( void announce, (void));
@ -158,6 +159,9 @@ PUBLIC void main()
rp->p_reg.sp -= sizeof(reg_t);
}
/* scheduling functions depend on proc_ptr pointing somewhere. */
if(!proc_ptr) proc_ptr = rp;
/* If this process has its own page table, VM will set the
* PT up and manage it. VM will signal the kernel when it has
* done this; until then, don't let it run.
@ -183,8 +187,21 @@ PUBLIC void main()
/* MINIX is now ready. All boot image processes are on the ready queue.
* Return to the assembly code to start running the current process.
*/
bill_ptr = proc_addr(IDLE); /* it has to point somewhere */
bill_ptr = proc_addr(IDLE); /* it has to point somewhere */
announce(); /* print MINIX startup banner */
/* Warnings for sanity checks that take time. These warnings are printed
* so it's a clear warning no full release should be done with them
* enabled.
*/
#if DEBUG_SCHED_CHECK
FIXME("DEBUG_SCHED_CHECK enabled");
#endif
#if DEBUG_VMASSERT
FIXME("DEBUG_VMASSERT enabled");
#endif
#if DEBUG_PROC_CHECK
FIXME("PROC check enabled");
#endif
restart();
}

View File

@ -8,8 +8,6 @@
*
* lock_notify: notify a process of a system event
* lock_send: send a message to a process
* lock_enqueue: put a process on one of the scheduling queues
* lock_dequeue: remove a process from the scheduling queues
*
* Changes:
* Aug 19, 2005 rewrote scheduling code (Jorrit N. Herder)
@ -67,8 +65,10 @@ FORWARD _PROTOTYPE( int try_one, (struct proc *src_ptr, struct proc *dst_ptr));
FORWARD _PROTOTYPE( void sched, (struct proc *rp, int *queue, int *front));
FORWARD _PROTOTYPE( void pick_proc, (void));
#define BuildMess(m_ptr, src, dst_ptr) \
(m_ptr)->m_source = proc_addr(src)->p_endpoint; \
#define PICK_ANY 1
#define PICK_HIGHERONLY 2
#define BuildNotifyMessage(m_ptr, src, dst_ptr) \
(m_ptr)->m_type = NOTIFY_FROM(src); \
(m_ptr)->NOTIFY_TIMESTAMP = get_uptime(); \
switch (src) { \
@ -82,49 +82,86 @@ FORWARD _PROTOTYPE( void pick_proc, (void));
break; \
}
#define CopyMess(s,sp,sm,dp,dm) do { \
vir_bytes dstlin; \
endpoint_t e = proc_addr(s)->p_endpoint; \
struct vir_addr src, dst; \
int r; \
if((dstlin = umap_local((dp), D, (vir_bytes) dm, sizeof(message))) == 0){\
minix_panic("CopyMess: umap_local failed", __LINE__); \
} \
\
if(vm_running && \
(r=vm_checkrange((dp), (dp), dstlin, sizeof(message), 1, 0)) != OK) { \
if(r != VMSUSPEND) \
minix_panic("CopyMess: vm_checkrange error", __LINE__); \
(dp)->p_vmrequest.saved.msgcopy.dst = (dp); \
(dp)->p_vmrequest.saved.msgcopy.dst_v = (vir_bytes) dm; \
if(data_copy((sp)->p_endpoint, \
(vir_bytes) (sm), SYSTEM, \
(vir_bytes) &(dp)->p_vmrequest.saved.msgcopy.msgbuf, \
sizeof(message)) != OK) { \
minix_panic("CopyMess: data_copy failed", __LINE__);\
} \
(dp)->p_vmrequest.saved.msgcopy.msgbuf.m_source = e; \
(dp)->p_vmrequest.type = VMSTYPE_MSGCOPY; \
} else { \
src.proc_nr_e = (sp)->p_endpoint; \
dst.proc_nr_e = (dp)->p_endpoint; \
src.segment = dst.segment = D; \
src.offset = (vir_bytes) (sm); \
dst.offset = (vir_bytes) (dm); \
if(virtual_copy(&src, &dst, sizeof(message)) != OK) { \
kprintf("copymess: copy %d:%lx to %d:%lx failed\n",\
(sp)->p_endpoint, (sm), (dp)->p_endpoint, dm);\
minix_panic("CopyMess: virtual_copy (1) failed", __LINE__); \
} \
src.proc_nr_e = SYSTEM; \
src.offset = (vir_bytes) &e; \
if(virtual_copy(&src, &dst, sizeof(e)) != OK) { \
kprintf("copymess: copy %d:%lx to %d:%lx\n", \
(sp)->p_endpoint, (sm), (dp)->p_endpoint, dm);\
minix_panic("CopyMess: virtual_copy (2) failed", __LINE__); \
} \
} \
} while(0)
/*===========================================================================*
* QueueMess *
*===========================================================================*/
/* Stage the message at linear address 'msg_lin' into dst->p_delivermsg in
 * the kernel, stamp its source endpoint 'ep', and flag 'dst' with
 * MF_DELIVERMSG so final delivery into the receiver's address space happens
 * later (see schedcheck). Returns OK, or EFAULT if the copy into the
 * kernel faults. */
PRIVATE int QueueMess(endpoint_t ep, vir_bytes msg_lin, struct proc *dst)
{
int k;
phys_bytes addr;
NOREC_ENTER(queuemess);
/* Queue a message from the src process (in memory) to the dst
* process (using dst process table entry). Do actual copy to
* kernel here; it's an error if the copy fails into kernel.
*/
vmassert(!(dst->p_misc_flags & MF_DELIVERMSG));
vmassert(dst->p_delivermsg_lin);
vmassert(isokendpt(ep, &k));
#if 0
/* Disabled: copy straight into the receiver when it is mapped in,
 * bypassing the kernel-side staging buffer. */
if(INMEMORY(dst)) {
PHYS_COPY_CATCH(msg_lin, dst->p_delivermsg_lin,
sizeof(message), addr);
if(!addr) {
PHYS_COPY_CATCH(vir2phys(&ep), dst->p_delivermsg_lin,
sizeof(ep), addr);
if(!addr) {
NOREC_RETURN(queuemess, OK);
}
}
}
#endif
/* A nonzero 'addr' after PHYS_COPY_CATCH means the copy faulted. */
PHYS_COPY_CATCH(msg_lin, vir2phys(&dst->p_delivermsg), sizeof(message), addr);
if(addr) {
NOREC_RETURN(queuemess, EFAULT);
}
dst->p_delivermsg.m_source = ep;
dst->p_misc_flags |= MF_DELIVERMSG;
NOREC_RETURN(queuemess, OK);
}
/*===========================================================================*
* schedcheck *
*===========================================================================*/
PUBLIC void schedcheck(void)
{
/* This function is called an instant before proc_ptr is
* to be scheduled again.
*/
NOREC_ENTER(schedch);
vmassert(intr_disabled());
/* Pick up a pending process switch, if any. */
if(next_ptr) {
proc_ptr = next_ptr;
next_ptr = NULL;
}
vmassert(proc_ptr);
vmassert(!proc_ptr->p_rts_flags);
/* Deliver any message staged for the chosen process (MF_DELIVERMSG, set
* by QueueMess). Delivery may fault and suspend the process (VMSUSPEND),
* in which case a different runnable process has been placed in next_ptr
* and we retry delivery for that one instead. */
while(proc_ptr->p_misc_flags & MF_DELIVERMSG) {
vmassert(!next_ptr);
vmassert(!proc_ptr->p_rts_flags);
TRACE(VF_SCHEDULING, printf("delivering to %s / %d\n",
proc_ptr->p_name, proc_ptr->p_endpoint););
if(delivermsg(proc_ptr) == VMSUSPEND) {
vmassert(next_ptr);
TRACE(VF_SCHEDULING, printf("suspending %s / %d\n",
proc_ptr->p_name, proc_ptr->p_endpoint););
vmassert(proc_ptr->p_rts_flags);
vmassert(next_ptr != proc_ptr);
proc_ptr = next_ptr;
vmassert(!proc_ptr->p_rts_flags);
next_ptr = NULL;
}
}
TRACE(VF_SCHEDULING, printf("starting %s / %d\n",
proc_ptr->p_name, proc_ptr->p_endpoint););
#if DEBUG_TRACE
proc_ptr->p_schedules++;
#endif
NOREC_RETURN(schedch, );
}
/*===========================================================================*
* sys_call *
@ -145,9 +182,15 @@ long bit_map; /* notification event set or flags */
int result; /* the system call's result */
int src_dst_p; /* Process slot number */
size_t msg_size;
phys_bytes linaddr = 0;
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.total= add64u(ipc_stats.total, 1);
#if DEBUG_SCHED_CHECK
if(caller_ptr->p_misc_flags & MF_DELIVERMSG) {
kprintf("sys_call: MF_DELIVERMSG on for %s / %d\n",
caller_ptr->p_name, caller_ptr->p_endpoint);
minix_panic("MF_DELIVERMSG on", NO_NUM);
}
#endif
#if 0
if(src_dst_e != 4 && src_dst_e != 5 &&
@ -163,12 +206,10 @@ long bit_map; /* notification event set or flags */
}
#endif
#if 1
#if DEBUG_SCHED_CHECK
if (RTS_ISSET(caller_ptr, SLOT_FREE))
{
kprintf("called by the dead?!?\n");
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.deadproc++;
return EINVAL;
}
#endif
@ -193,8 +234,6 @@ long bit_map; /* notification event set or flags */
kprintf("sys_call: trap %d by %d with bad endpoint %d\n",
call_nr, proc_nr(caller_ptr), src_dst_e);
#endif
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.bad_endpoint++;
return EINVAL;
}
src_dst_p = src_dst_e;
@ -214,8 +253,6 @@ long bit_map; /* notification event set or flags */
kprintf("sys_call: trap %d by %d with bad endpoint %d\n",
call_nr, proc_nr(caller_ptr), src_dst_e);
#endif
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.bad_endpoint++;
return EDEADSRCDST;
}
@ -233,8 +270,6 @@ long bit_map; /* notification event set or flags */
call_nr, proc_nr(caller_ptr),
caller_ptr->p_name, src_dst_p);
#endif
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.dst_not_allowed++;
return(ECALLDENIED); /* call denied by ipc mask */
}
}
@ -245,10 +280,8 @@ long bit_map; /* notification event set or flags */
#if DEBUG_ENABLE_IPC_WARNINGS
kprintf(
"sys_call: ipc mask denied trap %d from %d to %d\n",
call_nr, proc_nr(caller_ptr), src_dst_p);
call_nr, caller_ptr->p_endpoint, src_dst_e);
#endif
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.dst_not_allowed++;
return(ECALLDENIED); /* call denied by ipc mask */
}
}
@ -261,8 +294,6 @@ long bit_map; /* notification event set or flags */
kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
call_nr, proc_nr(caller_ptr), src_dst_p);
#endif
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.bad_call++;
return(ETRAPDENIED); /* trap denied by mask or kernel */
}
@ -275,8 +306,6 @@ long bit_map; /* notification event set or flags */
kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
call_nr, proc_nr(caller_ptr), src_dst_p);
#endif
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.call_not_allowed++;
return(ETRAPDENIED); /* trap denied by mask or kernel */
}
@ -285,8 +314,6 @@ long bit_map; /* notification event set or flags */
kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
call_nr, proc_nr(caller_ptr), src_dst_e);
#endif
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.call_not_allowed++;
return(ETRAPDENIED); /* trap denied by mask or kernel */
}
@ -307,61 +334,6 @@ long bit_map; /* notification event set or flags */
msg_size = sizeof(*m_ptr);
}
/* If the call involves a message buffer, i.e., for SEND, SENDREC,
* or RECEIVE, check the message pointer. This check allows a message to be
* anywhere in data or stack or gap. It will have to be made more elaborate
* for machines which don't have the gap mapped.
*
* We use msg_size decided above.
*/
if (call_nr == SEND || call_nr == SENDREC ||
call_nr == RECEIVE || call_nr == SENDA || call_nr == SENDNB) {
int r;
phys_bytes lin;
/* Map to linear address. */
if(msg_size > 0 &&
(lin = umap_local(caller_ptr, D, (vir_bytes) m_ptr, msg_size)) == 0) {
kprintf("umap_local failed for %s / %d on 0x%lx size %d\n",
caller_ptr->p_name, caller_ptr->p_endpoint,
m_ptr, msg_size);
return EFAULT;
}
/* Check if message pages in calling process are mapped.
* We don't have to check the recipient if this is a send,
* because this code will do that before its receive() starts.
*
* It is important the range is verified as _writable_, because
* the kernel will want to write to the SENDA buffer in the future,
* and those pages may not be shared between processes.
*/
if(vm_running && msg_size > 0 &&
(r=vm_checkrange(caller_ptr, caller_ptr, lin, msg_size, 1, 0)) != OK) {
if(r != VMSUSPEND) {
kprintf("SYSTEM:sys_call:vm_checkrange: err %d\n", r);
return r;
}
/* We can't go ahead with this call. Caller is suspended
* and we have to save the state in its process struct.
*/
caller_ptr->p_vmrequest.saved.sys_call.call_nr = call_nr;
caller_ptr->p_vmrequest.saved.sys_call.m_ptr = m_ptr;
caller_ptr->p_vmrequest.saved.sys_call.src_dst_e = src_dst_e;
caller_ptr->p_vmrequest.saved.sys_call.bit_map = bit_map;
caller_ptr->p_vmrequest.type = VMSTYPE_SYS_CALL;
kprintf("SYSTEM: %s:%d: suspending call 0x%lx on ipc buffer 0x%lx length 0x%lx\n",
caller_ptr->p_name, caller_ptr->p_endpoint, call_nr, m_ptr, msg_size);
/* vm_checkrange() will have suspended caller with VMREQUEST. */
return OK;
}
}
/* Check for a possible deadlock for blocking SEND(REC) and RECEIVE. */
if (call_nr == SEND || call_nr == SENDREC || call_nr == RECEIVE) {
if (group_size = deadlock(call_nr, caller_ptr, src_dst_p)) {
@ -369,8 +341,6 @@ long bit_map; /* notification event set or flags */
kprintf("sys_call: trap %d from %d to %d deadlocked, group size %d\n",
call_nr, proc_nr(caller_ptr), src_dst_p, group_size);
#endif
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.deadlock++;
return(ELOCKED);
}
}
@ -386,7 +356,7 @@ long bit_map; /* notification event set or flags */
switch(call_nr) {
case SENDREC:
/* A flag is set so that notifications cannot interrupt SENDREC. */
caller_ptr->p_misc_flags |= REPLY_PENDING;
caller_ptr->p_misc_flags |= MF_REPLY_PEND;
/* fall through */
case SEND:
result = mini_send(caller_ptr, src_dst_e, m_ptr, 0);
@ -395,7 +365,7 @@ long bit_map; /* notification event set or flags */
/* fall through for SENDREC */
case RECEIVE:
if (call_nr == RECEIVE)
caller_ptr->p_misc_flags &= ~REPLY_PENDING;
caller_ptr->p_misc_flags &= ~MF_REPLY_PEND;
result = mini_receive(caller_ptr, src_dst_e, m_ptr, 0);
break;
case NOTIFY:
@ -484,22 +454,6 @@ int src_dst; /* src or dst process */
return(0); /* not a deadlock */
}
/*===========================================================================*
* sys_call_restart *
*===========================================================================*/
/* Stub: report the saved parameters of a VM-suspended system call. As its
 * own message says ("but not really"), it does not actually restart the
 * call. This function sits in a removal hunk of the diff. */
PUBLIC void sys_call_restart(caller)
struct proc *caller;	/* process whose suspended sys_call would restart */
{
int r;
kprintf("restarting sys_call code 0x%lx, "
"m_ptr 0x%lx, srcdst %d, bitmap 0x%lx, but not really\n",
caller->p_vmrequest.saved.sys_call.call_nr,
caller->p_vmrequest.saved.sys_call.m_ptr,
caller->p_vmrequest.saved.sys_call.src_dst_e,
caller->p_vmrequest.saved.sys_call.bit_map);
/* NOTE(review): 'r' is never assigned, so the caller's return register
 * receives an indeterminate value here (undefined behavior) -- confirm
 * whether OK or an error code was intended. */
caller->p_reg.retreg = r;
}
/*===========================================================================*
* mini_send *
*===========================================================================*/
@ -516,14 +470,19 @@ int flags;
register struct proc *dst_ptr;
register struct proc **xpp;
int dst_p;
phys_bytes linaddr;
vir_bytes addr;
int r;
if(!(linaddr = umap_local(caller_ptr, D, (vir_bytes) m_ptr,
sizeof(message)))) {
return EFAULT;
}
dst_p = _ENDPOINT_P(dst_e);
dst_ptr = proc_addr(dst_p);
if (RTS_ISSET(dst_ptr, NO_ENDPOINT))
{
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.dst_died++;
return EDSTDIED;
}
@ -532,18 +491,20 @@ int flags;
*/
if (WILLRECEIVE(dst_ptr, caller_ptr->p_endpoint)) {
/* Destination is indeed waiting for this message. */
CopyMess(caller_ptr->p_nr, caller_ptr, m_ptr, dst_ptr,
dst_ptr->p_messbuf);
vmassert(!(dst_ptr->p_misc_flags & MF_DELIVERMSG));
if((r=QueueMess(caller_ptr->p_endpoint, linaddr, dst_ptr)) != OK)
return r;
RTS_UNSET(dst_ptr, RECEIVING);
} else {
if(flags & NON_BLOCKING) {
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.not_ready++;
return(ENOTREADY);
}
/* Destination is not waiting. Block and dequeue caller. */
caller_ptr->p_messbuf = m_ptr;
PHYS_COPY_CATCH(linaddr, vir2phys(&caller_ptr->p_sendmsg),
sizeof(message), addr);
if(addr) { return EFAULT; }
RTS_SET(caller_ptr, SENDING);
caller_ptr->p_sendto_e = dst_e;
@ -576,6 +537,18 @@ int flags;
sys_map_t *map;
bitchunk_t *chunk;
int i, r, src_id, src_proc_nr, src_p;
phys_bytes linaddr;
vmassert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
if(!(linaddr = umap_local(caller_ptr, D, (vir_bytes) m_ptr,
sizeof(message)))) {
return EFAULT;
}
/* This is where we want our message. */
caller_ptr->p_delivermsg_lin = linaddr;
caller_ptr->p_delivermsg_vir = (vir_bytes) m_ptr;
if(src_e == ANY) src_p = ANY;
else
@ -583,8 +556,6 @@ int flags;
okendpt(src_e, &src_p);
if (RTS_ISSET(proc_addr(src_p), NO_ENDPOINT))
{
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.src_died++;
return ESRCDIED;
}
}
@ -597,10 +568,11 @@ int flags;
if (!RTS_ISSET(caller_ptr, SENDING)) {
/* Check if there are pending notifications, except for SENDREC. */
if (! (caller_ptr->p_misc_flags & REPLY_PENDING)) {
if (! (caller_ptr->p_misc_flags & MF_REPLY_PEND)) {
map = &priv(caller_ptr)->s_notify_pending;
for (chunk=&map->chunk[0]; chunk<&map->chunk[NR_SYS_CHUNKS]; chunk++) {
endpoint_t hisep;
/* Find a pending notification from the requested source. */
if (! *chunk) continue; /* no bits in chunk */
@ -617,8 +589,13 @@ int flags;
*chunk &= ~(1 << i); /* no longer pending */
/* Found a suitable source, deliver the notification message. */
BuildMess(&m, src_proc_nr, caller_ptr); /* assemble message */
CopyMess(src_proc_nr, proc_addr(HARDWARE), &m, caller_ptr, m_ptr);
BuildNotifyMessage(&m, src_proc_nr, caller_ptr); /* assemble message */
hisep = proc_addr(src_proc_nr)->p_endpoint;
vmassert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
vmassert(src_e == ANY || hisep == src_e);
if((r=QueueMess(hisep, vir2phys(&m), caller_ptr)) != OK) {
minix_panic("mini_receive: local QueueMess failed", NO_NUM);
}
return(OK); /* report success */
}
}
@ -627,20 +604,20 @@ int flags;
xpp = &caller_ptr->p_caller_q;
while (*xpp != NIL_PROC) {
if (src_e == ANY || src_p == proc_nr(*xpp)) {
#if 1
#if DEBUG_SCHED_CHECK
if (RTS_ISSET(*xpp, SLOT_FREE) || RTS_ISSET(*xpp, NO_ENDPOINT))
{
kprintf("%d: receive from %d; found dead %d (%s)?\n",
caller_ptr->p_endpoint, src_e, (*xpp)->p_endpoint,
(*xpp)->p_name);
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.deadproc++;
return EINVAL;
}
#endif
/* Found acceptable message. Copy it and update status. */
CopyMess((*xpp)->p_nr, *xpp, (*xpp)->p_messbuf, caller_ptr, m_ptr);
vmassert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
QueueMess((*xpp)->p_endpoint,
vir2phys(&(*xpp)->p_sendmsg), caller_ptr);
RTS_UNSET(*xpp, SENDING);
*xpp = (*xpp)->p_q_link; /* remove from queue */
return(OK); /* report success */
@ -659,7 +636,6 @@ int flags;
}
else
{
caller_ptr->p_messbuf = m_ptr;
r= try_async(caller_ptr);
}
if (r == OK)
@ -672,12 +648,9 @@ int flags;
*/
if ( ! (flags & NON_BLOCKING)) {
caller_ptr->p_getfrom_e = src_e;
caller_ptr->p_messbuf = m_ptr;
RTS_SET(caller_ptr, RECEIVING);
return(OK);
} else {
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.not_ready++;
return(ENOTREADY);
}
}
@ -692,19 +665,22 @@ int dst; /* which process to notify */
register struct proc *dst_ptr = proc_addr(dst);
int src_id; /* source id for late delivery */
message m; /* the notification message */
int r;
/* Check to see if target is blocked waiting for this message. A process
* can be both sending and receiving during a SENDREC system call.
*/
if (WILLRECEIVE(dst_ptr, caller_ptr->p_endpoint) &&
! (dst_ptr->p_misc_flags & REPLY_PENDING)) {
! (dst_ptr->p_misc_flags & MF_REPLY_PEND)) {
/* Destination is indeed waiting for a message. Assemble a notification
* message and deliver it. Copy from pseudo-source HARDWARE, since the
* message is in the kernel's address space.
*/
BuildMess(&m, proc_nr(caller_ptr), dst_ptr);
CopyMess(proc_nr(caller_ptr), proc_addr(HARDWARE), &m,
dst_ptr, dst_ptr->p_messbuf);
BuildNotifyMessage(&m, proc_nr(caller_ptr), dst_ptr);
vmassert(!(dst_ptr->p_misc_flags & MF_DELIVERMSG));
if((r=QueueMess(caller_ptr->p_endpoint, vir2phys(&m), dst_ptr)) != OK) {
minix_panic("mini_notify: local QueueMess failed", NO_NUM);
}
RTS_UNSET(dst_ptr, RECEIVING);
return(OK);
}
@ -749,21 +725,20 @@ struct proc *caller_ptr;
asynmsg_t *table;
size_t size;
{
int i, dst_p, done, do_notify;
int i, dst_p, done, do_notify, r;
unsigned flags;
struct proc *dst_ptr;
struct priv *privp;
message *m_ptr;
asynmsg_t tabent;
vir_bytes table_v = (vir_bytes) table;
vir_bytes linaddr;
privp= priv(caller_ptr);
if (!(privp->s_flags & SYS_PROC))
{
kprintf(
"mini_senda: warning caller has no privilege structure\n");
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.no_priv++;
return EPERM;
}
@ -777,6 +752,13 @@ size_t size;
return OK;
}
if(!(linaddr = umap_local(caller_ptr, D, (vir_bytes) table,
size * sizeof(*table)))) {
printf("mini_senda: umap_local failed; 0x%lx len 0x%lx\n",
table, size * sizeof(*table));
return EFAULT;
}
/* Limit size to something reasonable. An arbitrary choice is 16
* times the number of process table entries.
*
@ -785,8 +767,6 @@ size_t size;
*/
if (size > 16*(NR_TASKS + NR_PROCS))
{
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.bad_size++;
return EDOM;
}
@ -808,8 +788,6 @@ size_t size;
if (flags & ~(AMF_VALID|AMF_DONE|AMF_NOTIFY) ||
!(flags & AMF_VALID))
{
if (caller_ptr->p_endpoint == ipc_stats_target)
ipc_stats.bad_senda++;
return EINVAL;
}
@ -833,11 +811,6 @@ size_t size;
continue;
}
#if 0
kprintf("mini_senda: entry[%d]: flags 0x%x dst %d/%d\n",
i, tabent.flags, tabent.dst, dst_p);
#endif
dst_ptr = proc_addr(dst_p);
/* NO_ENDPOINT should be removed */
@ -866,12 +839,13 @@ size_t size;
m_ptr= &table[i].msg; /* Note: pointer in the
* caller's address space.
*/
CopyMess(caller_ptr->p_nr, caller_ptr, m_ptr, dst_ptr,
dst_ptr->p_messbuf);
/* Copy message from sender. */
tabent.result= QueueMess(caller_ptr->p_endpoint,
linaddr + (vir_bytes) &table[i].msg -
(vir_bytes) table, dst_ptr);
if(tabent.result == OK)
RTS_UNSET(dst_ptr, RECEIVING);
RTS_UNSET(dst_ptr, RECEIVING);
tabent.result= OK;
A_INSERT(i, result);
tabent.flags= flags | AMF_DONE;
A_INSERT(i, flags);
@ -894,13 +868,6 @@ size_t size;
{
privp->s_asyntab= (vir_bytes)table;
privp->s_asynsize= size;
#if 0
if(caller_ptr->p_endpoint > INIT_PROC_NR) {
kprintf("kernel: %s (%d) asynsend table at 0x%lx, %d\n",
caller_ptr->p_name, caller_ptr->p_endpoint,
table, size);
}
#endif
}
return OK;
}
@ -915,7 +882,7 @@ struct proc *caller_ptr;
int r;
struct priv *privp;
struct proc *src_ptr;
/* Try all privilege structures */
for (privp = BEG_PRIV_ADDR; privp < END_PRIV_ADDR; ++privp)
{
@ -923,11 +890,8 @@ struct proc *caller_ptr;
continue;
if (privp->s_asynsize == 0)
continue;
#if 0
kprintf("try_async: found asyntable for proc %d\n",
privp->s_proc_nr);
#endif
src_ptr= proc_addr(privp->s_proc_nr);
vmassert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
r= try_one(src_ptr, caller_ptr);
if (r == OK)
return r;
@ -957,6 +921,7 @@ struct proc *dst_ptr;
asynmsg_t tabent;
vir_bytes table_v;
struct proc *caller_ptr;
int r;
privp= priv(src_ptr);
size= privp->s_asynsize;
@ -986,8 +951,6 @@ struct proc *dst_ptr;
{
kprintf("try_one: bad bits in table\n");
privp->s_asynsize= 0;
if (src_ptr->p_endpoint == ipc_stats_target)
ipc_stats.bad_senda++;
return EINVAL;
}
@ -1015,10 +978,11 @@ struct proc *dst_ptr;
m_ptr= &table_ptr[i].msg; /* Note: pointer in the
* caller's address space.
*/
CopyMess(src_ptr->p_nr, src_ptr, m_ptr, dst_ptr,
dst_ptr->p_messbuf);
A_RETRIEVE(i, msg);
r = QueueMess(src_ptr->p_endpoint, vir2phys(&tabent.msg),
dst_ptr);
tabent.result= OK;
tabent.result= r;
A_INSERT(i, result);
tabent.flags= flags | AMF_DONE;
A_INSERT(i, flags);
@ -1066,43 +1030,6 @@ int dst_e; /* (endpoint) who is to be notified */
return(result);
}
/*===========================================================================*
* soft_notify *
*===========================================================================*/
PUBLIC int soft_notify(dst_e)
int dst_e; /* (endpoint) who is to be notified */
{
int dst, u = 0;
struct proc *dstp, *sys = proc_addr(SYSTEM);
/* Delayed interface to notify() from SYSTEM that is safe/easy to call
* from more places than notify().
*/
if(!intr_disabled()) { lock; u = 1; }
{
if(!isokendpt(dst_e, &dst))
minix_panic("soft_notify to dead ep", dst_e);
dstp = proc_addr(dst);
if(!dstp->p_softnotified) {
dstp->next_soft_notify = softnotify;
softnotify = dstp;
dstp->p_softnotified = 1;
if (RTS_ISSET(sys, RECEIVING)) {
sys->p_messbuf->m_source = SYSTEM;
RTS_UNSET(sys, RECEIVING);
}
}
}
if(u) { unlock; }
return OK;
}
/*===========================================================================*
* enqueue *
*===========================================================================*/
@ -1117,15 +1044,19 @@ register struct proc *rp; /* this process is now runnable */
int q; /* scheduling queue to use */
int front; /* add to front or back */
NOREC_ENTER(enqueuefunc);
#if DEBUG_SCHED_CHECK
if(!intr_disabled()) { minix_panic("enqueue with interrupts enabled", NO_NUM); }
CHECK_RUNQUEUES;
if (rp->p_ready) minix_panic("enqueue already ready process", NO_NUM);
#endif
/* Determine where to insert to process. */
sched(rp, &q, &front);
vmassert(q >= 0);
vmassert(q < IDLE_Q || rp->p_endpoint == IDLE);
/* Now add the process to the queue. */
if (rdy_head[q] == NIL_PROC) { /* add to empty queue */
rdy_head[q] = rdy_tail[q] = rp; /* create a new queue */
@ -1141,19 +1072,29 @@ register struct proc *rp; /* this process is now runnable */
rp->p_nextready = NIL_PROC; /* mark new end */
}
/* Now select the next process to run, if there isn't a current
* process yet or current process isn't ready any more, or
* it's PREEMPTIBLE.
*/
if(!proc_ptr || proc_ptr->p_rts_flags ||
(priv(proc_ptr)->s_flags & PREEMPTIBLE)) {
pick_proc();
}
#if DEBUG_SCHED_CHECK
rp->p_ready = 1;
CHECK_RUNQUEUES;
#endif
/* Now select the next process to run, if there isn't a current
* process yet or current process isn't ready any more, or
* it's PREEMPTIBLE.
*/
vmassert(proc_ptr);
#if 0
if(!proc_ptr || proc_ptr->p_rts_flags)
#else
if((proc_ptr->p_priority > rp->p_priority) &&
(priv(proc_ptr)->s_flags & PREEMPTIBLE))
#endif
pick_proc();
#if DEBUG_SCHED_CHECK
CHECK_RUNQUEUES;
#endif
NOREC_RETURN(enqueuefunc, );
}
/*===========================================================================*
@ -1170,14 +1111,17 @@ register struct proc *rp; /* this process is no longer runnable */
register struct proc **xpp; /* iterate over queue */
register struct proc *prev_xp;
NOREC_ENTER(dequeuefunc);
#if DEBUG_STACK_CHECK
/* Side-effect for kernel: check if the task's stack still is ok? */
if (iskernelp(rp)) {
if (*priv(rp)->s_stack_guard != STACK_GUARD)
minix_panic("stack overrun by task", proc_nr(rp));
}
#endif
#if DEBUG_SCHED_CHECK
CHECK_RUNQUEUES;
if(!intr_disabled()) { minix_panic("dequeue with interrupts enabled", NO_NUM); }
if (! rp->p_ready) minix_panic("dequeue() already unready process", NO_NUM);
#endif
@ -1193,17 +1137,23 @@ register struct proc *rp; /* this process is no longer runnable */
*xpp = (*xpp)->p_nextready; /* replace with next chain */
if (rp == rdy_tail[q]) /* queue tail removed */
rdy_tail[q] = prev_xp; /* set new tail */
#if DEBUG_SCHED_CHECK
rp->p_ready = 0;
CHECK_RUNQUEUES;
#endif
if (rp == proc_ptr || rp == next_ptr) /* active process removed */
pick_proc(); /* pick new process to run */
pick_proc(); /* pick new process to run */
break;
}
prev_xp = *xpp; /* save previous in chain */
}
#if DEBUG_SCHED_CHECK
rp->p_ready = 0;
CHECK_RUNQUEUES;
#endif
NOREC_RETURN(dequeuefunc, );
}
/*===========================================================================*
@ -1249,25 +1199,28 @@ PRIVATE void pick_proc()
* clock task can tell who to bill for system time.
*/
register struct proc *rp; /* process to run */
int q; /* iterate over queues */
int q; /* iterate over queues */
NOREC_ENTER(pick);
/* Check each of the scheduling queues for ready processes. The number of
* queues is defined in proc.h, and priorities are set in the task table.
* The lowest queue contains IDLE, which is always ready.
*/
for (q=0; q < NR_SCHED_QUEUES; q++) {
if ( (rp = rdy_head[q]) != NIL_PROC) {
next_ptr = rp; /* run process 'rp' next */
#if 0
if(rp->p_endpoint != 4 && rp->p_endpoint != 5 && rp->p_endpoint != IDLE && rp->p_endpoint != SYSTEM)
kprintf("[run %s]", rp->p_name);
#endif
if (priv(rp)->s_flags & BILLABLE)
bill_ptr = rp; /* bill for system time */
return;
}
int found = 0;
if(!(rp = rdy_head[q])) {
TRACE(VF_PICKPROC, printf("queue %d empty\n", q););
continue;
}
TRACE(VF_PICKPROC, printf("found %s / %d on queue %d\n",
rp->p_name, rp->p_endpoint, q););
next_ptr = rp; /* run process 'rp' next */
vmassert(!next_ptr->p_rts_flags);
if (priv(rp)->s_flags & BILLABLE)
bill_ptr = rp; /* bill for system time */
NOREC_RETURN(pick, );
}
minix_panic("no ready process", NO_NUM);
}
/*===========================================================================*
@ -1302,9 +1255,6 @@ timer_t *tp; /* watchdog timer pointer */
unlock;
}
}
#if DEBUG
kprintf("ticks_added: %d\n", ticks_added);
#endif
/* Now schedule a new watchdog timer to balance the queues again. The
* period depends on the total amount of quantum ticks added.
@ -1328,37 +1278,6 @@ message *m_ptr; /* pointer to message buffer */
return(result);
}
/*===========================================================================*
* lock_enqueue *
*===========================================================================*/
PUBLIC void lock_enqueue(rp)
struct proc *rp; /* this process is now runnable */
{
/* Safe gateway to enqueue() for tasks. */
lock;
enqueue(rp);
unlock;
}
/*===========================================================================*
* lock_dequeue *
*===========================================================================*/
PUBLIC void lock_dequeue(rp)
struct proc *rp; /* this process is no longer runnable */
{
/* Safe gateway to dequeue() for tasks. */
if (k_reenter >= 0) {
/* We're in an exception or interrupt, so don't lock (and ...
* don't unlock).
*/
dequeue(rp);
} else {
lock;
dequeue(rp);
unlock;
}
}
/*===========================================================================*
* endpoint_lookup *
*===========================================================================*/
@ -1401,24 +1320,18 @@ int *p, fatalflag;
*p = _ENDPOINT_P(e);
if(!isokprocn(*p)) {
#if DEBUG_ENABLE_IPC_WARNINGS
#if 0
kprintf("kernel:%s:%d: bad endpoint %d: proc %d out of range\n",
file, line, e, *p);
#endif
#endif
} else if(isemptyn(*p)) {
#if DEBUG_ENABLE_IPC_WARNINGS
#if 0
kprintf("kernel:%s:%d: bad endpoint %d: proc %d empty\n", file, line, e, *p);
#endif
#endif
} else if(proc_addr(*p)->p_endpoint != e) {
#if DEBUG_ENABLE_IPC_WARNINGS
#if 0
kprintf("kernel:%s:%d: bad endpoint %d: proc %d has ept %d (generation %d vs. %d)\n", file, line,
e, *p, proc_addr(*p)->p_endpoint,
_ENDPOINT_G(e), _ENDPOINT_G(proc_addr(*p)->p_endpoint));
#endif
#endif
} else ok = 1;
if(!ok && fatalflag) {

View File

@ -10,6 +10,7 @@
* struct proc, be sure to change sconst.h to match.
*/
#include <minix/com.h>
#include <minix/portio.h>
#include "const.h"
#include "priv.h"
@ -36,7 +37,6 @@ struct proc {
struct proc *p_nextready; /* pointer to next ready process */
struct proc *p_caller_q; /* head of list of procs wishing to send */
struct proc *p_q_link; /* link to next proc wishing to send */
message *p_messbuf; /* pointer to passed message buffer */
int p_getfrom_e; /* from whom does process want to receive? */
int p_sendto_e; /* to whom does process want to send? */
@ -46,6 +46,11 @@ struct proc {
endpoint_t p_endpoint; /* endpoint number, generation-aware */
message p_sendmsg; /* Message from this process if SENDING */
message p_delivermsg; /* Message for this process if MF_DELIVERMSG */
vir_bytes p_delivermsg_vir; /* Virtual addr this proc wants message at */
vir_bytes p_delivermsg_lin; /* Linear addr this proc wants message at */
/* If handler functions detect a process wants to do something with
* memory that isn't present, VM has to fix it. Until it has asked
* what needs to be done and fixed it, save necessary state here.
@ -57,28 +62,12 @@ struct proc {
struct proc *nextrestart; /* next in vmrestart chain */
struct proc *nextrequestor; /* next in vmrequest chain */
#define VMSTYPE_SYS_NONE 0
#define VMSTYPE_SYS_MESSAGE 1
#define VMSTYPE_SYS_CALL 2
#define VMSTYPE_MSGCOPY 3
#define VMSTYPE_KERNELCALL 1
#define VMSTYPE_DELIVERMSG 2
int type; /* suspended operation */
union {
/* VMSTYPE_SYS_MESSAGE */
message reqmsg; /* suspended request message */
/* VMSTYPE_SYS_CALL */
struct {
int call_nr;
message *m_ptr;
int src_dst_e;
long bit_map;
} sys_call;
/* VMSTYPE_MSGCOPY */
struct {
struct proc *dst;
vir_bytes dst_v;
message msgbuf;
} msgcopy;
} saved;
/* Parameters of request to VM */
@ -89,10 +78,9 @@ struct proc {
/* VM result when available */
int vmresult;
/* Target gets this set. (But caller and target can be
* the same, so we can't put this in the 'saved' union.)
*/
struct proc *requestor;
#if DEBUG_VMASSERT
char stacktrace[200];
#endif
/* If the suspended operation is a sys_call, its details are
* stored here.
@ -107,21 +95,26 @@ struct proc {
#define PMAGIC 0xC0FFEE1
int p_magic; /* check validity of proc pointers */
#endif
#if DEBUG_TRACE
int p_schedules;
#endif
};
/* Bits for the runtime flags. A process is runnable iff p_rts_flags == 0. */
#define SLOT_FREE 0x01 /* process slot is free */
#define NO_PRIORITY 0x02 /* process has been stopped */
#define SENDING 0x04 /* process blocked trying to send */
#define RECEIVING 0x08 /* process blocked trying to receive */
#define SIGNALED 0x10 /* set when new kernel signal arrives */
#define SIG_PENDING 0x20 /* unready while signal being processed */
#define P_STOP 0x40 /* set when process is being traced */
#define NO_PRIV 0x80 /* keep forked system process from running */
#define NO_ENDPOINT 0x100 /* process cannot send or receive messages */
#define VMINHIBIT 0x200 /* not scheduled until pagetable set by VM */
#define PAGEFAULT 0x400 /* process has unhandled pagefault */
#define VMREQUEST 0x800 /* originator of vm memory request */
#define SLOT_FREE 0x01 /* process slot is free */
#define NO_PRIORITY 0x02 /* process has been stopped */
#define SENDING 0x04 /* process blocked trying to send */
#define RECEIVING 0x08 /* process blocked trying to receive */
#define SIGNALED 0x10 /* set when new kernel signal arrives */
#define SIG_PENDING 0x20 /* unready while signal being processed */
#define P_STOP 0x40 /* set when process is being traced */
#define NO_PRIV 0x80 /* keep forked system process from running */
#define NO_ENDPOINT 0x100 /* process cannot send or receive messages */
#define VMINHIBIT 0x200 /* not scheduled until pagetable set by VM */
#define PAGEFAULT 0x400 /* process has unhandled pagefault */
#define VMREQUEST 0x800 /* originator of vm memory request */
#define VMREQTARGET 0x1000 /* target of vm memory request */
/* These runtime flags can be tested and manipulated by these macros. */
@ -131,47 +124,60 @@ struct proc {
/* Set flag and dequeue if the process was runnable. */
#define RTS_SET(rp, f) \
do { \
vmassert(intr_disabled()); \
if(!(rp)->p_rts_flags) { dequeue(rp); } \
(rp)->p_rts_flags |= (f); \
vmassert(intr_disabled()); \
} while(0)
/* Clear flag and enqueue if the process was not runnable but is now. */
#define RTS_UNSET(rp, f) \
do { \
int rts; \
rts = (rp)->p_rts_flags; \
vmassert(intr_disabled()); \
rts = (rp)->p_rts_flags; \
(rp)->p_rts_flags &= ~(f); \
if(rts && !(rp)->p_rts_flags) { enqueue(rp); } \
vmassert(intr_disabled()); \
} while(0)
/* Set flag and dequeue if the process was runnable. */
#define RTS_LOCK_SET(rp, f) \
do { \
if(!(rp)->p_rts_flags) { lock_dequeue(rp); } \
int u = 0; \
if(!intr_disabled()) { u = 1; lock; } \
if(!(rp)->p_rts_flags) { dequeue(rp); } \
(rp)->p_rts_flags |= (f); \
if(u) { unlock; } \
} while(0)
/* Clear flag and enqueue if the process was not runnable but is now. */
#define RTS_LOCK_UNSET(rp, f) \
do { \
int rts; \
rts = (rp)->p_rts_flags; \
int u = 0; \
if(!intr_disabled()) { u = 1; lock; } \
rts = (rp)->p_rts_flags; \
(rp)->p_rts_flags &= ~(f); \
if(rts && !(rp)->p_rts_flags) { lock_enqueue(rp); } \
if(rts && !(rp)->p_rts_flags) { enqueue(rp); } \
if(u) { unlock; } \
} while(0)
/* Set flags to this value. */
#define RTS_LOCK_SETFLAGS(rp, f) \
do { \
if(!(rp)->p_rts_flags && (f)) { lock_dequeue(rp); } \
(rp)->p_rts_flags = (f); \
int u = 0; \
if(!intr_disabled()) { u = 1; lock; } \
if(!(rp)->p_rts_flags && (f)) { dequeue(rp); } \
(rp)->p_rts_flags = (f); \
if(u) { unlock; } \
} while(0)
/* Misc flags */
#define REPLY_PENDING 0x01 /* reply to IPC_REQUEST is pending */
#define MF_VM 0x08 /* process uses VM */
#define MF_REPLY_PEND 0x01 /* reply to IPC_REQUEST is pending */
#define MF_ASYNMSG 0x10 /* Asynchrous message pending */
#define MF_FULLVM 0x20
#define MF_DELIVERMSG 0x40 /* Copy message for him before running */
/* Scheduling priorities for p_priority. Values must start at zero (highest
* priority) and increment. Priorities of the processes in the boot image

View File

@ -33,13 +33,11 @@ _PROTOTYPE( int sys_call, (int call_nr, int src_dst,
message *m_ptr, long bit_map) );
_PROTOTYPE( void sys_call_restart, (struct proc *caller) );
_PROTOTYPE( int lock_notify, (int src, int dst) );
_PROTOTYPE( int soft_notify, (int dst) );
_PROTOTYPE( int lock_send, (int dst, message *m_ptr) );
_PROTOTYPE( void lock_enqueue, (struct proc *rp) );
_PROTOTYPE( void lock_dequeue, (struct proc *rp) );
_PROTOTYPE( void enqueue, (struct proc *rp) );
_PROTOTYPE( void dequeue, (struct proc *rp) );
_PROTOTYPE( void balance_queues, (struct timer *tp) );
_PROTOTYPE( void schedcheck, (void) );
_PROTOTYPE( struct proc *endpoint_lookup, (endpoint_t ep) );
#if DEBUG_ENABLE_IPC_WARNINGS
_PROTOTYPE( int isokendpt_f, (char *file, int line, endpoint_t e, int *p, int f));
@ -87,6 +85,8 @@ _PROTOTYPE( void cons_seth, (int pos, int n) );
#define CHECK_RUNQUEUES check_runqueues_f(__FILE__, __LINE__)
_PROTOTYPE( void check_runqueues_f, (char *file, int line) );
#endif
_PROTOTYPE( char *rtsflagstr, (int flags) );
_PROTOTYPE( char *miscflagstr, (int flags) );
/* system/do_safecopy.c */
_PROTOTYPE( int verify_grant, (endpoint_t, endpoint_t, cp_grant_id_t, vir_bytes,
@ -102,18 +102,21 @@ _PROTOTYPE( void stop_profile_clock, (void) );
#endif
/* functions defined in architecture-dependent files. */
_PROTOTYPE( void phys_copy, (phys_bytes source, phys_bytes dest,
_PROTOTYPE( phys_bytes phys_copy, (phys_bytes source, phys_bytes dest,
phys_bytes count) );
_PROTOTYPE( void phys_copy_fault, (void));
#define virtual_copy(src, dst, bytes) virtual_copy_f(src, dst, bytes, 0)
#define virtual_copy_vmcheck(src, dst, bytes) virtual_copy_f(src, dst, bytes, 1)
_PROTOTYPE( int virtual_copy_f, (struct vir_addr *src, struct vir_addr *dst,
vir_bytes bytes, int vmcheck) );
_PROTOTYPE( int data_copy, (endpoint_t from, vir_bytes from_addr,
endpoint_t to, vir_bytes to_addr, size_t bytes));
_PROTOTYPE( int data_copy_vmcheck, (endpoint_t from, vir_bytes from_addr,
endpoint_t to, vir_bytes to_addr, size_t bytes));
#define data_copy_to(d, p, v, n) data_copy(SYSTEM, (d), (p), (v), (n));
#define data_copy_from(d, p, v, n) data_copy((p), (v), SYSTEM, (d), (n));
_PROTOTYPE( void alloc_segments, (struct proc *rp) );
_PROTOTYPE( void vm_init, (void) );
_PROTOTYPE( void vm_init, (struct proc *first) );
_PROTOTYPE( void vm_map_range, (u32_t base, u32_t size, u32_t offset) );
_PROTOTYPE( int vm_copy, (vir_bytes src, struct proc *srcproc,
vir_bytes dst, struct proc *dstproc, phys_bytes bytes));
@ -126,7 +129,7 @@ _PROTOTYPE( phys_bytes umap_remote, (struct proc* rp, int seg,
_PROTOTYPE( phys_bytes umap_virtual, (struct proc* rp, int seg,
vir_bytes vir_addr, vir_bytes bytes) );
_PROTOTYPE( phys_bytes seg2phys, (U16_t) );
_PROTOTYPE( void phys_memset, (phys_bytes source, unsigned long pattern,
_PROTOTYPE( int vm_phys_memset, (phys_bytes source, u8_t pattern,
phys_bytes count) );
_PROTOTYPE( vir_bytes alloc_remote_segment, (u32_t *, segframe_t *,
int, phys_bytes, vir_bytes, int));
@ -160,5 +163,10 @@ _PROTOTYPE( int vm_checkrange, (struct proc *caller, struct proc *target,
vir_bytes start, vir_bytes length, int writeflag, int checkonly));
_PROTOTYPE( void proc_stacktrace, (struct proc *proc) );
_PROTOTYPE( int vm_lookup, (struct proc *proc, vir_bytes virtual, vir_bytes *result, u32_t *ptent));
_PROTOTYPE( int vm_suspend, (struct proc *caller, struct proc *target,
phys_bytes lin, phys_bytes size, int wrflag, int type));
_PROTOTYPE( int delivermsg, (struct proc *target));
_PROTOTYPE( phys_bytes arch_switch_copymsg, (struct proc *rp, message *m,
phys_bytes lin));
#endif /* PROTO_H */

View File

@ -38,6 +38,7 @@
#include <sys/sigcontext.h>
#include <minix/endpoint.h>
#include <minix/safecopies.h>
#include <minix/portio.h>
#include <minix/u64.h>
#include <sys/vm_i386.h>
@ -56,7 +57,6 @@ char *callnames[NR_SYS_CALLS];
call_vec[(call_nr-KERNEL_CALL)] = (handler)
FORWARD _PROTOTYPE( void initialize, (void));
FORWARD _PROTOTYPE( void softnotify_check, (void));
FORWARD _PROTOTYPE( struct proc *vmrestart_check, (message *));
/*===========================================================================*
@ -75,26 +75,18 @@ PUBLIC void sys_task()
/* Initialize the system task. */
initialize();
while (TRUE) {
struct proc *restarting;
restarting = vmrestart_check(&m);
softnotify_check();
if(softnotify)
minix_panic("softnotify non-NULL before receive (1)", NO_NUM);
if(!restarting) {
int r;
/* Get work. Block and wait until a request message arrives. */
if(softnotify)
minix_panic("softnotify non-NULL before receive (2)", NO_NUM);
if((r=receive(ANY, &m)) != OK)
minix_panic("receive() failed", r);
if(m.m_source == SYSTEM)
continue;
if(softnotify)
minix_panic("softnotify non-NULL after receive", NO_NUM);
}
}
sys_call_code = (unsigned) m.m_type;
call_nr = sys_call_code - KERNEL_CALL;
@ -102,37 +94,13 @@ PUBLIC void sys_task()
okendpt(who_e, &who_p);
caller_ptr = proc_addr(who_p);
if (caller_ptr->p_endpoint == ipc_stats_target)
sys_stats.total= add64u(sys_stats.total, 1);
/* See if the caller made a valid request and try to handle it. */
if (call_nr < 0 || call_nr >= NR_SYS_CALLS) { /* check call number */
#if DEBUG_ENABLE_IPC_WARNINGS
kprintf("SYSTEM: illegal request %d from %d.\n",
call_nr,m.m_source);
#endif
if (caller_ptr->p_endpoint == ipc_stats_target)
sys_stats.bad_req++;
result = EBADREQUEST; /* illegal message type */
}
else if (!GET_BIT(priv(caller_ptr)->s_k_call_mask, call_nr)) {
#if DEBUG_ENABLE_IPC_WARNINGS
static int curr= 0, limit= 100, extra= 20;
if (curr < limit+extra)
{
kprintf("SYSTEM: request %d from %d denied.\n",
call_nr, m.m_source);
} else if (curr == limit+extra)
{
kprintf("sys_task: no debug output for a while\n");
}
else if (curr == 2*limit-1)
limit *= 2;
curr++;
#endif
if (caller_ptr->p_endpoint == ipc_stats_target)
sys_stats.not_allowed++;
result = ECALLDENIED; /* illegal message type */
}
else {
@ -144,15 +112,18 @@ PUBLIC void sys_task()
* until VM tells us it's allowed. VM has been notified
* and we must wait for its reply to restart the call.
*/
vmassert(RTS_ISSET(caller_ptr, VMREQUEST));
vmassert(caller_ptr->p_vmrequest.type == VMSTYPE_KERNELCALL);
memcpy(&caller_ptr->p_vmrequest.saved.reqmsg, &m, sizeof(m));
caller_ptr->p_vmrequest.type = VMSTYPE_SYS_MESSAGE;
} else if (result != EDONTREPLY) {
/* Send a reply, unless inhibited by a handler function.
* Use the kernel function lock_send() to prevent a system
* call trap.
*/
if(restarting)
RTS_LOCK_UNSET(restarting, VMREQUEST);
if(restarting) {
vmassert(!RTS_ISSET(restarting, VMREQUEST));
vmassert(!RTS_ISSET(restarting, VMREQTARGET));
}
m.m_type = result; /* report status of call */
if(WILLRECEIVE(caller_ptr, SYSTEM)) {
if (OK != (s=lock_send(m.m_source, &m))) {
@ -220,7 +191,6 @@ PRIVATE void initialize(void)
map(SYS_NEWMAP, do_newmap); /* set up a process memory map */
map(SYS_SEGCTL, do_segctl); /* add segment and get selector */
map(SYS_MEMSET, do_memset); /* write char to memory area */
map(SYS_VM_SETBUF, do_vm_setbuf); /* PM passes buffer for page tables */
map(SYS_VMCTL, do_vmctl); /* various VM process settings */
/* Copying. */
@ -307,7 +277,7 @@ PUBLIC void send_sig(int proc_nr, int sig_nr)
rp = proc_addr(proc_nr);
sigaddset(&priv(rp)->s_sig_pending, sig_nr);
soft_notify(rp->p_endpoint);
lock_notify(SYSTEM, rp->p_endpoint);
}
/*===========================================================================*
@ -424,7 +394,9 @@ register struct proc *rc; /* slot of process to clean up */
if(isemptyp(rc)) minix_panic("clear_proc: empty process", rc->p_endpoint);
if(rc->p_endpoint == PM_PROC_NR || rc->p_endpoint == VFS_PROC_NR) {
#if 0
if(rc->p_endpoint == PM_PROC_NR || rc->p_endpoint == VFS_PROC_NR)
{
/* This test is great for debugging system processes dying,
* but as this happens normally on reboot, not good permanent code.
*/
@ -434,6 +406,7 @@ register struct proc *rc; /* slot of process to clean up */
util_stacktrace();
minix_panic("clear_proc: system process died", rc->p_endpoint);
}
#endif
/* Make sure that the exiting process is no longer scheduled. */
RTS_LOCK_SET(rc, NO_ENDPOINT);
@ -500,13 +473,6 @@ register struct proc *rc; /* slot of process to clean up */
#endif
}
}
/* No pending soft notifies. */
for(np = softnotify; np; np = np->next_soft_notify) {
if(np == rc) {
minix_panic("dying proc was on next_soft_notify", np->p_endpoint);
}
}
}
/*===========================================================================*
@ -540,28 +506,6 @@ int access; /* does grantee want to CPF_READ or _WRITE? */
return umap_virtual(proc_addr(proc_nr), D, v_offset, bytes);
}
/*===========================================================================*
* softnotify_check *
*===========================================================================*/
PRIVATE void softnotify_check(void)
{
struct proc *np, *nextnp;
if(!softnotify)
return;
for(np = softnotify; np; np = nextnp) {
if(!np->p_softnotified)
minix_panic("softnotify but no p_softnotified", NO_NUM);
lock_notify(SYSTEM, np->p_endpoint);
nextnp = np->next_soft_notify;
np->next_soft_notify = NULL;
np->p_softnotified = 0;
}
softnotify = NULL;
}
/*===========================================================================*
* vmrestart_check *
*===========================================================================*/
@ -575,23 +519,18 @@ PRIVATE struct proc *vmrestart_check(message *m)
if(!(restarting = vmrestart))
return NULL;
if(restarting->p_rts_flags & SLOT_FREE)
minix_panic("SYSTEM: VMREQUEST set for empty process", NO_NUM);
vmassert(!RTS_ISSET(restarting, SLOT_FREE));
vmassert(RTS_ISSET(restarting, VMREQUEST));
type = restarting->p_vmrequest.type;
restarting->p_vmrequest.type = VMSTYPE_SYS_NONE;
vmrestart = restarting->p_vmrequest.nextrestart;
if(!RTS_ISSET(restarting, VMREQUEST))
minix_panic("SYSTEM: VMREQUEST not set for process on vmrestart queue",
restarting->p_endpoint);
switch(type) {
case VMSTYPE_SYS_MESSAGE:
case VMSTYPE_KERNELCALL:
memcpy(m, &restarting->p_vmrequest.saved.reqmsg, sizeof(*m));
if(m->m_source != restarting->p_endpoint)
minix_panic("SYSTEM: vmrestart source doesn't match",
NO_NUM);
restarting->p_vmrequest.saved.reqmsg.m_source = NONE;
vmassert(m->m_source == restarting->p_endpoint);
/* Original caller could've disappeared in the meantime. */
if(!isokendpt(m->m_source, &who_p)) {
kprintf("SYSTEM: ignoring call %d from dead %d\n",
@ -610,26 +549,6 @@ PRIVATE struct proc *vmrestart_check(message *m)
}
}
return restarting;
case VMSTYPE_SYS_CALL:
kprintf("SYSTEM: restart sys_call\n");
/* Restarting a kernel trap. */
sys_call_restart(restarting);
/* Handled; restart system loop. */
return NULL;
case VMSTYPE_MSGCOPY:
/* Do delayed message copy. */
if((r=data_copy(SYSTEM,
(vir_bytes) &restarting->p_vmrequest.saved.msgcopy.msgbuf,
restarting->p_vmrequest.saved.msgcopy.dst->p_endpoint,
(vir_bytes) restarting->p_vmrequest.saved.msgcopy.dst_v,
sizeof(message))) != OK) {
minix_panic("SYSTEM: delayed msgcopy failed", r);
}
RTS_LOCK_UNSET(restarting, VMREQUEST);
/* Handled; restart system loop. */
return NULL;
default:
minix_panic("strange restart type", type);
}

View File

@ -91,9 +91,6 @@ _PROTOTYPE( int do_memset, (message *m_ptr) );
#define do_memset do_unused
#endif
_PROTOTYPE( int do_vm_setbuf, (message *m_ptr) );
_PROTOTYPE( int do_vm_map, (message *m_ptr) );
_PROTOTYPE( int do_abort, (message *m_ptr) );
#if ! USE_ABORT
#define do_abort do_unused

View File

@ -51,7 +51,6 @@ OBJECTS = \
$(SYSTEM)(do_sigreturn.o) \
$(SYSTEM)(do_abort.o) \
$(SYSTEM)(do_getinfo.o) \
$(SYSTEM)(do_vm_setbuf.o) \
$(SYSTEM)(do_sprofile.o) \
$(SYSTEM)(do_cprofile.o) \
$(SYSTEM)(do_profbuf.o) \
@ -162,9 +161,6 @@ $(SYSTEM)(do_vm.o): do_vm.o
do_vm.o: do_vm.c
$(CC) do_vm.c
$(SYSTEM)(do_vm_setbuf.o): do_vm_setbuf.c
$(CC) do_vm_setbuf.c
$(SYSTEM)(do_sprofile.o): do_sprofile.c
$(CC) do_sprofile.c

View File

@ -63,19 +63,8 @@ register message *m_ptr; /* pointer to request message */
}
if (i >= nr_io_range)
{
static int curr= 0, limit= 100, extra= 20;
if (curr < limit+extra)
{
kprintf("do_devio: port 0x%x (size %d) not allowed\n",
m_ptr->DIO_PORT, size);
} else if (curr == limit+extra)
{
kprintf("do_devio: no debug output for a while\n");
}
else if (curr == 2*limit-1)
limit *= 2;
curr++;
return EPERM;
}
}
@ -83,19 +72,8 @@ register message *m_ptr; /* pointer to request message */
doit:
if (m_ptr->DIO_PORT & (size-1))
{
static int curr= 0, limit= 100, extra= 20;
if (curr < limit+extra)
{
kprintf("do_devio: unaligned port 0x%x (size %d)\n",
m_ptr->DIO_PORT, size);
} else if (curr == limit+extra)
{
kprintf("do_devio: no debug output for a while\n");
}
else if (curr == 2*limit-1)
limit *= 2;
curr++;
return EPERM;
}

View File

@ -31,6 +31,11 @@ register message *m_ptr; /* pointer to request message */
rp = proc_addr(proc);
if(rp->p_misc_flags & MF_DELIVERMSG) {
rp->p_misc_flags &= ~MF_DELIVERMSG;
rp->p_delivermsg_lin = 0;
}
/* Save command name for debugging, ps(1) output, etc. */
if(data_copy(who_e, (vir_bytes) m_ptr->PR_NAME_PTR,
SYSTEM, (vir_bytes) rp->p_name, (phys_bytes) P_NAME_LEN - 1) != OK)

View File

@ -7,6 +7,7 @@
*/
#include "../system.h"
#include "../vm.h"
#include <signal.h>
#include <minix/endpoint.h>
@ -31,10 +32,25 @@ register message *m_ptr; /* pointer to request message */
if(!isokendpt(m_ptr->PR_ENDPT, &p_proc))
return EINVAL;
rpp = proc_addr(p_proc);
rpc = proc_addr(m_ptr->PR_SLOT);
if (isemptyp(rpp) || ! isemptyp(rpc)) return(EINVAL);
vmassert(!(rpp->p_misc_flags & MF_DELIVERMSG));
/* needs to be receiving so we know where the message buffer is */
if(!RTS_ISSET(rpp, RECEIVING)) {
printf("kernel: fork not done synchronously?\n");
return EINVAL;
}
/* memory becomes readonly */
if (priv(rpp)->s_asynsize > 0) {
printf("kernel: process with waiting asynsend table can't fork\n");
return EINVAL;
}
map_ptr= (struct mem_map *) m_ptr->PR_MEM_PTR;
/* Copy parent 'proc' struct to child. And reinitialize some fields. */
@ -75,9 +91,11 @@ register message *m_ptr; /* pointer to request message */
/* Calculate endpoint identifier, so caller knows what it is. */
m_ptr->PR_ENDPT = rpc->p_endpoint;
m_ptr->PR_FORK_MSGADDR = (char *) rpp->p_delivermsg_vir;
/* Install new map */
r = newmap(rpc, map_ptr);
FIXLINMSG(rpc);
/* Don't schedule process in VM mode until it has a new pagetable. */
if(m_ptr->PR_FORK_FLAGS & PFF_VMINHIBIT) {

View File

@ -28,9 +28,8 @@ register message *m_ptr; /* pointer to request message */
*/
size_t length;
vir_bytes src_vir;
int proc_nr, nr_e, nr;
int proc_nr, nr_e, nr, r;
struct proc *caller;
phys_bytes ph;
int wipe_rnd_bin = -1;
caller = proc_addr(who_p);
@ -67,19 +66,6 @@ register message *m_ptr; /* pointer to request message */
src_vir = (vir_bytes) irq_hooks;
break;
}
case GET_SCHEDINFO: {
/* This is slightly complicated because we need two data structures
* at once, otherwise the scheduling information may be incorrect.
* Copy the queue heads and fall through to copy the process table.
*/
if((ph=umap_local(caller, D, (vir_bytes) m_ptr->I_VAL_PTR2,length)) == 0)
return EFAULT;
length = sizeof(struct proc *) * NR_SCHED_QUEUES;
CHECKRANGE_OR_SUSPEND(proc_addr(who_p), ph, length, 1);
data_copy(SYSTEM, (vir_bytes) rdy_head,
who_e, (vir_bytes) m_ptr->I_VAL_PTR2, length);
/* fall through to GET_PROCTAB */
}
case GET_PROCTAB: {
length = sizeof(struct proc) * (NR_PROCS + NR_TASKS);
src_vir = (vir_bytes) proc;
@ -174,15 +160,16 @@ register message *m_ptr; /* pointer to request message */
/* Try to make the actual copy for the requested data. */
if (m_ptr->I_VAL_LEN > 0 && length > m_ptr->I_VAL_LEN) return (E2BIG);
if((ph=umap_local(caller, D, (vir_bytes) m_ptr->I_VAL_PTR,length)) == 0)
return EFAULT;
CHECKRANGE_OR_SUSPEND(caller, ph, length, 1);
if(data_copy(SYSTEM, src_vir, who_e, (vir_bytes) m_ptr->I_VAL_PTR, length) == OK) {
r = data_copy_vmcheck(SYSTEM, src_vir, who_e,
(vir_bytes) m_ptr->I_VAL_PTR, length);
if(r != OK) return r;
if(wipe_rnd_bin >= 0 && wipe_rnd_bin < RANDOM_SOURCES) {
krandom.bin[wipe_rnd_bin].r_size = 0;
krandom.bin[wipe_rnd_bin].r_next = 0;
}
}
return(OK);
}

View File

@ -8,6 +8,7 @@
*/
#include "../system.h"
#include "../vm.h"
#if USE_MEMSET
@ -18,10 +19,8 @@ PUBLIC int do_memset(m_ptr)
register message *m_ptr;
{
/* Handle sys_memset(). This writes a pattern into the specified memory. */
unsigned long p;
unsigned char c = m_ptr->MEM_PATTERN;
p = c | (c << 8) | (c << 16) | (c << 24);
phys_memset((phys_bytes) m_ptr->MEM_PTR, p, (phys_bytes) m_ptr->MEM_COUNT);
vm_phys_memset((phys_bytes) m_ptr->MEM_PTR, c, (phys_bytes) m_ptr->MEM_COUNT);
return(OK);
}

View File

@ -61,22 +61,11 @@ endpoint_t *e_granter; /* new granter (magic grants) */
if(!HASGRANTTABLE(granter_proc)) return EPERM;
if(priv(granter_proc)->s_grant_entries <= grant) {
static int curr= 0, limit= 100, extra= 20;
if (curr < limit+extra)
{
kprintf(
"verify_grant: grant verify failed in ep %d proc %d: "
"grant %d out of range for table size %d\n",
granter, proc_nr, grant,
priv(granter_proc)->s_grant_entries);
} else if (curr == limit+extra)
{
kprintf("verify_grant: no debug output for a while\n");
}
else if (curr == 2*limit-1)
limit *= 2;
curr++;
return(EPERM);
}
@ -219,23 +208,9 @@ int access; /* CPF_READ for a copy from granter to grantee, CPF_WRITE
/* Verify permission exists. */
if((r=verify_grant(granter, grantee, grantid, bytes, access,
g_offset, &v_offset, &new_granter)) != OK) {
static int curr= 0, limit= 100, extra= 20;
if (curr < limit+extra)
{
#if 0
kprintf(
"grant %d verify to copy %d->%d by %d failed: err %d\n",
grantid, *src, *dst, grantee, r);
#endif
} else if (curr == limit+extra)
{
kprintf(
"do_safecopy`safecopy: no debug output for a while\n");
}
else if (curr == 2*limit-1)
limit *= 2;
curr++;
return r;
}

View File

@ -29,18 +29,13 @@ message *m_ptr; /* pointer to request message */
struct sigcontext sc, *scp;
struct sigframe fr, *frp;
int proc, r;
phys_bytes ph;
if (!isokendpt(m_ptr->SIG_ENDPT, &proc)) return(EINVAL);
if (iskerneln(proc)) return(EPERM);
rp = proc_addr(proc);
ph = umap_local(proc_addr(who_p), D, (vir_bytes) m_ptr->SIG_CTXT_PTR, sizeof(struct sigmsg));
if(!ph) return EFAULT;
CHECKRANGE_OR_SUSPEND(proc_addr(who_p), ph, sizeof(struct sigmsg), 1);
/* Get the sigmsg structure into our address space. */
if((r=data_copy(who_e, (vir_bytes) m_ptr->SIG_CTXT_PTR,
if((r=data_copy_vmcheck(who_e, (vir_bytes) m_ptr->SIG_CTXT_PTR,
SYSTEM, (vir_bytes) &smsg, (phys_bytes) sizeof(struct sigmsg))) != OK)
return r;
@ -59,12 +54,9 @@ message *m_ptr; /* pointer to request message */
sc.sc_flags = 0; /* unused at this time */
sc.sc_mask = smsg.sm_mask;
ph = umap_local(rp, D, (vir_bytes) scp, sizeof(struct sigcontext));
if(!ph) return EFAULT;
CHECKRANGE_OR_SUSPEND(rp, ph, sizeof(struct sigcontext), 1);
/* Copy the sigcontext structure to the user's stack. */
if((r=data_copy(SYSTEM, (vir_bytes) &sc, m_ptr->SIG_ENDPT, (vir_bytes) scp,
(vir_bytes) sizeof(struct sigcontext))) != OK)
if((r=data_copy_vmcheck(SYSTEM, (vir_bytes) &sc, m_ptr->SIG_ENDPT,
(vir_bytes) scp, (vir_bytes) sizeof(struct sigcontext))) != OK)
return r;
/* Initialize the sigframe structure. */
@ -78,11 +70,9 @@ message *m_ptr; /* pointer to request message */
fr.sf_signo = smsg.sm_signo;
fr.sf_retadr = (void (*)()) smsg.sm_sigreturn;
ph = umap_local(rp, D, (vir_bytes) frp, sizeof(struct sigframe));
if(!ph) return EFAULT;
CHECKRANGE_OR_SUSPEND(rp, ph, sizeof(struct sigframe), 1);
/* Copy the sigframe structure to the user's stack. */
if((r=data_copy(SYSTEM, (vir_bytes) &fr, m_ptr->SIG_ENDPT, (vir_bytes) frp,
if((r=data_copy_vmcheck(SYSTEM, (vir_bytes) &fr,
m_ptr->SIG_ENDPT, (vir_bytes) frp,
(vir_bytes) sizeof(struct sigframe))) != OK)
return r;

View File

@ -16,7 +16,6 @@
PUBLIC int do_sysctl(m_ptr)
register message *m_ptr; /* pointer to request message */
{
phys_bytes ph;
vir_bytes len, buf;
static char mybuf[DIAG_BUFSIZE];
struct proc *caller, *target;
@ -33,10 +32,7 @@ register message *m_ptr; /* pointer to request message */
caller->p_endpoint, len);
return EINVAL;
}
if((ph=umap_local(caller, D, buf, len)) == 0)
return EFAULT;
CHECKRANGE_OR_SUSPEND(caller, ph, len, 1);
if((s=data_copy(who_e, buf, SYSTEM, (vir_bytes) mybuf, len)) != OK) {
if((s=data_copy_vmcheck(who_e, buf, SYSTEM, (vir_bytes) mybuf, len)) != OK) {
kprintf("do_sysctl: diag for %d: len %d: copy failed: %d\n",
caller->p_endpoint, len, s);
return s;

View File

@ -48,13 +48,11 @@ register message *m_ptr; /* pointer to request message */
case LOCAL_SEG:
phys_addr = lin_addr = umap_local(targetpr, seg_index, offset, count);
if(!lin_addr) return EFAULT;
CHECKRANGE_OR_SUSPEND(targetpr, lin_addr, count, 1);
naughty = 1;
break;
case REMOTE_SEG:
phys_addr = lin_addr = umap_remote(targetpr, seg_index, offset, count);
if(!lin_addr) return EFAULT;
CHECKRANGE_OR_SUSPEND(targetpr, lin_addr, count, 1);
naughty = 1;
break;
case GRANT_SEG:
@ -93,7 +91,6 @@ register message *m_ptr; /* pointer to request message */
kprintf("SYSTEM:do_umap: umap_local failed\n");
return EFAULT;
}
CHECKRANGE_OR_SUSPEND(targetpr, lin_addr, count, 1);
if(vm_lookup(targetpr, lin_addr, &phys_addr, NULL) != OK) {
kprintf("SYSTEM:do_umap: vm_lookup failed\n");
return EFAULT;

View File

@ -1,29 +0,0 @@
/* The system call implemented in this file:
* m_type: SYS_VM_SETBUF
*
* The parameters for this system call are:
* m4_l1: Start of the buffer
* m4_l2: Length of the buffer
* m4_l3: End of main memory
*/
#include "../system.h"
#define VM_DEBUG 0 /* enable/ disable debug output */
/*===========================================================================*
 *				do_vm_setbuf				     *
 *===========================================================================*/
PUBLIC int do_vm_setbuf(m_ptr)
message *m_ptr; /* pointer to request message */
{
/* Handle SYS_VM_SETBUF: latch the VM buffer location and size, and the
 * top of main memory, from the request message into kernel globals.
 */
  vm_mem_high= m_ptr->m4_l3;		/* end of main memory */
  vm_size= m_ptr->m4_l2;		/* length of the buffer */
  vm_base= m_ptr->m4_l1;		/* start of the buffer */

#if VM_DEBUG
  kprintf("do_vm_setbuf: got 0x%x @ 0x%x for 0x%x\n",
	vm_size, vm_base, vm_mem_high);
#endif

  return OK;
}

View File

@ -13,6 +13,8 @@
#include <minix/type.h>
#include <minix/config.h>
extern int verifyrange;
/*===========================================================================*
* do_vmctl *
*===========================================================================*/
@ -21,12 +23,10 @@ register message *m_ptr; /* pointer to request message */
{
int proc_nr, i;
endpoint_t ep = m_ptr->SVMCTL_WHO;
struct proc *p, *rp;
struct proc *p, *rp, *target;
if(ep == SELF) { ep = m_ptr->m_source; }
vm_init();
if(!isokendpt(ep, &proc_nr)) {
kprintf("do_vmctl: unexpected endpoint %d from VM\n", ep);
return EINVAL;
@ -42,14 +42,33 @@ register message *m_ptr; /* pointer to request message */
/* Send VM the information about the memory request. */
if(!(rp = vmrequest))
return ESRCH;
if(!RTS_ISSET(rp, VMREQUEST))
minix_panic("do_vmctl: no VMREQUEST set", NO_NUM);
vmassert(RTS_ISSET(rp, VMREQUEST));
#if 0
printf("kernel: vm request sent by: %s / %d about %d; 0x%lx-0x%lx, wr %d, stack: %s ",
rp->p_name, rp->p_endpoint, rp->p_vmrequest.who,
rp->p_vmrequest.start,
rp->p_vmrequest.start + rp->p_vmrequest.length,
rp->p_vmrequest.writeflag, rp->p_vmrequest.stacktrace);
printf("type %d\n", rp->p_vmrequest.type);
#endif
#if DEBUG_VMASSERT
okendpt(rp->p_vmrequest.who, &proc_nr);
target = proc_addr(proc_nr);
if(!RTS_ISSET(target, VMREQTARGET)) {
printf("set stack: %s\n", rp->p_vmrequest.stacktrace);
minix_panic("VMREQTARGET not set for target",
NO_NUM);
}
#endif
/* Reply with request fields. */
m_ptr->SVMCTL_MRG_ADDR = (char *) rp->p_vmrequest.start;
m_ptr->SVMCTL_MRG_LEN = rp->p_vmrequest.length;
m_ptr->SVMCTL_MRG_WRITE = rp->p_vmrequest.writeflag;
m_ptr->SVMCTL_MRG_EP = rp->p_vmrequest.who;
m_ptr->SVMCTL_MRG_REQUESTOR = (void *) rp->p_endpoint;
rp->p_vmrequest.vmresult = VMSUSPEND;
/* Remove from request chain. */
@ -57,46 +76,63 @@ register message *m_ptr; /* pointer to request message */
return OK;
case VMCTL_MEMREQ_REPLY:
if(!(rp = p->p_vmrequest.requestor))
minix_panic("do_vmctl: no requestor set", ep);
p->p_vmrequest.requestor = NULL;
if(!RTS_ISSET(rp, VMREQUEST))
minix_panic("do_vmctl: no VMREQUEST set", ep);
if(rp->p_vmrequest.vmresult != VMSUSPEND)
minix_panic("do_vmctl: result not VMSUSPEND set",
rp->p_vmrequest.vmresult);
rp->p_vmrequest.vmresult = m_ptr->SVMCTL_VALUE;
if(rp->p_vmrequest.vmresult == VMSUSPEND)
minix_panic("VM returned VMSUSPEND?", NO_NUM);
if(rp->p_vmrequest.vmresult != OK)
vmassert(RTS_ISSET(p, VMREQUEST));
vmassert(p->p_vmrequest.vmresult == VMSUSPEND);
okendpt(p->p_vmrequest.who, &proc_nr);
target = proc_addr(proc_nr);
p->p_vmrequest.vmresult = m_ptr->SVMCTL_VALUE;
vmassert(p->p_vmrequest.vmresult != VMSUSPEND);
if(p->p_vmrequest.vmresult != OK)
kprintf("SYSTEM: VM replied %d to mem request\n",
rp->p_vmrequest.vmresult);
p->p_vmrequest.vmresult);
/* Put on restart chain. */
rp->p_vmrequest.nextrestart = vmrestart;
vmrestart = rp;
#if 0
printf("memreq reply: vm request sent by: %s / %d about %d; 0x%lx-0x%lx, wr %d, stack: %s ",
p->p_name, p->p_endpoint, p->p_vmrequest.who,
p->p_vmrequest.start,
p->p_vmrequest.start + p->p_vmrequest.length,
p->p_vmrequest.writeflag, p->p_vmrequest.stacktrace);
printf("type %d\n", p->p_vmrequest.type);
#endif
vmassert(RTS_ISSET(target, VMREQTARGET));
RTS_LOCK_UNSET(target, VMREQTARGET);
if(p->p_vmrequest.type == VMSTYPE_KERNELCALL) {
/* Put on restart chain. */
p->p_vmrequest.nextrestart = vmrestart;
vmrestart = p;
} else if(p->p_vmrequest.type == VMSTYPE_DELIVERMSG) {
vmassert(p->p_misc_flags & MF_DELIVERMSG);
vmassert(p == target);
vmassert(RTS_ISSET(p, VMREQUEST));
vmassert(RTS_ISSET(p, VMREQTARGET));
RTS_LOCK_UNSET(p, VMREQUEST);
RTS_LOCK_UNSET(target, VMREQTARGET);
} else {
#if DEBUG_VMASSERT
/* Sanity check. */
if(rp->p_vmrequest.vmresult == OK) {
if(CHECKRANGE(p,
rp->p_vmrequest.start,
rp->p_vmrequest.length,
rp->p_vmrequest.writeflag) != OK) {
kprintf("SYSTEM: request %d:0x%lx-0x%lx, wrflag %d, failed\n",
rp->p_endpoint,
rp->p_vmrequest.start, rp->p_vmrequest.start + rp->p_vmrequest.length,
rp->p_vmrequest.writeflag);
minix_panic("SYSTEM: fail but VM said OK", NO_NUM);
}
printf("suspended with stack: %s\n",
p->p_vmrequest.stacktrace);
#endif
minix_panic("strange request type",
p->p_vmrequest.type);
}
#endif
return OK;
#if VM_KERN_NOPAGEZERO
case VMCTL_NOPAGEZERO:
case VMCTL_ENABLE_PAGING:
if(vm_running)
minix_panic("do_vmctl: paging already enabled", NO_NUM);
vm_init(p);
if(!vm_running)
minix_panic("do_vmctl: paging enabling failed", NO_NUM);
vmassert(p->p_delivermsg_lin ==
umap_local(p, D, p->p_delivermsg_vir, sizeof(message)));
if(newmap(p, (struct mem_map *) m_ptr->SVMCTL_VALUE) != OK)
minix_panic("do_vmctl: newmap failed", NO_NUM);
FIXLINMSG(p);
vmassert(p->p_delivermsg_lin);
return OK;
#endif
}
/* Try architecture-specific vmctls. */

View File

@ -35,7 +35,7 @@
/* Define stack sizes for the kernel tasks included in the system image. */
#define NO_STACK 0
#define SMALL_STACK (256 * sizeof(char *))
#define SMALL_STACK (1024 * sizeof(char *))
#define IDL_S SMALL_STACK /* 3 intr, 3 temps, 4 db for Intel */
#define HRD_S NO_STACK /* dummy task, uses kernel stack */
#define TSK_S SMALL_STACK /* system and clock task */
@ -96,7 +96,8 @@ PRIVATE int
drv_c[] = { DRV_C },
tty_c[] = { DRV_C, SYS_PHYSCOPY, SYS_ABORT, SYS_IOPENABLE,
SYS_READBIOS },
mem_c[] = { DRV_C, SYS_PHYSCOPY, SYS_PHYSVCOPY, SYS_IOPENABLE };
mem_c[] = { DRV_C, SYS_PHYSCOPY, SYS_PHYSVCOPY, SYS_IOPENABLE },
usr_c[] = { SYS_SYSCTL };
/* The system image table lists all programs that are part of the boot image.
* The order of the entries here MUST agree with the order of the programs
@ -117,16 +118,16 @@ PUBLIC struct boot_image image[] = {
{CLOCK,clock_task,TSK_F, 8, TASK_Q, TSK_S, TSK_T, 0, no_c,"clock" },
{SYSTEM, sys_task,TSK_F, 8, TASK_Q, TSK_S, TSK_T, 0, no_c,"system"},
{HARDWARE, 0,TSK_F, 8, TASK_Q, HRD_S, 0, 0, no_c,"kernel"},
{PM_PROC_NR, 0,SVM_F, 32, 4, 0, SRV_T, SRV_M, c(pm_c),"pm" },
{FS_PROC_NR, 0,SVM_F, 32, 5, 0, SRV_T, SRV_M, c(fs_c),"vfs" },
{PM_PROC_NR, 0,SRV_F, 32, 4, 0, SRV_T, SRV_M, c(pm_c),"pm" },
{FS_PROC_NR, 0,SRV_F, 32, 5, 0, SRV_T, SRV_M, c(fs_c),"vfs" },
{RS_PROC_NR, 0,SVM_F, 4, 4, 0, SRV_T, SYS_M, c(rs_c),"rs" },
{DS_PROC_NR, 0,SVM_F, 4, 4, 0, SRV_T, SYS_M, c(ds_c),"ds" },
{DS_PROC_NR, 0,SRV_F, 4, 4, 0, SRV_T, SYS_M, c(ds_c),"ds" },
{TTY_PROC_NR, 0,SVM_F, 4, 1, 0, SRV_T, SYS_M,c(tty_c),"tty" },
{MEM_PROC_NR, 0,SVM_F, 4, 3, 0, SRV_T, SYS_M,c(mem_c),"memory"},
{LOG_PROC_NR, 0,SVM_F, 4, 2, 0, SRV_T, SYS_M,c(drv_c),"log" },
{LOG_PROC_NR, 0,SRV_F, 4, 2, 0, SRV_T, SYS_M,c(drv_c),"log" },
{MFS_PROC_NR, 0,SVM_F, 32, 5, 0, SRV_T, SRV_M, c(fs_c),"mfs" },
{VM_PROC_NR, 0,SRV_F, 32, 2, 0, SRV_T, SRV_M, c(vm_c),"vm" },
{INIT_PROC_NR, 0,USR_F, 8, USER_Q, 0, USR_T, USR_M, no_c,"init" },
{INIT_PROC_NR, 0,USR_F, 8, USER_Q, 0, USR_T, USR_M, c(usr_c),"init" },
};
/* Verify the size of the system image table at compile time. Also verify that

View File

@ -37,7 +37,7 @@ char *mess;
int nr;
{
/* The system has run aground of a fatal kernel error. Terminate execution. */
if (minix_panicing ++) return; /* prevent recursive panics */
if (!minix_panicing++) {
if (mess != NULL) {
kprintf("kernel panic: %s", mess);
@ -46,8 +46,10 @@ int nr;
kprintf("\n");
}
kprintf("proc_ptr %s / %d\n", proc_ptr->p_name, proc_ptr->p_endpoint);
kprintf("kernel stacktrace: ");
util_stacktrace();
}
/* Abort MINIX. */
minix_shutdown(NULL);
@ -80,12 +82,13 @@ int c; /* character to append */
kmess.km_next = (kmess.km_next + 1) % _KMESS_BUF_SIZE;
} else {
int p, outprocs[] = OUTPUT_PROCS_ARRAY;
if(do_serial_debug) return;
if(minix_panicing || do_serial_debug) return;
for(p = 0; outprocs[p] != NONE; p++) {
if(isokprocn(outprocs[p]) && !isemptyn(outprocs[p])) {
send_sig(outprocs[p], SIGKMESS);
}
}
if(!(minix_panicing || do_serial_debug)) {
for(p = 0; outprocs[p] != NONE; p++) {
if(isokprocn(outprocs[p]) && !isemptyn(outprocs[p])) {
send_sig(outprocs[p], SIGKMESS);
}
}
}
}
return;
}

View File

@ -2,18 +2,19 @@
#ifndef _VM_H
#define _VM_H 1
#define CHECKRANGE_OR_SUSPEND(pr, start, length, wr) { int mr; \
if(vm_running && (mr=vm_checkrange(proc_addr(who_p), pr, start, length, wr, 0)) != OK) { \
return mr; \
} }
#define CHECKRANGE(pr, start, length, wr) \
vm_checkrange(proc_addr(who_p), pr, start, length, wr, 1)
/* Pseudo error code indicating a process request has to be
* restarted after an OK from VM.
*/
/* Pseudo error codes */
#define VMSUSPEND -996
#define EFAULT_SRC -995
#define EFAULT_DST -994
#define FIXLINMSG(prp) { prp->p_delivermsg_lin = umap_local(prp, D, prp->p_delivermsg_vir, sizeof(message)); }
#define PHYS_COPY_CATCH(src, dst, size, a) { \
vmassert(intr_disabled()); \
catch_pagefaults++; \
a = phys_copy(src, dst, size); \
catch_pagefaults--; \
}
#endif

View File

@ -17,6 +17,6 @@ u64_t *newpos;
m.m2_i2 = whence;
if (_syscall(FS, LLSEEK, &m) < 0) return -1;
if (newpos)
*newpos= make64(m.m2_l2, m.m2_l1);
*newpos= make64(m.m2_l1, m.m2_l2);
return 0;
}

View File

@ -66,7 +66,6 @@ libsys_FILES=" \
sys_vinl.c \
sys_vinw.c \
sys_vircopy.c \
sys_vm_setbuf.c \
sys_vmctl.c \
sys_voutb.c \
sys_voutl.c \

View File

@ -1,11 +1,12 @@
#include "syslib.h"
PUBLIC int sys_fork(parent, child, child_endpoint, map_ptr, flags)
PUBLIC int sys_fork(parent, child, child_endpoint, map_ptr, flags, msgaddr)
endpoint_t parent; /* process doing the fork */
endpoint_t child; /* which proc has been created by the fork */
int *child_endpoint;
struct mem_map *map_ptr;
u32_t flags;
vir_bytes *msgaddr;
{
/* A process has forked. Tell the kernel. */
@ -18,5 +19,6 @@ u32_t flags;
m.PR_FORK_FLAGS = flags;
r = _taskcall(SYSTASK, SYS_FORK, &m);
*child_endpoint = m.PR_ENDPT;
*msgaddr = m.PR_FORK_MSGADDR;
return r;
}

View File

@ -1,21 +0,0 @@
#include "syslib.h"
/*===========================================================================*
 *				sys_vm_setbuf				     *
 *===========================================================================*/
PUBLIC int sys_vm_setbuf(base, size, high)
phys_bytes base;			/* start of the buffer */
phys_bytes size;			/* length of the buffer */
phys_bytes high;			/* end of main memory */
{
/* Tell the kernel where the VM buffer lives via a SYS_VM_SETBUF task call. */
  message m;

  m.m4_l3= high;
  m.m4_l2= size;
  m.m4_l1= base;

  return _taskcall(SYSTASK, SYS_VM_SETBUF, &m);
}

View File

@ -43,7 +43,7 @@ PUBLIC int sys_vmctl_get_cr3_i386(endpoint_t who, u32_t *cr3)
}
PUBLIC int sys_vmctl_get_memreq(endpoint_t *who, vir_bytes *mem,
vir_bytes *len, int *wrflag)
vir_bytes *len, int *wrflag, endpoint_t *requestor)
{
message m;
int r;
@ -56,7 +56,16 @@ PUBLIC int sys_vmctl_get_memreq(endpoint_t *who, vir_bytes *mem,
*mem = (vir_bytes) m.SVMCTL_MRG_ADDR;
*len = m.SVMCTL_MRG_LEN;
*wrflag = m.SVMCTL_MRG_WRITE;
*requestor = (endpoint_t) m.SVMCTL_MRG_REQUESTOR;
}
return r;
}
PUBLIC int sys_vmctl_enable_paging(struct mem_map *map)
{
/* Ask the kernel to switch on paging, handing it the memory map that
 * should be installed for the calling process.
 */
  message msg;

  msg.SVMCTL_VALUE = (int) map;
  msg.SVMCTL_PARAM = VMCTL_ENABLE_PAGING;
  msg.SVMCTL_WHO = SELF;

  return _taskcall(SYSTASK, SYS_VMCTL, &msg);
}

View File

@ -6,7 +6,6 @@ LIBRARIES=libsys
libsys_FILES=" \
asynsend.c \
kmalloc.c \
kprintf.c \
kputc.c \
tickdelay.c \

View File

@ -1,177 +0,0 @@
/* malloc(), realloc(), free() - simple memory allocation routines
*
* This is a very small and simple minded malloc Author: Kees J. Bot
* implementation. Ideal for things like a 29 Jan 1994
* bootstrap program, or for debugging. Six times
* slower than any good malloc.
*/
#define nil 0
#define sbrk _sbrk
#include <stddef.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <limits.h>
#if !DEBUG
#define NDEBUG 1
#define debug(expr) ((void) 0)
#else
#define debug(expr) expr
#endif
#include <assert.h>
/* A free-list cell.  While a chunk is allocated only 'size' (and, under
 * DEBUG, the 'magic' marker) is meaningful; once freed, 'next' threads the
 * chunk onto the address-sorted free list.
 */
typedef struct cell {
	size_t size;		/* Size of a malloc()'ed object. */
#if DEBUG
	unsigned magic;		/* To recognize a cell. */
#endif
	struct cell *next;	/* Next cell on the free list. */
#if DEBUG
	unsigned sacred;	/* Don't touch while unallocated. */
#endif
} cell_t;

/* Debug marker value, sized to the platform's int width. */
#if UINT_MAX <= 0xFFFF
#define MAGIC 0x537B
#else
#define MAGIC 0x537BC0D8
#endif

/* Size of the header of an object (everything up to 'next'). */
#define HDR_SIZE offsetof(cell_t, next)

/* An offset from a cell pointer to a next cell. */
#define offset(cp, size) ((cell_t *) ((char *) (cp) + (size)))

/* Address of the object in a cell and back. */
#define cell2obj(cp) ((void *) ((char *) (cp) + HDR_SIZE))
#define obj2cell(op) ((cell_t *) ((char *) (op) - HDR_SIZE))

/* The free list, kept sorted by address so neighbours can be coalesced. */
static cell_t *freelist;
void *malloc(size_t size)
/* Allocate an object of at least the given size.  First-fit search of the
 * free list, coalescing adjacent free cells on the way; grows the break
 * with sbrk() when nothing fits.  Returns nil on failure.
 */
{
	cell_t **pcp, *cp;

	/* Grow the request to cover the cell header, and never hand out
	 * less than a whole cell_t so the chunk can be threaded back onto
	 * the free list when freed.
	 */
	size += HDR_SIZE;
	if (size < sizeof(cell_t)) size= sizeof(cell_t);

	/* Align to a word.  Use a real malloc if you need better alignment. */
	size= (size + sizeof(int) - 1) & ~(sizeof(int) - 1);

	/* Space for a magic number at the end of the chunk (DEBUG only). */
	debug(size += sizeof(unsigned));

	for (;;) {
		/* Do a first fit search. */
		pcp= &freelist;
		while ((cp= *pcp) != nil) {
			cell_t *next= cp->next;

			assert(cp->magic == MAGIC);
			assert(cp->sacred == MAGIC);
			if (offset(cp, cp->size) == next) {
				/* Join adjacent free cells. */
				assert(next->magic == MAGIC);
				assert(next->sacred == MAGIC);
				cp->size+= next->size;
				cp->next= next->next;
				continue;	/* Try again with merged cell. */
			}
			if (size <= cp->size) break;	/* Big enough. */

			/* Next cell. */
			pcp= &cp->next;
		}
		if (cp != nil) break;	/* Found a big enough chunk. */

		/* Allocate a new chunk at the break. */
		if ((cp= (cell_t *) sbrk(size)) == (cell_t *) -1) {
			return nil;
		}
		cp->size= size;
		cp->next= nil;
		debug(cp->magic= MAGIC);
		debug(cp->sacred= MAGIC);
		*pcp= cp;	/* chain it in, then retry the search */
	}

	/* We've got a cell that is big enough.  Can we break it up?  Keep
	 * the tail end as a new free cell if the remainder fits one.
	 */
	if (cp->size >= size + sizeof(cell_t)) {
		cell_t *next= offset(cp, size);

		next->size= cp->size - size;
		next->next= cp->next;
		debug(next->magic= MAGIC);
		debug(next->sacred= MAGIC);
		cp->size= size;
		cp->next= next;
	}

	/* Unchain the cell we've found and return an address in it. */
	*pcp= cp->next;
	debug(memset(cell2obj(cp), 0xAA, cp->size - HDR_SIZE));
	debug(((unsigned *) offset(cp, cp->size))[-1]= MAGIC);
	return cell2obj(cp);
}
void free(void *op)
/* Deallocate an object: insert its cell back into the address-sorted
 * free list.  Freeing nil is a no-op.
 */
{
	cell_t **prev, *next, *cp;

	if (op == nil) return;		/* freeing nil is allowed and does nothing */

	cp= obj2cell(op);
	assert(cp->magic == MAGIC);
	assert(((unsigned *) offset(cp, cp->size))[-1] == MAGIC);
	debug(cp->sacred= MAGIC);

	/* Find the spot where the object belongs; the list is kept sorted
	 * by address so malloc() can coalesce adjacent free cells.
	 */
	prev= &freelist;
	while ((next= *prev) != nil && next < cp) {
		assert(next->magic == MAGIC);
		assert(next->sacred == MAGIC);
		prev= &next->next;
	}

	/* Put the new free cell in the list. */
	*prev= cp;
	cp->next= next;

#if DEBUG
	/* Check the rest of the list. */
	while (next != nil) {
		assert(next->magic == MAGIC);
		assert(next->sacred == MAGIC);
		next= next->next;
	}
#endif
}
void *realloc(void *op, size_t size)
/* Change the size of an object.  Don't bother being smart, just copy it.
 * A nil 'op' behaves like malloc().  On allocation failure, nil is
 * returned and the original object is left untouched, as the C standard
 * requires (the old code copied through, and then freed, the caller's
 * data even when malloc() returned nil).
 */
{
	size_t oldsize;
	void *new;

	/* Payload size of the old object (0 when growing from nil). */
	oldsize= op == nil ? 0 : obj2cell(op)->size - HDR_SIZE;

	new= malloc(size);
	if (new == nil) return nil;	/* old object stays valid */

	/* Copy the smaller of the old and new sizes, then free the old
	 * object.  Skip the copy for a nil source (memcpy from nil is
	 * undefined even for zero bytes).
	 */
	if (op != nil) memcpy(new, op, oldsize > size ? size : oldsize);
	free(op);
	return new;
}
/*
* $PchId: malloc.c,v 1.4 1996/02/22 09:15:56 philip Exp $
*/

View File

@ -2,7 +2,7 @@
.SH NAME
ls \- list the contents of a directory
.SH SYNOPSIS
\fBls\fP [\fB\-acdfghilpqrstu1ACDFLMRTX\fP] [\fIname\fP...]
\fBls\fP [\fB\-acdfghilnpqrstu1ACDFLMRTX\fP] [\fIname\fP...]
.SH DESCRIPTION
For each file argument, list it. For each directory argument, list its
contents. The current working directory is listed when no files are named.
@ -30,7 +30,7 @@ and
.PP
Files whose names start with a dot are by default not listed.
.PP
Note that standard MINIX 3 doesn't have symbolic links or sockets and
Note that standard MINIX 3 doesn't have sockets, and
.B \-u
and
.B \-c

View File

@ -21,7 +21,6 @@ struct hook_entry {
{ F5, monparams_dmp, "Boot monitor parameters" },
{ F6, irqtab_dmp, "IRQ hooks and policies" },
{ F7, kmessages_dmp, "Kernel messages" },
{ F9, sched_dmp, "Scheduling queues" },
{ F10, kenv_dmp, "Kernel parameters" },
{ F11, timing_dmp, "Timing details (if enabled)" },
{ SF1, mproc_dmp, "Process manager process table" },

View File

@ -224,54 +224,6 @@ PUBLIC void image_dmp()
printf("\n");
}
/*===========================================================================*
 *				sched_dmp				     *
 *===========================================================================*/
PUBLIC void sched_dmp()
{
/* Dump the kernel scheduling queues: fetch a snapshot of the ready-queue
 * heads plus the process table, rebase the kernel-space pointers into this
 * process' local copy, and print each queue's chain of process numbers.
 */
  struct proc *rdy_head[NR_SCHED_QUEUES];
  struct kinfo kinfo;
  register struct proc *rp;
  vir_bytes ptr_diff;		/* local table addr minus kernel table addr */
  int r;

  /* First obtain a scheduling information. */
  if ((r = sys_getschedinfo(proc, rdy_head)) != OK) {
      report("IS","warning: couldn't get copy of process table", r);
      return;
  }

  /* Then obtain kernel addresses to correct pointer information. */
  if ((r = sys_getkinfo(&kinfo)) != OK) {
      report("IS","warning: couldn't get kernel addresses", r);
      return;
  }

  /* Update all pointers.  Nasty pointer algorithmic ...  The snapshots
   * contain kernel-space pointers; shift every non-nil queue head and
   * p_nextready link by the distance between our copy and the kernel's.
   */
  ptr_diff = (vir_bytes) proc - (vir_bytes) kinfo.proc_addr;
  for (r=0;r<NR_SCHED_QUEUES; r++)
	if (rdy_head[r] != NIL_PROC)
	    rdy_head[r] =
		(struct proc *)((vir_bytes) rdy_head[r] + ptr_diff);
  for (rp=BEG_PROC_ADDR; rp < END_PROC_ADDR; rp++)
	if (rp->p_nextready != NIL_PROC)
	    rp->p_nextready =
		(struct proc *)((vir_bytes) rp->p_nextready + ptr_diff);

  /* Now show scheduling queues. */
  printf("Dumping scheduling queues.\n");
  for (r=0;r<NR_SCHED_QUEUES; r++) {
      rp = rdy_head[r];
      if (!rp) continue;		/* skip empty queues */
      printf("%2d: ", r);
      while (rp != NIL_PROC) {
	  printf("%3d ", rp->p_nr);
	  rp = rp->p_nextready;
      }
      printf("\n");
  }
  printf("\n");
}
/*===========================================================================*
* kenv_dmp *
@ -311,9 +263,6 @@ PUBLIC void kenv_dmp()
printf("- nr_tasks: %3u\n", kinfo.nr_tasks);
printf("- release: %.6s\n", kinfo.release);
printf("- version: %.6s\n", kinfo.version);
#if DEBUG_LOCK_CHECK
printf("- relocking: %d\n", kinfo.relocking);
#endif
printf("\n");
}

View File

@ -25,7 +25,6 @@ OBJ = cache.o device.o link.o \
all build: $(SERVER)
$(SERVER): $(OBJ)
$(CC) -o $@ $(LDFLAGS) $(OBJ) $(LIBS)
install -S `expr $(NR_BUFS) \* $(BS) \* 2.2` $(SERVER)
install: $(SERVER)
-mv $(DEST) $(DEST).prev

View File

@ -17,6 +17,7 @@
#include "fs.h"
#include <minix/com.h>
#include <minix/u64.h>
#include <string.h>
#include "buf.h"
#include "super.h"
#include "inode.h"
@ -24,6 +25,9 @@
FORWARD _PROTOTYPE( void rm_lru, (struct buf *bp) );
FORWARD _PROTOTYPE( int rw_block, (struct buf *, int) );
char saved[100];
char *savedptr;
/*===========================================================================*
* get_block *
*===========================================================================*/
@ -48,10 +52,19 @@ int only_search; /* if NO_READ, don't read, else act normal */
*/
int b;
register struct buf *bp, *prev_ptr;
static struct buf *bp, *prev_ptr;
savedptr = (vir_bytes) &b + sizeof(b);
memcpy(saved, savedptr, sizeof(saved));
#define CHECK \
if(memcmp(saved, savedptr, sizeof(saved))) \
panic(__FILE__,"memory corruption", __LINE__);
ASSERT(fs_block_size > 0);
CHECK;
/* Search the hash chain for (dev, block). Do_read() can use
* get_block(NO_DEV ...) to get an unnamed block to fill with zeros when
* someone wants to read from a hole in a file, in which case this search
@ -60,8 +73,11 @@ int only_search; /* if NO_READ, don't read, else act normal */
if (dev != NO_DEV) {
b = BUFHASH(block);
bp = buf_hash[b];
CHECK;
while (bp != NIL_BUF) {
CHECK;
if (bp->b_blocknr == block && bp->b_dev == dev) {
CHECK;
/* Block needed has been found. */
if (bp->b_count == 0) rm_lru(bp);
bp->b_count++; /* record that block is in use */
@ -69,6 +85,7 @@ int only_search; /* if NO_READ, don't read, else act normal */
ASSERT(bp->b_dev == dev);
ASSERT(bp->b_dev != NO_DEV);
ASSERT(bp->bp);
CHECK;
return(bp);
} else {
/* This block is not the one sought. */
@ -137,6 +154,7 @@ int only_search; /* if NO_READ, don't read, else act normal */
ASSERT(bp->bp);
CHECK;
return(bp); /* return the newly acquired block */
}

View File

@ -346,12 +346,9 @@ PUBLIC int do_getprocnr()
if (((rmp->mp_flags & (IN_USE | ZOMBIE)) == IN_USE) &&
strncmp(rmp->mp_name, search_key, key_len)==0) {
mp->mp_reply.endpt = rmp->mp_endpoint;
printf("PM: name %s result: %d\n", search_key,
rmp->mp_endpoint);
return(OK);
}
}
printf("PM: name %s result: ESRCH\n", search_key);
return(ESRCH);
} else { /* return own/parent process number */
#if 0

View File

@ -528,7 +528,8 @@ doterminate:
rmp->mp_sigstatus = (char) signo;
if (sigismember(&core_sset, signo) && slot != FS_PROC_NR) {
printf("PM: signal %d for %d / %s\n", signo, rmp->mp_pid, rmp->mp_name);
printf("PM: signal %d for pid %d / %s\n",
signo, rmp->mp_pid, rmp->mp_name);
s= dump_core(rmp);
if (s == SUSPEND) {
return;

View File

@ -26,6 +26,7 @@
#include <minix/const.h>
#include <minix/endpoint.h>
#include <minix/safecopies.h>
#include <minix/debug.h>
#include "file.h"
#include "fproc.h"
#include "param.h"
@ -59,6 +60,10 @@ PUBLIC int main()
SANITYCHECK;
#if DO_SANITYCHECKS
FIXME("VFS: DO_SANITYCHECKS is on");
#endif
/* This is the main loop that gets work, processes it, and sends replies. */
while (TRUE) {
SANITYCHECK;
@ -285,9 +290,14 @@ PRIVATE void get_work()
continue;
}
if(who_p >= 0 && fproc[who_p].fp_endpoint != who_e) {
printf("FS: receive endpoint inconsistent (%d, %d, %d).\n",
who_e, fproc[who_p].fp_endpoint, who_e);
if(fproc[who_p].fp_endpoint == NONE) {
printf("slot unknown even\n");
}
printf("FS: receive endpoint inconsistent (source %d, who_p %d, stored ep %d, who_e %d).\n",
m_in.m_source, who_p, fproc[who_p].fp_endpoint, who_e);
#if 0
panic(__FILE__, "FS: inconsistent endpoint ", NO_NUM);
#endif
continue;
}
call_nr = m_in.m_type;

View File

@ -4,7 +4,7 @@ SERVER = vm
include /etc/make.conf
OBJ = main.o alloc.o utility.o exec.o exit.o fork.o break.o \
signal.o vfs.o mmap.o slaballoc.o region.o pagefaults.o
signal.o vfs.o mmap.o slaballoc.o region.o pagefaults.o addravl.o
ARCHOBJ = $(ARCH)/vm.o $(ARCH)/pagetable.o $(ARCH)/arch_pagefaults.o $(ARCH)/util.o
CPPFLAGS=-I../../kernel/arch/$(ARCH)/include -I$(ARCH)

5
servers/vm/addravl.c Normal file
View File

@ -0,0 +1,5 @@
#include "pagerange.h"
#include "addravl.h"
#include "cavl_impl.h"

17
servers/vm/addravl.h Normal file
View File

@ -0,0 +1,17 @@
/* Parameters that instantiate the generic "cavl" AVL-tree template
 * (cavl_if.h / cavl_impl.h) as a tree of pagerange_t nodes keyed on
 * physical address.  All generated names get the "addr_" prefix.
 */
#define AVL_UNIQUE(id) addr_ ## id
#define AVL_HANDLE pagerange_t *
#define AVL_KEY phys_bytes
#define AVL_MAX_DEPTH 30 /* good for 2 million nodes */
#define AVL_NULL NULL
/* Accessors for the per-node tree links and balance factor; the second
 * argument of the GET macros (access context) is unused here.
 */
#define AVL_GET_LESS(h, a) (h)->less
#define AVL_GET_GREATER(h, a) (h)->greater
#define AVL_SET_LESS(h1, h2) (h1)->less = h2;
#define AVL_SET_GREATER(h1, h2) (h1)->greater = h2;
#define AVL_GET_BALANCE_FACTOR(h) (h)->factor
#define AVL_SET_BALANCE_FACTOR(h, f) (h)->factor = f;
/* Three-way comparisons: negative, zero, positive like strcmp(). */
#define AVL_COMPARE_KEY_KEY(k1, k2) ((k1) > (k2) ? 1 : ((k1) < (k2) ? -1 : 0))
#define AVL_COMPARE_KEY_NODE(k, h) AVL_COMPARE_KEY_KEY((k), (h)->addr)
#define AVL_COMPARE_NODE_NODE(h1, h2) AVL_COMPARE_KEY_KEY((h1)->addr, (h2)->addr)
#include "cavl_if.h"

View File

@ -23,6 +23,7 @@
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <sys/mman.h>
@ -36,9 +37,11 @@
#include "proto.h"
#include "util.h"
#include "glo.h"
#include "pagerange.h"
#include "addravl.h"
/* Initially, no free pages are known. */
PRIVATE phys_bytes free_pages_head = NO_MEM; /* Physical address in bytes. */
/* AVL tree of free pages. */
addr_avl addravl;
/* Used for sanity check. */
PRIVATE phys_bytes mem_low, mem_high;
@ -102,26 +105,6 @@ FORWARD _PROTOTYPE( void holes_sanity_f, (char *fn, int line) );
}
void availbytes(vir_bytes *bytes, vir_bytes *chunks)
{
phys_bytes p, nextp;
*bytes = 0;
*chunks = 0;
for(p = free_pages_head; p != NO_MEM; p = nextp) {
phys_bytes thissize, ret;
GET_PARAMS(p, thissize, nextp);
(*bytes) += thissize;
(*chunks)++;
if(nextp != NO_MEM) {
vm_assert(nextp > p);
vm_assert(nextp > p + thissize);
}
}
return;
}
#if SANITYCHECKS
/*===========================================================================*
@ -410,6 +393,8 @@ struct memory *chunks; /* list of free memory chunks */
hole_head = NIL_HOLE;
free_slots = &hole[0];
addr_init(&addravl);
/* Use the chunks of physical memory to allocate holes. */
for (i=NR_MEMS-1; i>=0; i--) {
if (chunks[i].size > 0) {
@ -425,214 +410,126 @@ struct memory *chunks; /* list of free memory chunks */
CHECKHOLES;
}
int countnodes(void)
{
/* Walk the address AVL tree in ascending order and return how many
 * pagerange nodes it currently holds.
 */
	addr_iter it;
	int count;

	addr_start_iter_least(&addravl, &it);
	for(count = 0; addr_get_iter(&it); count++)
		addr_incr_iter(&it);
	return count;
}
/*===========================================================================*
* alloc_pages *
*===========================================================================*/
/* Allocate 'pages' pages of physical memory and return the allocation's
 * start in clicks, or NO_MEM on failure.  memflags may request zeroed
 * memory (PAF_CLEAR) or memory below 16MB (PAF_LOWER16MB).
 *
 * NOTE(review): this span is collapsed diff residue.  It contains BOTH
 * the old free-list implementation (the GET_PARAMS/SET_PARAMS loop) AND
 * the new AVL-tree implementation (the addr_* iterator code), plus a
 * doubled storage-class specifier on the signature and a stray
 * 'return NO_MEM;' from the old body.  As written it cannot compile;
 * exactly one of the two bodies must be kept when resolving the merge.
 */
PRIVATE PUBLIC phys_bytes alloc_pages(int pages, int memflags)
{
phys_bytes bytes, p, nextp, prevp = NO_MEM;
phys_bytes prevsize = 0;
addr_iter iter;
pagerange_t *pr;
int incr;
phys_bytes boundary16 = 16 * 1024 * 1024 / VM_PAGE_SIZE;
phys_bytes mem;
#if SANITYCHECKS
vir_bytes avail1, avail2, chunks1, chunks2;
availbytes(&avail1, &chunks1);
#endif
vm_assert(pages > 0);
bytes = CLICK2ABS(pages);
vm_assert(ABS2CLICK(bytes) == pages);
#if SANITYCHECKS
/* On return, verify exactly 'bytes' disappeared from the free pool
 * and the number of free chunks changed by at most one.
 */
#define ALLOCRETURNCHECK \
availbytes(&avail2, &chunks2); \
vm_assert(avail1 - bytes == avail2); \
vm_assert(chunks1 == chunks2 || chunks1-1 == chunks2);
#else
#define ALLOCRETURNCHECK
#endif
/* NOTE(review): OLD free-list implementation begins here; its
 * closing brace was lost in the merge.
 */
for(p = free_pages_head; p != NO_MEM; p = nextp) {
phys_bytes thissize, ret;
GET_PARAMS(p, thissize, nextp);
if(thissize >= bytes) {
/* We found a chunk that's big enough. */
ret = p + thissize - bytes;
thissize -= bytes;
if(thissize == 0) {
/* Special case: remove this link entirely. */
if(prevp == NO_MEM)
free_pages_head = nextp;
else {
vm_assert(prevsize > 0);
SET_PARAMS(prevp, prevsize, nextp);
}
} else {
/* Remove memory from this chunk. */
SET_PARAMS(p, thissize, nextp);
}
/* Clear memory if requested. */
if(memflags & PAF_CLEAR) {
int s;
if ((s= sys_memset(0, ret, bytes)) != OK) {
vm_panic("alloc_pages: sys_memset failed", s);
}
}
/* Check if returned range is actual good memory. */
vm_assert_range(ret, bytes);
ALLOCRETURNCHECK;
/* Return it in clicks. */
return ABS2CLICK(ret);
}
prevp = p;
prevsize = thissize;
/* NOTE(review): NEW AVL-tree implementation begins here.  For a
 * below-16MB request, start at the least address and walk upward;
 * otherwise start at the greatest and walk downward.
 */
if(memflags & PAF_LOWER16MB) {
addr_start_iter_least(&addravl, &iter);
incr = 1;
} else {
addr_start_iter_greatest(&addravl, &iter);
incr = 0;
}
/* NOTE(review): stray final statement of the OLD implementation;
 * it would make everything below unreachable.
 */
return NO_MEM;
while((pr = addr_get_iter(&iter))) {
SLABSANE(pr);
if(pr->size >= pages) {
if(memflags & PAF_LOWER16MB) {
if(pr->addr + pages > boundary16)
return NO_MEM;
}
/* good block found! */
break;
}
if(incr)
addr_incr_iter(&iter);
else
addr_decr_iter(&iter);
}
if(!pr)
return NO_MEM;
SLABSANE(pr);
mem = pr->addr;
vm_assert(pr->size >= pages);
if(pr->size == pages) {
/* Exact fit: remove the whole range node from the tree. */
pagerange_t *prr;
prr = addr_remove(&addravl, pr->addr);
vm_assert(prr);
vm_assert(prr == pr);
SLABFREE(pr);
} else {
/* Shrink the node: allocate from its low end. */
pr->addr += pages;
pr->size -= pages;
}
if(memflags & PAF_CLEAR) {
int s;
if ((s= sys_memset(0, CLICK_SIZE*mem,
VM_PAGE_SIZE*pages)) != OK)
vm_panic("alloc_mem: sys_memset failed", s);
}
return mem;
}
/*===========================================================================*
* free_pages *
*===========================================================================*/
/* Return 'npages' pages starting at click 'pageno' to the free pool.
 *
 * NOTE(review): collapsed diff residue.  Two signatures survive (old
 * and new), 'p' is declared twice with conflicting types, and the old
 * sorted-free-list implementation is interleaved with the new AVL-tree
 * merge logic (merge with lower neighbour, insert, then merge with
 * higher/lower neighbour via the iterator).  Brace balance is broken
 * by the merge; one implementation must be removed to resolve it.
 */
PRIVATE PUBLIC void free_pages(phys_bytes pageno, int npages)
PRIVATE void free_pages(phys_bytes pageno, int npages)
{
phys_bytes p, origsize,
size, nextaddr, thissize, prevp = NO_MEM, pageaddr;
pagerange_t *pr, *p;
addr_iter iter;
#if SANITYCHECKS
vir_bytes avail1, avail2, chunks1, chunks2;
availbytes(&avail1, &chunks1);
#endif
/* The freed range must not already be in the tree. */
vm_assert(!addr_search(&addravl, pageno, AVL_EQUAL));
#if SANITYCHECKS
/* On return, verify exactly 'origsize' bytes reappeared in the
 * free pool and the chunk count changed by at most one.
 */
#define FREERETURNCHECK \
availbytes(&avail2, &chunks2); \
vm_assert(avail1 + origsize == avail2); \
vm_assert(chunks1 == chunks2 || chunks1+1 == chunks2 || chunks1-1 == chunks2);
#else
#define FREERETURNCHECK
#endif
/* Basic sanity check. */
vm_assert(npages > 0);
vm_assert(pageno != NO_MEM); /* Page number must be reasonable. */
/* Convert page and pages to bytes. */
pageaddr = CLICK2ABS(pageno);
origsize = size = npages * VM_PAGE_SIZE; /* Size in bytes. */
vm_assert(pageaddr != NO_MEM);
vm_assert(ABS2CLICK(pageaddr) == pageno);
vm_assert_range(pageaddr, size);
/* More sanity checks. */
vm_assert(ABS2CLICK(size) == npages); /* Sanity. */
vm_assert(pageaddr + size > pageaddr); /* Must not overflow. */
/* NOTE(review): OLD free-list implementation from here down to the
 * insertion loop.
 */
/* Special case: no free pages. */
if(free_pages_head == NO_MEM) {
free_pages_head = pageaddr;
SET_PARAMS(pageaddr, size, NO_MEM);
FREERETURNCHECK;
return;
}
/* Special case: the free block is before the current head. */
if(pageaddr < free_pages_head) {
phys_bytes newsize, newnext, headsize, headnext;
vm_assert(pageaddr + size <= free_pages_head);
GET_PARAMS(free_pages_head, headsize, headnext);
newsize = size;
if(pageaddr + size == free_pages_head) {
/* Special case: contiguous. */
newsize += headsize;
newnext = headnext;
} else {
newnext = free_pages_head;
}
SET_PARAMS(pageaddr, newsize, newnext);
free_pages_head = pageaddr;
FREERETURNCHECK;
return;
}
/* Find where to put the block in the free list. */
for(p = free_pages_head; p < pageaddr; p = nextaddr) {
GET_PARAMS(p, thissize, nextaddr);
if(nextaddr == NO_MEM) {
/* Special case: page is at the end of the list. */
if(p + thissize == pageaddr) {
/* Special case: contiguous. */
SET_PARAMS(p, thissize + size, NO_MEM);
FREERETURNCHECK;
} else {
SET_PARAMS(p, thissize, pageaddr);
SET_PARAMS(pageaddr, size, NO_MEM);
FREERETURNCHECK;
}
return;
}
prevp = p;
}
/* Normal case: insert page block between two others.
 * The first block starts at 'prevp' and is 'thissize'.
 * The second block starts at 'p' and is 'nextsize'.
 * The block that has to come in between starts at
 * 'pageaddr' and is size 'size'.
 */
vm_assert(p != NO_MEM);
vm_assert(prevp != NO_MEM);
vm_assert(prevp < p);
vm_assert(p == nextaddr);
#if SANITYCHECKS
{
vir_bytes prevpsize, prevpnext;
GET_PARAMS(prevp, prevpsize, prevpnext);
vm_assert(prevpsize == thissize);
vm_assert(prevpnext == p);
availbytes(&avail2, &chunks2);
vm_assert(avail1 == avail2);
}
#endif
if(prevp + thissize == pageaddr) {
/* Special case: first block is contiguous with freed one. */
phys_bytes newsize = thissize + size;
SET_PARAMS(prevp, newsize, p);
pageaddr = prevp;
size = newsize;
/* NOTE(review): NEW AVL-tree implementation interleaved from
 * here: absorb into the node starting right after the freed
 * range if one exists, else insert a fresh node.
 */
/* try to merge with higher neighbour */
if((pr=addr_search(&addravl, pageno+npages, AVL_EQUAL))) {
SLABSANE(pr);
pr->addr -= npages;
pr->size += npages;
} else {
SET_PARAMS(prevp, thissize, pageaddr);
if(!SLABALLOC(pr))
vm_panic("alloc_pages: can't alloc", NO_NUM);
SLABSANE(pr);
vm_assert(npages > 0);
pr->addr = pageno;
pr->size = npages;
addr_insert(&addravl, pr);
}
/* The block has been inserted (and possibly merged with the
 * first one). Check if it has to be merged with the second one.
 */
addr_start_iter(&addravl, &iter, pr->addr, AVL_EQUAL);
p = addr_get_iter(&iter);
vm_assert(p);
vm_assert(p == pr);
if(pageaddr + size == p) {
phys_bytes nextsize, nextnextaddr;
/* Special case: freed block is contiguous with next one. */
GET_PARAMS(p, nextsize, nextnextaddr);
SET_PARAMS(pageaddr, size+nextsize, nextnextaddr);
FREERETURNCHECK;
} else {
SET_PARAMS(pageaddr, size, p);
FREERETURNCHECK;
/* Merge with the lower neighbour node if adjacent. */
addr_decr_iter(&iter);
if((p = addr_get_iter(&iter))) {
SLABSANE(p);
if(p->addr + p->size == pr->addr) {
p->size += pr->size;
addr_remove(&addravl, pr->addr);
SLABFREE(pr);
}
}
return;
}
#define NR_DMA 16
PRIVATE struct dmatab

216
servers/vm/cavl_if.h Executable file
View File

@ -0,0 +1,216 @@
/* Abstract AVL Tree Generic C Package.
** Interface generation header file.
**
** This code is in the public domain. See cavl_tree.html for interface
** documentation.
**
** Version: 1.5 Author: Walt Karas
*/
/* This header contains the definition of CHAR_BIT (number of bits in a
** char). */
#include <limits.h>
#undef L__
#undef L__EST_LONG_BIT
#undef L__SIZE
#undef L__SC
#undef L__LONG_BIT
#undef L__BIT_ARR_DEFN
#ifndef AVL_SEARCH_TYPE_DEFINED_
#define AVL_SEARCH_TYPE_DEFINED_

/* Search-relation selector for AVL lookups.  The values are bit flags,
 * so the *_EQUAL variants are formed by OR-ing AVL_EQUAL with the
 * strict relation.
 */
typedef enum
{
AVL_EQUAL = 1,
AVL_LESS = 2,
AVL_GREATER = 4,
AVL_LESS_EQUAL = AVL_EQUAL | AVL_LESS,
AVL_GREATER_EQUAL = AVL_EQUAL | AVL_GREATER
}
avl_search_type;
#endif
#ifdef AVL_UNIQUE
#define L__ AVL_UNIQUE
#else
#define L__(X) X
#endif
/* Determine storage class for function prototypes. */
#ifdef AVL_PRIVATE
#define L__SC static
#else
#define L__SC extern
#endif
#ifdef AVL_SIZE
#define L__SIZE AVL_SIZE
#else
#define L__SIZE unsigned long
#endif
typedef struct
{
#ifdef AVL_INSIDE_STRUCT
AVL_INSIDE_STRUCT
#endif
AVL_HANDLE root;
}
L__(avl);
/* Function prototypes. */
L__SC void L__(init)(L__(avl) *tree);
L__SC int L__(is_empty)(L__(avl) *tree);
L__SC AVL_HANDLE L__(insert)(L__(avl) *tree, AVL_HANDLE h);
L__SC AVL_HANDLE L__(search)(L__(avl) *tree, AVL_KEY k, avl_search_type st);
L__SC AVL_HANDLE L__(search_least)(L__(avl) *tree);
L__SC AVL_HANDLE L__(search_greatest)(L__(avl) *tree);
L__SC AVL_HANDLE L__(remove)(L__(avl) *tree, AVL_KEY k);
L__SC AVL_HANDLE L__(subst)(L__(avl) *tree, AVL_HANDLE new_node);
#ifdef AVL_BUILD_ITER_TYPE
L__SC int L__(build)(
L__(avl) *tree, AVL_BUILD_ITER_TYPE p, L__SIZE num_nodes);
#endif
/* ANSI C/ISO C++ require that a long have at least 32 bits. Set
** L__EST_LONG_BIT to be the greatest multiple of 8 in the range
** 32 - 64 (inclusive) that is less than or equal to the number of
** bits in a long.
*/
#if (((LONG_MAX >> 31) >> 7) == 0)
#define L__EST_LONG_BIT 32
#elif (((LONG_MAX >> 31) >> 15) == 0)
#define L__EST_LONG_BIT 40
#elif (((LONG_MAX >> 31) >> 23) == 0)
#define L__EST_LONG_BIT 48
#elif (((LONG_MAX >> 31) >> 31) == 0)
#define L__EST_LONG_BIT 56
#else
#define L__EST_LONG_BIT 64
#endif
/* Number of bits in a long. */
#define L__LONG_BIT (sizeof(long) * CHAR_BIT)
/* The macro L__BIT_ARR_DEFN defines a bit array whose index is a (0-based)
** node depth. The definition depends on whether the maximum depth is more
** or less than the number of bits in a single long.
*/
#if ((AVL_MAX_DEPTH) > L__EST_LONG_BIT)
/* Maximum depth may be more than number of bits in a long. */
#define L__BIT_ARR_DEFN(NAME) \
unsigned long NAME[((AVL_MAX_DEPTH) + L__LONG_BIT - 1) / L__LONG_BIT];
#else
/* Maximum depth is definitely less than number of bits in a long. */
#define L__BIT_ARR_DEFN(NAME) unsigned long NAME;
#endif
/* Iterator structure. */
typedef struct
{
/* Tree being iterated over. */
L__(avl) *tree_;
/* Records a path into the tree. If bit n is true, indicates
** take greater branch from the nth node in the path, otherwise
** take the less branch. bit 0 gives branch from root, and
** so on. */
L__BIT_ARR_DEFN(branch)
/* Zero-based depth of path into tree. */
unsigned depth;
/* Handles of nodes in path from root to current node (returned by *). */
AVL_HANDLE path_h[(AVL_MAX_DEPTH) - 1];
}
L__(iter);
/* Iterator function prototypes. */
L__SC void L__(start_iter)(
L__(avl) *tree, L__(iter) *iter, AVL_KEY k, avl_search_type st);
L__SC void L__(start_iter_least)(L__(avl) *tree, L__(iter) *iter);
L__SC void L__(start_iter_greatest)(L__(avl) *tree, L__(iter) *iter);
L__SC AVL_HANDLE L__(get_iter)(L__(iter) *iter);
L__SC void L__(incr_iter)(L__(iter) *iter);
L__SC void L__(decr_iter)(L__(iter) *iter);
L__SC void L__(init_iter)(L__(iter) *iter);
#define AVL_IMPL_INIT 1
#define AVL_IMPL_IS_EMPTY (1 << 1)
#define AVL_IMPL_INSERT (1 << 2)
#define AVL_IMPL_SEARCH (1 << 3)
#define AVL_IMPL_SEARCH_LEAST (1 << 4)
#define AVL_IMPL_SEARCH_GREATEST (1 << 5)
#define AVL_IMPL_REMOVE (1 << 6)
#define AVL_IMPL_BUILD (1 << 7)
#define AVL_IMPL_START_ITER (1 << 8)
#define AVL_IMPL_START_ITER_LEAST (1 << 9)
#define AVL_IMPL_START_ITER_GREATEST (1 << 10)
#define AVL_IMPL_GET_ITER (1 << 11)
#define AVL_IMPL_INCR_ITER (1 << 12)
#define AVL_IMPL_DECR_ITER (1 << 13)
#define AVL_IMPL_INIT_ITER (1 << 14)
#define AVL_IMPL_SUBST (1 << 15)
#define AVL_IMPL_ALL (~0)
#undef L__
#undef L__EST_LONG_BIT
#undef L__SIZE
#undef L__SC
#undef L__LONG_BIT
#undef L__BIT_ARR_DEFN

1181
servers/vm/cavl_impl.h Executable file

File diff suppressed because it is too large Load Diff

View File

@ -13,6 +13,7 @@
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <errno.h>
#include <env.h>
@ -31,6 +32,8 @@ PUBLIC int do_fork(message *msg)
{
int r, proc, s, childproc, fullvm;
struct vmproc *vmp, *vmc;
pt_t origpt;
vir_bytes msgaddr;
SANITYCHECK(SCL_FUNCTIONS);
@ -49,6 +52,7 @@ PUBLIC int do_fork(message *msg)
vmp = &vmproc[proc]; /* parent */
vmc = &vmproc[childproc]; /* child */
vm_assert(vmc->vm_slot == childproc);
if(vmp->vm_flags & VMF_HAS_DMA) {
printf("VM: %d has DMA memory and may not fork\n", msg->VMF_ENDPOINT);
@ -58,14 +62,20 @@ PUBLIC int do_fork(message *msg)
fullvm = vmp->vm_flags & VMF_HASPT;
/* The child is basically a copy of the parent. */
origpt = vmc->vm_pt;
*vmc = *vmp;
vmc->vm_slot = childproc;
vmc->vm_regions = NULL;
vmc->vm_endpoint = NONE; /* In case someone tries to use it. */
vmc->vm_pt = origpt;
vmc->vm_flags &= ~VMF_HASPT;
#if VMSTATS
vmc->vm_bytecopies = 0;
#endif
SANITYCHECK(SCL_DETAIL);
if(fullvm) {
SANITYCHECK(SCL_DETAIL);
@ -74,6 +84,8 @@ PUBLIC int do_fork(message *msg)
return ENOMEM;
}
vmc->vm_flags |= VMF_HASPT;
SANITYCHECK(SCL_DETAIL);
if(map_proc_copy(vmc, vmp) != OK) {
@ -108,6 +120,7 @@ PUBLIC int do_fork(message *msg)
/* Create a copy of the parent's core image for the child. */
child_abs = (phys_bytes) child_base << CLICK_SHIFT;
parent_abs = (phys_bytes) vmp->vm_arch.vm_seg[D].mem_phys << CLICK_SHIFT;
FIXME("VM uses kernel for abscopy");
s = sys_abscopy(parent_abs, child_abs, prog_bytes);
if (s < 0) vm_panic("do_fork can't copy", s);
@ -127,11 +140,18 @@ PUBLIC int do_fork(message *msg)
/* Tell kernel about the (now successful) FORK. */
if((r=sys_fork(vmp->vm_endpoint, childproc,
&vmc->vm_endpoint, vmc->vm_arch.vm_seg,
fullvm ? PFF_VMINHIBIT : 0)) != OK) {
fullvm ? PFF_VMINHIBIT : 0, &msgaddr)) != OK) {
vm_panic("do_fork can't sys_fork", r);
}
if(fullvm) {
vir_bytes vir;
vir = arch_vir2map(vmc, msgaddr);
if(handle_memory(vmc, vir, sizeof(message), 1) != OK)
vm_panic("can't make message writable (child)", NO_NUM);
vir = arch_vir2map(vmp, msgaddr);
if(handle_memory(vmp, vir, sizeof(message), 1) != OK)
vm_panic("can't make message writable (parent)", NO_NUM);
if((r=pt_bind(&vmc->vm_pt, vmc)) != OK)
vm_panic("fork can't pt_bind", r);
}

View File

@ -25,4 +25,5 @@ EXTERN long vm_sanitychecklevel;
/* vm operation mode state and values */
EXTERN long vm_paged;
EXTERN phys_bytes kernel_top_bytes;
EXTERN int meminit_done;

View File

@ -15,7 +15,7 @@
#define VM_PAGE_SIZE I386_PAGE_SIZE
/* Where do processes start in linear (i.e. page table) memory? */
#define VM_PROCSTART (I386_BIG_PAGE_SIZE*10)
#define VM_PROCSTART (I386_BIG_PAGE_SIZE*100)
#define CLICKSPERPAGE (I386_PAGE_SIZE/CLICK_SIZE)

View File

@ -34,14 +34,14 @@
#include "memory.h"
int global_bit_ok = 0;
int bigpage_ok = 0;
/* PDE used to map in kernel, kernel physical address. */
PRIVATE int kernel_pde = -1, pagedir_pde = -1;
PRIVATE u32_t kern_pde_val = 0, global_bit = 0, pagedir_pde_val;
/* Location in our virtual address space where we can map in
* any physical page we want.
*/
static unsigned char *varmap = NULL; /* Our address space. */
static u32_t varmap_loc; /* Our page table. */
PRIVATE int proc_pde = 0;
/* 4MB page size available in hardware? */
PRIVATE int bigpage_ok = 0;
/* Our process table entry. */
struct vmproc *vmp = &vmproc[VM_PROC_NR];
@ -52,7 +52,7 @@ struct vmproc *vmp = &vmproc[VM_PROC_NR];
*/
#define SPAREPAGES 5
int missing_spares = SPAREPAGES;
static struct {
PRIVATE struct {
void *page;
u32_t phys;
} sparepages[SPAREPAGES];
@ -78,7 +78,6 @@ static struct {
u32_t page_directories_phys, *page_directories = NULL;
#if SANITYCHECKS
#define PT_SANE(p) { pt_sanitycheck((p), __FILE__, __LINE__); SANITYCHECK(SCL_DETAIL); }
/*===========================================================================*
* pt_sanitycheck *
*===========================================================================*/
@ -86,21 +85,33 @@ PUBLIC void pt_sanitycheck(pt_t *pt, char *file, int line)
{
/* Basic pt sanity check. */
int i;
int slot;
MYASSERT(pt);
MYASSERT(pt->pt_dir);
MYASSERT(pt->pt_dir_phys);
for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
for(slot = 0; slot < ELEMENTS(vmproc); slot++) {
if(pt == &vmproc[slot].vm_pt)
break;
}
if(slot >= ELEMENTS(vmproc)) {
vm_panic("pt_sanitycheck: passed pt not in any proc", NO_NUM);
}
for(i = proc_pde; i < I386_VM_DIR_ENTRIES; i++) {
if(pt->pt_pt[i]) {
if(!(pt->pt_dir[i] & I386_VM_PRESENT)) {
printf("slot %d: pt->pt_pt[%d] = 0x%lx, but pt_dir entry 0x%lx\n",
slot, i, pt->pt_pt[i], pt->pt_dir[i]);
}
MYASSERT(pt->pt_dir[i] & I386_VM_PRESENT);
} else {
MYASSERT(!(pt->pt_dir[i] & I386_VM_PRESENT));
}
}
}
#else
#define PT_SANE(p)
#endif
/*===========================================================================*
@ -262,10 +273,7 @@ PRIVATE void *vm_checkspares(void)
}
if(worst < n) worst = n;
total += n;
#if 0
if(n > 0)
printf("VM: made %d spares, total %d, worst %d\n", n, total, worst);
#endif
return NULL;
}
@ -293,7 +301,7 @@ PUBLIC void *vm_allocpages(phys_bytes *phys, int pages, int reason)
vm_assert(level >= 1);
vm_assert(level <= 2);
if(level > 1 || !(vmp->vm_flags & VMF_HASPT)) {
if(level > 1 || !(vmp->vm_flags & VMF_HASPT) || !meminit_done) {
int r;
void *s;
vm_assert(pages == 1);
@ -347,7 +355,7 @@ PRIVATE int pt_ptalloc(pt_t *pt, int pde, u32_t flags)
/* Argument must make sense. */
vm_assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
vm_assert(!(flags & ~(PTF_ALLFLAGS | PTF_MAPALLOC)));
vm_assert(!(flags & ~(PTF_ALLFLAGS)));
/* We don't expect to overwrite page directory entry, nor
* storage for the page table.
@ -385,10 +393,9 @@ PUBLIC int pt_writemap(pt_t *pt, vir_bytes v, phys_bytes physaddr,
/* Page directory and table entries for this virtual address. */
int p, pages, pde;
int finalpde;
SANITYCHECK(SCL_FUNCTIONS);
vm_assert(!(bytes % I386_PAGE_SIZE));
vm_assert(!(flags & ~(PTF_ALLFLAGS | PTF_MAPALLOC)));
vm_assert(!(flags & ~(PTF_ALLFLAGS)));
pages = bytes / I386_PAGE_SIZE;
@ -417,6 +424,8 @@ PUBLIC int pt_writemap(pt_t *pt, vir_bytes v, phys_bytes physaddr,
for(pde = I386_VM_PDE(v); pde <= finalpde; pde++) {
vm_assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
if(pt->pt_dir[pde] & I386_VM_BIGPAGE) {
printf("pt_writemap: trying to write 0x%lx into 0x%lx\n",
physaddr, v);
vm_panic("pt_writemap: BIGPAGE found", NO_NUM);
}
if(!(pt->pt_dir[pde] & I386_VM_PRESENT)) {
@ -470,7 +479,7 @@ PUBLIC int pt_writemap(pt_t *pt, vir_bytes v, phys_bytes physaddr,
v += I386_PAGE_SIZE;
PT_SANE(pt);
}
SANITYCHECK(SCL_FUNCTIONS);
PT_SANE(pt);
return OK;
@ -488,7 +497,14 @@ PUBLIC int pt_new(pt_t *pt)
*/
int i;
if(!(pt->pt_dir = vm_allocpages(&pt->pt_dir_phys, 1, VMP_PAGEDIR))) {
/* Don't ever re-allocate/re-move a certain process slot's
* page directory once it's been created. This is a fraction
* faster, but also avoids having to invalidate the page
* mappings from in-kernel page tables pointing to
* the page directories (the page_directories data).
*/
if(!pt->pt_dir &&
!(pt->pt_dir = vm_allocpages(&pt->pt_dir_phys, 1, VMP_PAGEDIR))) {
return ENOMEM;
}
@ -500,10 +516,14 @@ PUBLIC int pt_new(pt_t *pt)
/* Where to start looking for free virtual address space? */
pt->pt_virtop = 0;
PT_SANE(pt);
/* Map in kernel. */
if(pt_mapkernel(pt) != OK)
vm_panic("pt_new: pt_mapkernel failed", NO_NUM);
PT_SANE(pt);
return OK;
}
@ -520,13 +540,14 @@ PUBLIC void pt_init(void)
*/
pt_t *newpt;
int s, r;
vir_bytes v;
vir_bytes v, kpagedir;
phys_bytes lo, hi;
vir_bytes extra_clicks;
u32_t moveup = 0;
global_bit_ok = _cpufeature(_CPUF_I386_PGE);
bigpage_ok = _cpufeature(_CPUF_I386_PSE);
int global_bit_ok = 0;
int free_pde;
int p;
vir_bytes kernlimit;
/* Shorthand. */
newpt = &vmp->vm_pt;
@ -541,12 +562,37 @@ PUBLIC void pt_init(void)
}
missing_spares = 0;
/* Make new page table for ourselves, partly copied
* from the current one.
*/
if(pt_new(newpt) != OK)
vm_panic("pt_init: pt_new failed", NO_NUM);
/* global bit and 4MB pages available? */
global_bit_ok = _cpufeature(_CPUF_I386_PGE);
bigpage_ok = _cpufeature(_CPUF_I386_PSE);
/* Set bit for PTE's and PDE's if available. */
if(global_bit_ok)
global_bit = I386_VM_GLOBAL;
/* Figure out kernel pde slot. */
{
int pde1, pde2;
pde1 = I386_VM_PDE(KERNEL_TEXT);
pde2 = I386_VM_PDE(KERNEL_DATA+KERNEL_DATA_LEN);
if(pde1 != pde2)
vm_panic("pt_init: kernel too big", NO_NUM);
/* Map in kernel with this single pde value if 4MB pages
* supported.
*/
kern_pde_val = (KERNEL_TEXT & I386_VM_ADDR_MASK_4MB) |
I386_VM_BIGPAGE|
I386_VM_USER|
I386_VM_PRESENT|I386_VM_WRITE|global_bit;
kernel_pde = pde1;
vm_assert(kernel_pde >= 0);
free_pde = kernel_pde+1;
}
/* First unused pde. */
proc_pde = free_pde;
/* Initial (current) range of our virtual address space. */
lo = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys);
@ -562,21 +608,27 @@ PUBLIC void pt_init(void)
vm_assert(!(lo % I386_PAGE_SIZE));
vm_assert(!(moveup % I386_PAGE_SIZE));
}
/* Make new page table for ourselves, partly copied
* from the current one.
*/
if(pt_new(newpt) != OK)
vm_panic("pt_init: pt_new failed", NO_NUM);
/* Old position mapped in? */
pt_check(vmp);
/* Set up mappings for VM process. */
for(v = lo; v < hi; v += I386_PAGE_SIZE) {
phys_bytes addr;
u32_t flags;
/* We have to write the old and new position in the PT,
/* We have to write the new position in the PT,
* so we can move our segments.
*/
if(pt_writemap(newpt, v+moveup, v, I386_PAGE_SIZE,
I386_VM_PRESENT|I386_VM_WRITE|I386_VM_USER, 0) != OK)
vm_panic("pt_init: pt_writemap failed", NO_NUM);
if(pt_writemap(newpt, v, v, I386_PAGE_SIZE,
I386_VM_PRESENT|I386_VM_WRITE|I386_VM_USER, 0) != OK)
vm_panic("pt_init: pt_writemap failed", NO_NUM);
}
/* Move segments up too. */
@ -584,21 +636,14 @@ PUBLIC void pt_init(void)
vmp->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
vmp->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);
#if 0
/* Map in kernel. */
if(pt_mapkernel(newpt) != OK)
vm_panic("pt_init: pt_mapkernel failed", NO_NUM);
/* Allocate us a page table in which to remember page directory
* pointers.
*/
if(!(page_directories = vm_allocpages(&page_directories_phys,
1, VMP_PAGETABLE)))
vm_panic("no virt addr for vm mappings", NO_NUM);
#endif
/* Give our process the new, copied, private page table. */
pt_bind(newpt, vmp);
memset(page_directories, 0, I386_PAGE_SIZE);
/* Increase our hardware data segment to create virtual address
* space above our stack. We want to increase it to VM_DATATOP,
@ -614,19 +659,6 @@ PUBLIC void pt_init(void)
(vmp->vm_arch.vm_seg[S].mem_vir +
vmp->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;
if((s=sys_newmap(VM_PROC_NR, vmp->vm_arch.vm_seg)) != OK)
vm_panic("VM: pt_init: sys_newmap failed", s);
/* Back to reality - this is where the stack actually is. */
vmp->vm_arch.vm_seg[S].mem_len -= extra_clicks;
/* Wipe old mappings from VM. */
for(v = lo; v < hi; v += I386_PAGE_SIZE) {
if(pt_writemap(newpt, v, MAP_NONE, I386_PAGE_SIZE,
0, WMF_OVERWRITE) != OK)
vm_panic("pt_init: pt_writemap failed", NO_NUM);
}
/* Where our free virtual address space starts.
* This is only a hint to the VM system.
*/
@ -635,17 +667,49 @@ PUBLIC void pt_init(void)
/* Let other functions know VM now has a private page table. */
vmp->vm_flags |= VMF_HASPT;
/* Reserve a page in our virtual address space that we
* can use to map in arbitrary physical pages.
*/
varmap_loc = findhole(newpt, I386_PAGE_SIZE,
arch_vir2map(vmp, vmp->vm_stacktop),
vmp->vm_arch.vm_data_top);
if(varmap_loc == NO_MEM) {
vm_panic("no virt addr for vm mappings", NO_NUM);
}
varmap = (unsigned char *) arch_map2vir(vmp, varmap_loc);
/* Find a PDE below processes available for mapping in the
* page directories (readonly).
*/
pagedir_pde = free_pde++;
pagedir_pde_val = (page_directories_phys & I386_VM_ADDR_MASK) |
I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;
/* Tell kernel about free pde's. */
while(free_pde*I386_BIG_PAGE_SIZE < VM_PROCSTART) {
if((r=sys_vmctl(SELF, VMCTL_I386_FREEPDE, free_pde++)) != OK) {
vm_panic("VMCTL_I386_FREEPDE failed", r);
}
}
/* first pde in use by process. */
proc_pde = free_pde;
kernlimit = free_pde*I386_BIG_PAGE_SIZE;
/* Increase kernel segment to address this memory. */
if((r=sys_vmctl(SELF, VMCTL_I386_KERNELLIMIT, kernlimit)) != OK) {
vm_panic("VMCTL_I386_KERNELLIMIT failed", r);
}
kpagedir = arch_map2vir(&vmproc[VMP_SYSTEM],
pagedir_pde*I386_BIG_PAGE_SIZE);
/* Tell kernel how to get at the page directories. */
if((r=sys_vmctl(SELF, VMCTL_I386_PAGEDIRS, kpagedir)) != OK) {
vm_panic("VMCTL_I386_KERNELLIMIT failed", r);
}
/* Give our process the new, copied, private page table. */
pt_mapkernel(newpt); /* didn't know about vm_dir pages earlier */
pt_bind(newpt, vmp);
/* Now actually enable paging. */
if(sys_vmctl_enable_paging(vmp->vm_arch.vm_seg) != OK)
vm_panic("pt_init: enable paging failed", NO_NUM);
/* Back to reality - this is where the stack actually is. */
vmp->vm_arch.vm_seg[S].mem_len -= extra_clicks;
/* All OK. */
return;
}
@ -656,7 +720,8 @@ PUBLIC void pt_init(void)
*===========================================================================*/
PUBLIC int pt_bind(pt_t *pt, struct vmproc *who)
{
int slot;
int slot, ispt;
u32_t phys;
/* Basic sanity checks. */
vm_assert(who);
@ -664,16 +729,20 @@ PUBLIC int pt_bind(pt_t *pt, struct vmproc *who)
if(pt) PT_SANE(pt);
vm_assert(pt);
#if 0
slot = who->vm_slot;
vm_assert(slot >= 0);
vm_assert(slot < ELEMENTS(vmproc));
vm_assert(!(pt->pt_dir_phys & ~I386_VM_ADDR_MASK));
vm_assert(slot < I386_VM_PT_ENTRIES);
page_directories[slot] = (pt->pt_dir_phys & I386_VM_ADDR_MASK) |
(I386_VM_PRESENT|I386_VM_WRITE);
phys = pt->pt_dir_phys & I386_VM_ADDR_MASK;
vm_assert(pt->pt_dir_phys == phys);
/* Update "page directory pagetable." */
page_directories[slot] = phys | I386_VM_PRESENT|I386_VM_WRITE;
#if 0
printf("VM: slot %d has pde val 0x%lx\n", slot, page_directories[slot]);
#endif
/* Tell kernel about new page table root. */
return sys_vmctl(who->vm_endpoint, VMCTL_I386_SETCR3,
pt ? pt->pt_dir_phys : 0);
@ -689,22 +758,14 @@ PUBLIC void pt_free(pt_t *pt)
PT_SANE(pt);
for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
int p;
if(pt->pt_pt[i]) {
for(p = 0; p < I386_VM_PT_ENTRIES; p++) {
if((pt->pt_pt[i][p] & (PTF_MAPALLOC | I386_VM_PRESENT))
== (PTF_MAPALLOC | I386_VM_PRESENT)) {
u32_t pa = I386_VM_PFA(pt->pt_pt[i][p]);
FREE_MEM(ABS2CLICK(pa), CLICKSPERPAGE);
}
}
vm_freepages((vir_bytes) pt->pt_pt[i],
I386_VM_PFA(pt->pt_dir[i]), 1, VMP_PAGETABLE);
}
}
for(i = 0; i < I386_VM_DIR_ENTRIES; i++)
if(pt->pt_pt[i])
vm_freepages((vir_bytes) pt->pt_pt[i],
I386_VM_PFA(pt->pt_dir[i]), 1, VMP_PAGETABLE);
#if 0
vm_freepages((vir_bytes) pt->pt_dir, pt->pt_dir_phys, 1, VMP_PAGEDIR);
#endif
return;
}
@ -715,77 +776,51 @@ PUBLIC void pt_free(pt_t *pt)
/* Map the kernel's address space into page table 'pt'.  With 4MB pages
 * available, a single big-page PDE covers the kernel; otherwise the
 * kernel text and data are mapped page by page with pt_writemap().
 *
 * NOTE(review): collapsed diff residue.  The old lazy big-page setup
 * ('pde'/'do_bigpage'/'kern_phys' statics) is interleaved with the new
 * precomputed 'kernel_pde'/'kern_pde_val' path, and two pt_writemap
 * calls each carry BOTH the old and the new flags-argument line
 * (I386_VM_PRESENT|global vs I386_VM_PRESENT|global_bit), which cannot
 * compile.  One version of each must be kept when resolving the merge.
 */
PUBLIC int pt_mapkernel(pt_t *pt)
{
int r;
static int pde = -1, do_bigpage = 0;
u32_t global = 0;
static u32_t kern_phys;
static int printed = 0;
if(global_bit_ok) global = I386_VM_GLOBAL;
/* Any i386 page table needs to map in the kernel address space. */
vm_assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
/* One-time setup: if the kernel fits in a single page directory
 * slot, remember that slot for big-page mapping.
 */
if(pde == -1 && bigpage_ok) {
int pde1, pde2;
pde1 = I386_VM_PDE(KERNEL_TEXT);
pde2 = I386_VM_PDE(KERNEL_DATA+KERNEL_DATA_LEN);
if(pde1 != pde2) {
printf("VM: pt_mapkernel: kernel too big?");
bigpage_ok = 0;
} else {
kern_phys = KERNEL_TEXT & I386_VM_ADDR_MASK_4MB;
pde = pde1;
do_bigpage = 1;
vm_assert(pde >= 0);
}
}
if(do_bigpage) {
pt->pt_dir[pde] = kern_phys |
I386_VM_BIGPAGE|I386_VM_PRESENT|I386_VM_WRITE|global;
if(bigpage_ok) {
if(kernel_pde >= 0) {
pt->pt_dir[kernel_pde] = kern_pde_val;
} else
vm_panic("VM: pt_mapkernel: no kernel pde", NO_NUM);
} else {
vm_panic("VM: pt_mapkernel: no bigpage", NO_NUM);
/* Map in text. flags: don't write, supervisor only */
if((r=pt_writemap(pt, KERNEL_TEXT, KERNEL_TEXT, KERNEL_TEXT_LEN,
I386_VM_PRESENT|global, 0)) != OK)
I386_VM_PRESENT|global_bit, 0)) != OK)
return r;
/* Map in data. flags: read-write, supervisor only */
if((r=pt_writemap(pt, KERNEL_DATA, KERNEL_DATA, KERNEL_DATA_LEN,
I386_VM_PRESENT|I386_VM_WRITE|global, 0)) != OK)
I386_VM_PRESENT|I386_VM_WRITE, 0)) != OK)
return r;
}
if(pagedir_pde >= 0) {
/* Kernel also wants to know about all page directories. */
pt->pt_dir[pagedir_pde] = pagedir_pde_val;
}
return OK;
}
/*===========================================================================*
* pt_freerange *
* pt_check *
*===========================================================================*/
/* NOTE(review): collapsed diff residue fusing TWO functions over one
 * body: the removed pt_freerange() (frees PTF_MAPALLOC pages in a
 * virtual range of 'pt') and the added pt_check() (verifies a boot-time
 * process's stack segment ends below the kernel big-page range).  Also
 * note the printf passes two value arguments (vm_endpoint, hi) for a
 * single %d conversion — the new version's format string was likely
 * lost in the merge.  Neither function is complete as written.
 */
PUBLIC void pt_freerange(pt_t *pt, vir_bytes low, vir_bytes high)
PUBLIC void pt_check(struct vmproc *vmp)
{
/* Free memory allocated by pagetable functions in this range. */
int pde;
u32_t v;
PT_SANE(pt);
for(v = low; v < high; v += I386_PAGE_SIZE) {
int pte;
pde = I386_VM_PDE(v);
pte = I386_VM_PTE(v);
if(!(pt->pt_dir[pde] & I386_VM_PRESENT))
continue;
/* Only pages the pagetable code allocated itself. */
if((pt->pt_pt[pde][pte] & (PTF_MAPALLOC | I386_VM_PRESENT))
== (PTF_MAPALLOC | I386_VM_PRESENT)) {
u32_t pa = I386_VM_PFA(pt->pt_pt[pde][pte]);
FREE_MEM(ABS2CLICK(pa), CLICKSPERPAGE);
pt->pt_pt[pde][pte] = 0;
}
phys_bytes hi;
hi = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
vmp->vm_arch.vm_seg[S].mem_len);
if(hi >= (kernel_pde+1) * I386_BIG_PAGE_SIZE) {
printf("VM: %d doesn't fit in kernel range\n",
vmp->vm_endpoint, hi);
vm_panic("boot time processes too big", NO_NUM);
}
PT_SANE(pt);
return;
}
/*===========================================================================*
@ -796,82 +831,3 @@ PUBLIC void pt_cycle(void)
vm_checkspares();
}
/* In sanity check mode, pages are mapped and unmapped explicitly, so
* unexpected double mappings (overwriting a page table entry) are caught.
* If not sanity checking, simply keep the page mapped in and overwrite
* the mapping entry; we need WMF_OVERWRITE for that in PHYS_MAP though.
*/
#if SANITYCHECKS
#define MAPFLAGS 0
#else
#define MAPFLAGS WMF_OVERWRITE
#endif
static u32_t ismapped = MAP_NONE;
#define PHYS_MAP(a, o) \
{ int r; \
u32_t wantmapped; \
vm_assert(varmap); \
(o) = (a) % I386_PAGE_SIZE; \
wantmapped = (a) - (o); \
if(wantmapped != ismapped || ismapped == MAP_NONE) { \
r = pt_writemap(&vmp->vm_pt, (vir_bytes) varmap_loc, \
wantmapped, I386_PAGE_SIZE, \
I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE, \
MAPFLAGS); \
if(r != OK) \
vm_panic("PHYS_MAP: pt_writemap", NO_NUM); \
ismapped = wantmapped; \
/* pt_bind() flushes TLB. */ \
pt_bind(&vmp->vm_pt, vmp); \
} \
}
#define PHYSMAGIC 0x7b9a0590
#if SANITYCHECKS
#define PHYS_UNMAP if(OK != pt_writemap(&vmp->vm_pt, varmap_loc, MAP_NONE,\
I386_PAGE_SIZE, 0, WMF_OVERWRITE)) { \
vm_panic("PHYS_UNMAP: pt_writemap failed", NO_NUM); }
ismapped = MAP_NONE;
#endif
#define PHYS_VAL(o) (* (phys_bytes *) (varmap + (o)))
/*===========================================================================*
* phys_writeaddr *
*===========================================================================*/
/* Write two phys_bytes values (v1, v2) consecutively at physical
 * address 'addr', by temporarily mapping the containing page into VM's
 * own address space at varmap via PHYS_MAP and storing through
 * PHYS_VAL.  In SANITYCHECKS builds, a PHYSMAGIC marker is written
 * after the pair (checked later by phys_readaddr) and the page is
 * explicitly unmapped again so stale mappings are caught.
 * NOTE(review): assumes the three words don't cross a page boundary —
 * PHYS_MAP maps a single page; confirm callers align 'addr'.
 */
PUBLIC void phys_writeaddr(phys_bytes addr, phys_bytes v1, phys_bytes v2)
{
phys_bytes offset;
SANITYCHECK(SCL_DETAIL);
PHYS_MAP(addr, offset);
PHYS_VAL(offset) = v1;
PHYS_VAL(offset + sizeof(phys_bytes)) = v2;
#if SANITYCHECKS
PHYS_VAL(offset + 2*sizeof(phys_bytes)) = PHYSMAGIC;
PHYS_UNMAP;
#endif
SANITYCHECK(SCL_DETAIL);
}
/*===========================================================================*
* phys_readaddr *
*===========================================================================*/
/* Read two consecutive phys_bytes values from physical address 'addr'
 * into *v1 and *v2, using the same temporary PHYS_MAP/PHYS_VAL window
 * as phys_writeaddr.  In SANITYCHECKS builds the PHYSMAGIC marker
 * written by phys_writeaddr is verified, catching corruption or reads
 * of locations not written by its counterpart.
 */
PUBLIC void phys_readaddr(phys_bytes addr, phys_bytes *v1, phys_bytes *v2)
{
phys_bytes offset;
SANITYCHECK(SCL_DETAIL);
PHYS_MAP(addr, offset);
*v1 = PHYS_VAL(offset);
*v2 = PHYS_VAL(offset + sizeof(phys_bytes));
#if SANITYCHECKS
vm_assert(PHYS_VAL(offset + 2*sizeof(phys_bytes)) == PHYSMAGIC);
PHYS_UNMAP;
#endif
SANITYCHECK(SCL_DETAIL);
}

View File

@ -5,6 +5,8 @@
#include <stdint.h>
#include <sys/vm_i386.h>
#include "../vm.h"
/* An i386 pagetable. */
typedef struct {
/* Directory entries in VM addr space - root of page table. */
@ -34,5 +36,12 @@ typedef struct {
*/
#define PTF_ALLFLAGS (PTF_WRITE|PTF_PRESENT|PTF_USER|PTF_GLOBAL)
#if SANITYCHECKS
#define PT_SANE(p) { pt_sanitycheck((p), __FILE__, __LINE__); }
#else
#define PT_SANE(p)
#endif
#endif

View File

@ -25,83 +25,18 @@
#include "memory.h"
#define PAGE_SIZE 4096
#define PAGE_DIR_SIZE (1024*PAGE_SIZE)
#define PAGE_TABLE_COVER (1024*PAGE_SIZE)
/*=========================================================================*
* arch_init_vm *
*=========================================================================*/
/* Reserve physical memory for the kernel's page tables (old scheme,
 * removed by this change in favor of VM building its own tables).
 * Finds the highest physical address, computes how many pages are
 * needed for a page directory plus page tables covering all memory,
 * carves that many clicks out of the first sufficiently large memory
 * chunk, and hands the buffer to the kernel via sys_vm_setbuf().
 * Note: mem_chunks is modified in place (base/size of the chosen
 * chunk are adjusted).
 */
PUBLIC void arch_init_vm(mem_chunks)
struct memory mem_chunks[NR_MEMS];
{
phys_bytes high, bytes;
phys_clicks clicks, base_click;
unsigned pages;
int i, r;
/* Compute the highest memory location */
high= 0;
for (i= 0; i<NR_MEMS; i++)
{
if (mem_chunks[i].size == 0)
continue;
if (mem_chunks[i].base + mem_chunks[i].size > high)
high= mem_chunks[i].base + mem_chunks[i].size;
}
/* 'high' was in clicks; convert to bytes. */
high <<= CLICK_SHIFT;
#if VERBOSE_VM
printf("do_x86_vm: found high 0x%x\n", high);
#endif
/* Round up to a whole page-directory's worth of coverage. */
high= (high-1+PAGE_DIR_SIZE) & ~(PAGE_DIR_SIZE-1);
/* The number of pages we need is one for the page directory, enough
 * page tables to cover the memory, and one page for alignment.
 */
pages= 1 + (high + PAGE_TABLE_COVER-1)/PAGE_TABLE_COVER + 1;
bytes= pages*PAGE_SIZE;
clicks= (bytes + CLICK_SIZE-1) >> CLICK_SHIFT;
#if VERBOSE_VM
printf("do_x86_vm: need %d pages\n", pages);
printf("do_x86_vm: need %d bytes\n", bytes);
printf("do_x86_vm: need %d clicks\n", clicks);
#endif
/* Find the first chunk large enough to hold the page tables. */
for (i= 0; i<NR_MEMS; i++)
{
if (mem_chunks[i].size <= clicks)
continue;
break;
}
if (i >= NR_MEMS)
panic("VM", "not enough memory for VM page tables?", NO_NUM);
/* Take 'clicks' from the bottom of the chosen chunk. */
base_click= mem_chunks[i].base;
mem_chunks[i].base += clicks;
mem_chunks[i].size -= clicks;
#if VERBOSE_VM
printf("do_x86_vm: using 0x%x clicks @ 0x%x\n", clicks, base_click);
#endif
r= sys_vm_setbuf(base_click << CLICK_SHIFT, clicks << CLICK_SHIFT,
high);
if (r != 0)
printf("do_x86_vm: sys_vm_setbuf failed: %d\n", r);
}
/*===========================================================================*
* arch_map2vir *
*===========================================================================*/
/* Translate an absolute address into a process-virtual address by
 * subtracting the process' data segment base.
 *
 * vmp:  process whose segment layout is consulted.
 * addr: absolute (linear) address; must lie at or above the data
 *       segment base, or be a text address.
 *
 * Returns the process-relative virtual address.
 */
PUBLIC vir_bytes arch_map2vir(struct vmproc *vmp, vir_bytes addr)
{
	vir_bytes textstart = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys);
	vir_bytes datastart = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys);

	/* Could be a text address. */
	vm_assert(datastart <= addr || textstart <= addr);

	/* BUG FIX: the block previously contained two consecutive return
	 * statements ('return addr - bottom;' followed by
	 * 'return addr - datastart;'); the second was unreachable dead
	 * code. 'bottom' was computed from the same vm_seg[D].mem_phys as
	 * 'datastart', so keeping the single subtraction below preserves
	 * behavior exactly.
	 */
	return addr - datastart;
}
/*===========================================================================*

View File

@ -77,6 +77,7 @@ PUBLIC int main(void)
#if SANITYCHECKS
nocheck = 0;
FIXME("VM SANITYCHECKS are on");
memcpy(data1, CHECKADDR, sizeof(data1));
#endif
SANITYCHECK(SCL_TOP);
@ -114,21 +115,18 @@ PUBLIC int main(void)
switch(msg.m_source) {
case SYSTEM:
/* Kernel wants to have memory ranges
* verified.
* verified, and/or pagefaults handled.
*/
do_memory();
break;
case HARDWARE:
do_pagefaults();
break;
case PM_PROC_NR:
/* PM sends a notify() on shutdown, which
* is OK and we ignore.
*/
break;
case HARDWARE:
/* This indicates a page fault has happened,
* which we have to handle.
*/
do_pagefaults();
break;
default:
/* No-one else should send us notifies. */
printf("VM: ignoring notify() from %d\n",
@ -177,6 +175,7 @@ PUBLIC int main(void)
PRIVATE void vm_init(void)
{
int s, i;
int click, clicksforgotten = 0;
struct memory mem_chunks[NR_MEMS];
struct boot_image image[NR_BOOT_PROCS];
struct boot_image *ip;
@ -241,37 +240,12 @@ PRIVATE void vm_init(void)
vmp->vm_flags |= VMF_SEPARATE;
}
/* Let architecture-dependent VM initialization use some memory. */
arch_init_vm(mem_chunks);
/* Architecture-dependent initialization. */
pt_init();
/* Initialize tables to all physical memory. */
mem_init(mem_chunks);
/* Bits of code need to know where a process can
* start in a pagetable.
*/
kernel_top_bytes = find_kernel_top();
/* Can first kernel pages of code and data be (left) mapped out?
* If so, change the SYSTEM process' memory map to reflect this
* (future mappings of SYSTEM into other processes will not include
* first pages), and free the first pages.
*/
if(vm_paged && sys_vmctl(SELF, VMCTL_NOPAGEZERO, 0) == OK) {
struct vmproc *vmp;
vmp = &vmproc[VMP_SYSTEM];
if(vmp->vm_arch.vm_seg[T].mem_len > 0) {
#define DIFF CLICKSPERPAGE
vmp->vm_arch.vm_seg[T].mem_phys += DIFF;
vmp->vm_arch.vm_seg[T].mem_len -= DIFF;
}
vmp->vm_arch.vm_seg[D].mem_phys += DIFF;
vmp->vm_arch.vm_seg[D].mem_len -= DIFF;
}
meminit_done = 1;
/* Give these processes their own page table. */
for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
@ -283,14 +257,22 @@ PRIVATE void vm_init(void)
GETVMP(vmp, ip->proc_nr);
if(!(ip->flags & PROC_FULLVM)) {
/* See if this process fits in kernel
* mapping. VM has its own pagetable,
* don't check it.
*/
if(!(vmp->vm_flags & VMF_HASPT)) {
pt_check(vmp);
}
continue;
}
old_stack =
vmp->vm_arch.vm_seg[S].mem_vir +
vmp->vm_arch.vm_seg[S].mem_len -
vmp->vm_arch.vm_seg[D].mem_len;
if(!(ip->flags & PROC_FULLVM))
continue;
if(pt_new(&vmp->vm_pt) != OK)
vm_panic("vm_init: no new pagetable", NO_NUM);
#define BASICSTACK VM_PAGE_SIZE
@ -370,11 +352,3 @@ PRIVATE void vm_init(void)
vm_panic("kernel loaded too high", NO_NUM);
}
#if 0
/* Disabled debug console putchar: emits 'c' via ser_putc(), translating
 * a bare '\n' into a CR+LF pair first. Kept under #if 0; enable only for
 * raw serial debugging.
 */
void kputc(int c)
{
	if(c == '\n')
		ser_putc('\r');
	ser_putc(c);
}
#endif

View File

@ -47,10 +47,6 @@ PUBLIC int do_mmap(message *m)
vmp = &vmproc[n];
if(m->VMM_FLAGS & MAP_LOWER16M)
printf("VM: warning for %d: MAP_LOWER16M not implemented\n",
m->m_source);
if(!(vmp->vm_flags & VMF_HASPT))
return ENXIO;
@ -66,14 +62,15 @@ PUBLIC int do_mmap(message *m)
if(m->VMM_FLAGS & MAP_CONTIG) mfflags |= MF_CONTIG;
if(m->VMM_FLAGS & MAP_PREALLOC) mfflags |= MF_PREALLOC;
if(m->VMM_FLAGS & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
if(m->VMM_FLAGS & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
if(len % VM_PAGE_SIZE)
len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);
if(!(vr = map_page_region(vmp,
arch_vir2map(vmp, vmp->vm_stacktop), VM_DATATOP, len, MAP_NONE,
vrflags, mfflags))) {
arch_vir2map(vmp, vmp->vm_stacktop),
VM_DATATOP, len, MAP_NONE, vrflags, mfflags))) {
return ENOMEM;
}
} else {
@ -84,6 +81,7 @@ PUBLIC int do_mmap(message *m)
vm_assert(vr);
m->VMM_RETADDR = arch_map2vir(vmp, vr->vaddr);
return OK;
}

View File

@ -61,12 +61,21 @@ PUBLIC void do_pagefaults(void)
vir_bytes offset;
int p, wr = PFERR_WRITE(err);
#if 0
printf("VM: pagefault: ep %d 0x%lx %s\n",
ep, arch_map2vir(vmp, addr), pf_errstr(err));
#endif
if(vm_isokendpt(ep, &p) != OK)
vm_panic("do_pagefaults: endpoint wrong", ep);
vmp = &vmproc[p];
vm_assert(vmp->vm_flags & VMF_INUSE);
#if 0
map_printmap(vmp);
#endif
/* See if address is valid at all. */
if(!(region = map_lookup(vmp, addr))) {
vm_assert(PFERR_NOPAGE(err));
@ -104,11 +113,19 @@ PUBLIC void do_pagefaults(void)
vm_panic("sys_kill failed", s);
continue;
}
#if 0
printf("VM: map_pf done; ep %d 0x%lx %s\n",
ep, arch_map2vir(vmp, addr), pf_errstr(err));
printf("VM: handling pagefault OK: %d addr 0x%lx %s\n",
ep, arch_map2vir(vmp, addr), pf_errstr(err));
#endif
/* Pagefault is handled, so now reactivate the process. */
if((s=sys_vmctl(ep, VMCTL_CLEAR_PAGEFAULT, r)) != OK)
vm_panic("do_pagefaults: sys_vmctl failed", ep);
}
return;
@ -120,55 +137,73 @@ PUBLIC void do_pagefaults(void)
PUBLIC void do_memory(void)
{
int r, s;
endpoint_t who;
endpoint_t who, requestor;
vir_bytes mem;
vir_bytes len;
int wrflag;
while((r=sys_vmctl_get_memreq(&who, &mem, &len, &wrflag)) == OK) {
while((r=sys_vmctl_get_memreq(&who, &mem, &len, &wrflag, &requestor))
== OK) {
int p, r = OK;
struct vir_region *region;
struct vmproc *vmp;
vir_bytes o;
if(vm_isokendpt(who, &p) != OK)
vm_panic("do_memory: endpoint wrong", who);
vmp = &vmproc[p];
/* Page-align memory and length. */
o = mem % VM_PAGE_SIZE;
mem -= o;
len += o;
o = len % VM_PAGE_SIZE;
if(o > 0) len += VM_PAGE_SIZE - o;
r = handle_memory(vmp, mem, len, wrflag);
if(!(region = map_lookup(vmp, mem))) {
printf("VM: do_memory: memory doesn't exist\n");
r = EFAULT;
} else if(mem + len > region->vaddr + region->length) {
vm_assert(region->vaddr <= mem);
vm_panic("do_memory: not contained", NO_NUM);
} else if(!(region->flags & VR_WRITABLE) && wrflag) {
printf("VM: do_memory: write to unwritable map\n");
r = EFAULT;
} else {
vir_bytes offset;
vm_assert(region->vaddr <= mem);
vm_assert(!(region->flags & VR_NOPF));
vm_assert(!(region->vaddr % VM_PAGE_SIZE));
offset = mem - region->vaddr;
r = map_handle_memory(vmp, region, offset, len, wrflag);
}
if(r != OK) {
printf("VM: memory range 0x%lx-0x%lx not available in %d\n",
arch_map2vir(vmp, mem), arch_map2vir(vmp, mem+len),
vmp->vm_endpoint);
}
if(sys_vmctl(who, VMCTL_MEMREQ_REPLY, r) != OK)
if(sys_vmctl(requestor, VMCTL_MEMREQ_REPLY, r) != OK)
vm_panic("do_memory: sys_vmctl failed", r);
#if 0
printf("VM: handling memory request %d done OK\n",
who);
#endif
}
}
/* Make the memory range [mem, mem+len) usable in process 'vmp': the range
 * is page-aligned, looked up in the process' region list, validated
 * (contained in one region, writable if 'wrflag' requests a write), and
 * then instantiated via map_handle_memory().
 *
 * Returns OK on success, EFAULT if the range does not exist or is not
 * writable when a write is requested; panics if the range straddles the
 * region end.
 */
int handle_memory(struct vmproc *vmp, vir_bytes mem, vir_bytes len, int wrflag)
{
	struct vir_region *region;
	vir_bytes o;
	int r;

#if 0
	printf("VM: handling memory request: %d, 0x%lx-0x%lx, wr %d\n",
		vmp->vm_endpoint, mem, mem+len, wrflag);
#endif

	/* Page-align memory and length. */
	o = mem % VM_PAGE_SIZE;
	mem -= o;
	len += o;
	o = len % VM_PAGE_SIZE;
	if(o > 0) len += VM_PAGE_SIZE - o;

	if(!(region = map_lookup(vmp, mem))) {
		map_printmap(vmp);
		printf("VM: do_memory: memory doesn't exist\n");
		r = EFAULT;
	} else if(mem + len > region->vaddr + region->length) {
		vm_assert(region->vaddr <= mem);
		vm_panic("do_memory: not contained", NO_NUM);
	} else if(!(region->flags & VR_WRITABLE) && wrflag) {
		printf("VM: do_memory: write to unwritable map\n");
		r = EFAULT;
	} else {
		vir_bytes offset;
		vm_assert(region->vaddr <= mem);
		vm_assert(!(region->flags & VR_NOPF));
		vm_assert(!(region->vaddr % VM_PAGE_SIZE));
		offset = mem - region->vaddr;
		r = map_handle_memory(vmp, region, offset, len, wrflag);
	}

	if(r != OK) {
		printf("VM: memory range 0x%lx-0x%lx not available in %d\n",
			arch_map2vir(vmp, mem), arch_map2vir(vmp, mem+len),
			vmp->vm_endpoint);
	}

	/* BUG FIX: this int-returning function previously fell off the end
	 * without returning 'r', which is undefined behavior when the
	 * caller uses the result (as the do_memory() caller does).
	 */
	return r;
}

24
servers/vm/pagerange.h Normal file
View File

@ -0,0 +1,24 @@
#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/keymap.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/const.h>
/* One node of the VM free-memory structure; per the surrounding code this
 * is organized as an AVL tree (see 'less'/'greater'/'factor' below).
 * NOTE(review): both addr and size are stated to be in pages, not bytes,
 * despite the phys_bytes type — confirm against the allocator code.
 */
typedef struct pagerange {
	phys_bytes addr;	/* start of the free range, in pages */
	phys_bytes size;	/* length of the free range, in pages */

	/* AVL fields */
	struct pagerange *less, *greater; /* children: smaller/larger addr */
	int factor;		/* AVL balance factor */
} pagerange_t;

View File

@ -81,9 +81,12 @@ _PROTOTYPE(int do_unmap_phys, (message *msg) );
_PROTOTYPE( void do_pagefaults, (void) );
_PROTOTYPE( void do_memory, (void) );
_PROTOTYPE( char *pf_errstr, (u32_t err));
_PROTOTYPE( int handle_memory, (struct vmproc *vmp, vir_bytes mem,
vir_bytes len, int wrflag));
/* $(ARCH)/pagetable.c */
_PROTOTYPE( void pt_init, (void) );
_PROTOTYPE( void pt_check, (struct vmproc *vmp) );
_PROTOTYPE( int pt_new, (pt_t *pt) );
_PROTOTYPE( void pt_free, (pt_t *pt) );
_PROTOTYPE( void pt_freerange, (pt_t *pt, vir_bytes lo, vir_bytes hi) );
@ -93,8 +96,6 @@ _PROTOTYPE( int pt_bind, (pt_t *pt, struct vmproc *who) );
_PROTOTYPE( void *vm_allocpages, (phys_bytes *p, int pages, int cat));
_PROTOTYPE( void pt_cycle, (void));
_PROTOTYPE( int pt_mapkernel, (pt_t *pt));
_PROTOTYPE( void phys_readaddr, (phys_bytes addr, phys_bytes *v1, phys_bytes *v2));
_PROTOTYPE( void phys_writeaddr, (phys_bytes addr, phys_bytes v1, phys_bytes v2));
#if SANITYCHECKS
_PROTOTYPE( void pt_sanitycheck, (pt_t *pt, char *file, int line) );
#endif
@ -135,6 +136,7 @@ _PROTOTYPE(int map_pf,(struct vmproc *vmp,
struct vir_region *region, vir_bytes offset, int write));
_PROTOTYPE(int map_handle_memory,(struct vmproc *vmp,
struct vir_region *region, vir_bytes offset, vir_bytes len, int write));
_PROTOTYPE(void map_printmap, (struct vmproc *vmp));
_PROTOTYPE(struct vir_region * map_region_lookup_tag, (struct vmproc *vmp, u32_t tag));
_PROTOTYPE(void map_region_set_tag, (struct vir_region *vr, u32_t tag));

View File

@ -32,8 +32,6 @@ FORWARD _PROTOTYPE(int map_new_physblock, (struct vmproc *vmp,
FORWARD _PROTOTYPE(int map_copy_ph_block, (struct vmproc *vmp, struct vir_region *region, struct phys_region *ph));
FORWARD _PROTOTYPE(struct vir_region *map_copy_region, (struct vir_region *));
FORWARD _PROTOTYPE(void map_printmap, (struct vmproc *vmp));
PRIVATE char *map_name(struct vir_region *vr)
{
int type = vr->flags & (VR_ANON|VR_DIRECT);
@ -52,7 +50,7 @@ PRIVATE char *map_name(struct vir_region *vr)
/*===========================================================================*
* map_printmap *
*===========================================================================*/
PRIVATE void map_printmap(vmp)
PUBLIC void map_printmap(vmp)
struct vmproc *vmp;
{
struct vir_region *vr;
@ -60,14 +58,15 @@ struct vmproc *vmp;
for(vr = vmp->vm_regions; vr; vr = vr->next) {
struct phys_region *ph;
int nph = 0;
printf("map_printmap: map_name: %s\n", map_name(vr));
printf("\t0x%lx - 0x%lx (len 0x%lx), %s\n",
vr->vaddr, vr->vaddr + vr->length, vr->length,
vr->vaddr + vr->length, map_name(vr));
map_name(vr));
printf("\t\tphysical: ");
for(ph = vr->first; ph; ph = ph->next) {
printf("0x%lx-0x%lx (refs %d): phys 0x%lx ",
vr->vaddr + ph->ph->offset,
vr->vaddr + ph->ph->offset + ph->ph->length,
vr->vaddr + ph->offset,
vr->vaddr + ph->offset + ph->ph->length,
ph->ph->refcount,
ph->ph->phys);
nph++;
@ -123,8 +122,8 @@ PUBLIC void map_sanitycheck(char *file, int line)
map_printmap(vmp);
printf("ph in vr 0x%lx: 0x%lx-0x%lx refcount %d "
"but seencount %lu\n",
vr, pr->ph->offset,
pr->ph->offset + pr->ph->length,
vr, pr->offset,
pr->offset + pr->ph->length,
pr->ph->refcount, pr->ph->seencount);
}
{
@ -147,7 +146,7 @@ PUBLIC void map_sanitycheck(char *file, int line)
MYASSERT(pr->ph->refcount == n_others);
}
MYASSERT(pr->ph->refcount == pr->ph->seencount);
MYASSERT(!(pr->ph->offset % VM_PAGE_SIZE));
MYASSERT(!(pr->offset % VM_PAGE_SIZE));
MYASSERT(!(pr->ph->length % VM_PAGE_SIZE)););
}
#endif
@ -156,14 +155,15 @@ PUBLIC void map_sanitycheck(char *file, int line)
/*=========================================================================*
* map_ph_writept *
*=========================================================================*/
PUBLIC int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
struct phys_block *pb, int *ropages, int *rwpages)
PRIVATE int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
struct phys_region *pr, int *ropages, int *rwpages)
{
int rw;
struct phys_block *pb = pr->ph;
vm_assert(!(vr->vaddr % VM_PAGE_SIZE));
vm_assert(!(pb->length % VM_PAGE_SIZE));
vm_assert(!(pb->offset % VM_PAGE_SIZE));
vm_assert(!(pr->offset % VM_PAGE_SIZE));
vm_assert(pb->refcount > 0);
if((vr->flags & VR_WRITABLE)
@ -183,7 +183,7 @@ PUBLIC int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
}
#endif
if(pt_writemap(&vmp->vm_pt, vr->vaddr + pb->offset,
if(pt_writemap(&vmp->vm_pt, vr->vaddr + pr->offset,
pb->phys, pb->length, PTF_PRESENT | PTF_USER | rw,
WMF_OVERWRITE) != OK) {
printf("VM: map_writept: pt_writemap failed\n");
@ -308,7 +308,7 @@ int mapflags;
}
vm_assert(newregion->first);
vm_assert(!newregion->first->next);
if(map_ph_writept(vmp, newregion, newregion->first->ph, NULL, NULL) != OK) {
if(map_ph_writept(vmp, newregion, newregion->first, NULL, NULL) != OK) {
printf("VM: map_region_writept failed\n");
SLABFREE(newregion);
return NULL;
@ -409,7 +409,8 @@ void pb_unreferenced(struct vir_region *region, struct phys_region *pr)
SLABSANE(pb->firstregion);
SLABSANE(pb->firstregion->parent);
if(map_ph_writept(pb->firstregion->parent->parent,
pb->firstregion->parent, pb, NULL, NULL) != OK) {
pb->firstregion->parent, pb->firstregion,
NULL, NULL) != OK) {
vm_panic("pb_unreferenced: writept", NO_NUM);
}
}
@ -545,6 +546,8 @@ struct phys_region *physhint;
u32_t af = PAF_CLEAR;
if(region->flags & VR_PHYS64K)
af |= PAF_ALIGN64K;
if(region->flags & VR_LOWER16MB)
af |= PAF_LOWER16MB;
if((mem_clicks = ALLOC_MEM(clicks, af)) == NO_MEM) {
SLABFREE(newpb);
SLABFREE(newphysr);
@ -559,12 +562,12 @@ struct phys_region *physhint;
/* New physical block. */
newpb->phys = mem;
newpb->refcount = 1;
newpb->offset = offset;
newpb->length = length;
newpb->firstregion = newphysr;
SLABSANE(newpb->firstregion);
/* New physical region. */
newphysr->offset = offset;
newphysr->ph = newpb;
newphysr->parent = region;
newphysr->next_ph_list = NULL; /* No other references to this block. */
@ -573,7 +576,7 @@ struct phys_region *physhint;
vm_assert(!(length % VM_PAGE_SIZE));
vm_assert(!(newpb->length % VM_PAGE_SIZE));
SANITYCHECK(SCL_DETAIL);
if(map_ph_writept(vmp, region, newpb, NULL, NULL) != OK) {
if(map_ph_writept(vmp, region, newphysr, NULL, NULL) != OK) {
if(what_mem == MAP_NONE)
FREE_MEM(mem_clicks, clicks);
SLABFREE(newpb);
@ -581,17 +584,17 @@ struct phys_region *physhint;
return ENOMEM;
}
if(!region->first || offset < region->first->ph->offset) {
if(!region->first || offset < region->first->offset) {
/* Special case: offset is before start. */
if(region->first) {
vm_assert(offset + length <= region->first->ph->offset);
vm_assert(offset + length <= region->first->offset);
}
newphysr->next = region->first;
region->first = newphysr;
} else {
struct phys_region *physr;
for(physr = physhint; physr; physr = physr->next) {
if(!physr->next || physr->next->ph->offset > offset) {
if(!physr->next || physr->next->offset > offset) {
newphysr->next = physr->next;
physr->next = newphysr;
break;
@ -649,7 +652,6 @@ struct phys_region *ph;
SLABSANE(ph->ph);
vm_assert(ph->ph->refcount > 0);
newpb->length = ph->ph->length;
newpb->offset = ph->ph->offset;
newpb->refcount = 1;
newpb->phys = newmem;
newpb->firstregion = ph;
@ -675,7 +677,7 @@ struct phys_region *ph;
/* Update pagetable with new address.
* This will also make it writable.
*/
r = map_ph_writept(vmp, region, ph->ph, NULL, NULL);
r = map_ph_writept(vmp, region, ph, NULL, NULL);
if(r != OK)
vm_panic("map_copy_ph_block: map_ph_writept failed", r);
@ -708,7 +710,7 @@ int write;
SANITYCHECK(SCL_FUNCTIONS);
for(ph = region->first; ph; ph = ph->next)
if(ph->ph->offset <= offset && offset < ph->ph->offset + ph->ph->length)
if(ph->offset <= offset && offset < ph->offset + ph->ph->length)
break;
if(ph) {
@ -718,9 +720,10 @@ int write;
vm_assert(ph->ph->refcount > 0);
if(ph->ph->refcount == 1)
r = map_ph_writept(vmp, region, ph->ph, NULL, NULL);
else
r = map_ph_writept(vmp, region, ph, NULL, NULL);
else {
r = map_copy_ph_block(vmp, region, ph);
}
} else {
/* Pagefault in non-existing block. Map in new block. */
#if 0
@ -756,8 +759,8 @@ int write;
#define FREE_RANGE_HERE(er1, er2) { \
struct phys_region *r1 = (er1), *r2 = (er2); \
vir_bytes start = offset, end = offset + length; \
if(r1) { start = MAX(start, r1->ph->offset + r1->ph->length); } \
if(r2) { end = MIN(end, r2->ph->offset); } \
if(r1) { start = MAX(start, r1->offset + r1->ph->length); } \
if(r2) { end = MIN(end, r2->offset); } \
if(start < end) { \
int r; \
SANITYCHECK(SCL_DETAIL); \
@ -797,7 +800,7 @@ int write;
SANITYCHECK(SCL_DETAIL);
} else {
SANITYCHECK(SCL_DETAIL);
if((r=map_ph_writept(vmp, region, physr->ph, NULL, NULL)) != OK) {
if((r=map_ph_writept(vmp, region, physr, NULL, NULL)) != OK) {
printf("VM: map_ph_writept failed\n");
return r;
}
@ -870,6 +873,7 @@ PRIVATE struct vir_region *map_copy_region(struct vir_region *vr)
newph->ph = ph->ph;
newph->next_ph_list = NULL;
newph->parent = newvr;
newph->offset = ph->offset;
if(prevph) prevph->next = newph;
else newvr->first = newph;
prevph = newph;
@ -895,7 +899,7 @@ PUBLIC int map_writept(struct vmproc *vmp)
for(vr = vmp->vm_regions; vr; vr = vr->next)
for(ph = vr->first; ph; ph = ph->next) {
map_ph_writept(vmp, vr, ph->ph, &ropages, &rwpages);
map_ph_writept(vmp, vr, ph, &ropages, &rwpages);
}
return OK;
@ -912,6 +916,9 @@ struct vmproc *src;
dst->vm_regions = NULL;
SANITYCHECK(SCL_FUNCTIONS);
PT_SANE(&src->vm_pt);
for(vr = src->vm_regions; vr; vr = vr->next) {
struct vir_region *newvr;
struct phys_region *orig_ph, *new_ph;
@ -958,8 +965,12 @@ struct vmproc *src;
}
SANITYCHECK(SCL_DETAIL);
PT_SANE(&src->vm_pt);
map_writept(src);
PT_SANE(&src->vm_pt);
map_writept(dst);
PT_SANE(&dst->vm_pt);
SANITYCHECK(SCL_FUNCTIONS);
return OK;

View File

@ -6,7 +6,6 @@ struct phys_block {
#if SANITYCHECKS
u32_t seencount;
#endif
vir_bytes offset; /* offset from start of vir region */
vir_bytes length; /* no. of contiguous bytes */
phys_bytes phys; /* physical memory */
u8_t refcount; /* Refcount of these pages */
@ -19,6 +18,7 @@ struct phys_region {
struct phys_region *next; /* next contiguous block */
struct phys_block *ph;
struct vir_region *parent; /* Region that owns this phys_region. */
vir_bytes offset; /* offset from start of vir region */
/* list of phys_regions that reference the same phys_block */
struct phys_region *next_ph_list;
@ -38,6 +38,7 @@ struct vir_region {
#define VR_WRITABLE 0x01 /* Process may write here. */
#define VR_NOPF 0x02 /* May not generate page faults. */
#define VR_PHYS64K 0x04 /* Physical memory must be 64k aligned. */
#define VR_LOWER16MB 0x08
/* Mapping type: */
#define VR_ANON 0x10 /* Memory to be cleared and allocated */

View File

@ -34,7 +34,7 @@
for(vmp = vmproc; vmp <= &vmproc[_NR_PROCS]; vmp++) { \
if((vmp->vm_flags & (VMF_INUSE | VMF_HASPT)) == \
(VMF_INUSE | VMF_HASPT)) { \
pt_sanitycheck(&vmp->vm_pt, __FILE__, __LINE__); \
PT_SANE(&vmp->vm_pt); \
} \
} \
map_sanitycheck(__FILE__, __LINE__); \

View File

@ -108,6 +108,9 @@ struct mem_map *map_ptr; /* memory to remove */
}
if (memp >= &mem_chunks[NR_MEMS])
{
printf("VM: looking for memory at 0x%x, length 0x%x\n",
CLICK2ABS(map_ptr[T].mem_phys),
CLICK2ABS(map_ptr[T].mem_len));
vm_panic("reserve_proc_mem: can't find map in mem_chunks ",
map_ptr[T].mem_phys);
}

View File

@ -5,6 +5,7 @@
#define PAF_CLEAR 0x01 /* Clear physical memory. */
#define PAF_CONTIG 0x02 /* Physically contiguous. */
#define PAF_ALIGN64K 0x04 /* Aligned to 64k boundary. */
#define PAF_LOWER16MB 0x08
/* special value for v in pt_allocmap */
#define AM_AUTO ((u32_t) -1)
@ -14,7 +15,7 @@
/* Compile in asserts and custom sanity checks at all? */
#define SANITYCHECKS 0
#define VMSTATS 1
#define VMSTATS 0
/* If so, this level: */
#define SCL_NONE 0 /* No sanity checks - vm_assert()s only. */

View File

@ -1,8 +1,8 @@
Welcome to MINIX 3.1.3.
Welcome to MINIX 3.1.4.
This is an interim release, not final release quality. It is intended as
a prerelease for developers.
This snapshot is an interim release, not final release quality. It is
intended as a prerelease for developers.
The system is now running and many commands work normally. To use MINIX
in a serious way, you need to install it to your hard disk, which you
@ -16,7 +16,6 @@ to begin installing the many software packages available. After you
have installed the packages, type 'xdm' to start X Windows if you have
installed it.
It is strongly recommended that you print and read the 'setup.pdf' file
on the CD-ROM before starting the installation. The file is also
available at www.minix3.org/doc under the 'Installation' heading.
Before you begin the installation process, it is strongly recommended
that you print and carefully read the installation instructions
available on the MINIX 3 website: http://www.minix3.org.

View File

@ -0,0 +1,133 @@
CSSC-1.0.1
PopTart-0.9.7
Xaw3d-1.5
ackpack
antiword-0.35
apache-1.3.37
apr-0.9.12
apr-util-0.9.12
ascii-3.8
atk-1.9.0
audio-1.0.0
autoconf-2.59
autoconf-2.60
automake-1.9
avra-0.7
bash-3.0
bc-1.06
bchunk-1.2.0
bcrypt-1.1
bison-2.1
catdoc-0.94.2
cpio-2.5
ctags-5.5.4
cvs-1.11.21
dact-0.8.39
diffstat-1.39
diffutils-2.8.1
dungeon-2.7.1
ede-1.1
efltk-2.0.6
elvis
exim-4.66
fb
flawfinder-1.26
flex-2.5.4
flip-1.19
fltk-1.1.7
fltk-2.0.0-5220
gawk-3.1.4
gdbm-1.8.3
gettext-0.14
glib-1.2.10
glib-2.10.0
gnupg-1.4.8
grep-2.5.1a
groff-1.19.2
gv-3.5.8
gzip-1.2.4
gzip-beta-1.3.3
hexcat-0.0.3
irssi-0.8.10
jikes-1.22
john-1.7.2
jpeg-6b
jwm-1.8
kermit-2.1.1
lcrack-20040914
less-3.8.5
lesstif-0.93.96
libiconv-1.9.1
libmcrypt-2.5.7
libpng-1.2.8
libungif-4.1.3
libxml2-2.6.28
links-2.1pre26
lpr
lynx2-8-5
lzo-1.08
lzop-1.01
m4-1.4.3
make-3.80
mdf2iso
mtools-3.9.7
nano-1.3.12
nasm-0.98.38
nawk
ncftp-3.1.9
ncsa_httpd_1.4
ncurses-5.5
neon-0.25.5
nethack-3.4.3-2
netpbm-10.26.30
nomarch-1.3
nrg2iso-0.2
nvi-1.79
openssh-4.3p2
openssl-0.9.8a
patch-2.5.4
pce-0.1.7
pcre-4.5
pdksh-5.2.14
picasm112c
pine-4.64
pkg-config-0.20
prng-3.0.2
pscan-1.1
pstotext-1.9
psutils
python-2.4.3
rcs-5.7
readline-5.1
rman-3.2
robodoc-4.0.18
rsync-2.6.7
screen-4.0.2
sed-3.02
shhmsg-1.4.1
simhv35-2
sip-0.4
slang-1.4.9
slrn-0.9.8.1
sqlite-3.3.17
src2tex-2.12
ssmtp-2.61
subversion-1.4.0
tcl8.4.14
texinfo-4.7
tiff-3.8.2
tk8.4.14
unrtf-0.19.3
unzip-5.52
vice-1.19
vice-1.20
vile-9.4
vim-6.3
wdiff-0.5
webcpp-0.8.4
wget-1.5.3
whichman-2.4
xv-3.10a
zip-2.31
zlib-1.2.3
zsh-4.2.6

120
tools/packages.install Normal file
View File

@ -0,0 +1,120 @@
CSSC-1.0.1
ImageMagick-6.3.1
MPlayer-1.0rc1
PopTart-0.9.7
X11R6.8.2
Xaw3d-1.5
ackpack
antiword-0.35
apache-1.3.37
apr-0.9.12
apr-util-0.9.12
ascii-3.8
atk-1.9.0
audio-1.0.0
autoconf-2.59
autoconf-2.60
automake-1.9
avra-0.7
bash-3.0
bc-1.06
bchunk-1.2.0
bcrypt-1.1
binutils-2.16.1
bison-2.1
catdoc-0.94.2
cvs-1.11.21
diffutils-2.8.1
dungeon-2.7.1
ede-1.1
efltk-2.0.6
elvis
emacs-21.4
exim-4.66
fb
flawfinder-1.26
flex-2.5.4
fltk-1.1.7
fltk-2.0.0-5220
gawk-3.1.4
gcc-4.1.1-libs
gcc-4.1.1
gettext-0.14
ghostscript-8.54-gpl
ghostscript-fonts-std-8.11
glib-1.2.10
gnu-coreutils-5.2.1
gnupg-1.4.8
grep-2.5.1a
groff-1.19.2
gv-3.5.8
gzip-1.2.4
hexcat-0.0.3
irssi-0.8.10
jikes-1.22
john-1.7.2
jpeg-6b
jwm-1.8
kermit-2.1.1
lcrack-20040914
less-3.8.5
lesstif-0.93.96
libiconv-1.9.1
libpng-1.2.8
libxml2-2.6.28
links-2.1pre26
lpr
lynx2-8-5
m4-1.4.3
make-3.80
mdf2iso
mtools-3.9.7
nano-1.3.12
nawk
ncftp-3.1.9
ncurses-5.5
neon-0.25.5
nethack-3.4.3-2
nomarch-1.3
nvi-1.79
openssh-4.3p2
openssl-0.9.8a
patch-2.5.4
pce-0.1.7
pdksh-5.2.14
perl-5.8.7
php-5.2.1
pine-4.64
pkg-config-0.20
psutils
python-2.4.3
rcs-5.7
readline-5.1
rman-3.2
rsync-2.6.7
screen-4.0.2
sed-3.02
shhmsg-1.4.1
simhv35-2
sip-0.4
slang-1.4.9
slrn-0.9.8.1
sqlite-3.3.17
ssmtp-2.61
subversion-1.4.0
tcl8.4.14
texinfo-4.7
tiff-3.8.2
tk8.4.14
unrtf-0.19.3
unzip-5.52
vice-1.20
vile-9.4
vim-6.3
webcpp-0.8.4
wget-1.5.3
whichman-2.4
xv-3.10a
zip-2.31
zlib-1.2.3
zsh-4.2.6

View File

@ -3,6 +3,7 @@
set -e
XBIN=usr/xbin
BRANCHNAME=src.beng-working.r4327
SRC=src
# size of /tmp during build
@ -10,6 +11,10 @@ TMPKB=32000
PACKAGEDIR=/usr/bigports/Packages
PACKAGESOURCEDIR=/usr/bigports/Sources
# List of packages included on installation media
PACKAGELIST=packages.install
# List of package source included on installation media
PACKAGESOURCELIST=package_sources.install
secs=`expr 32 '*' 64`
export SHELL=/bin/sh
@ -205,24 +210,37 @@ cp -rp /usr/lib $RELEASEDIR/usr
cp -rp /bin/bigsh /bin/sh /bin/echo $RELEASEDIR/bin
cp -rp /usr/bin/make /usr/bin/install /usr/bin/yacc /usr/bin/flex $RELEASEDIR/usr/bin
if [ -d $PACKAGEDIR -a -d $PACKAGESOURCEDIR -a $PACKAGES -ne 0 ]
if [ -d $PACKAGEDIR -a -d $PACKAGESOURCEDIR -a -f $PACKAGELIST -a -f $PACKAGESOURCELIST -a $PACKAGES -ne 0 ]
then echo " * Indexing packages"
bintotal=0
( cd $PACKAGEDIR
for p in *.tar.bz2
( for p in `cat $PACKAGELIST`
do
p="`echo $p | sed 's/.tar.bz2//'`"
descr="../$p/.descr"
descr="$PACKAGEDIR/../$p/.descr"
if [ -f "$descr" ]
then echo "$p|`cat $descr`"
fi
done | tee List
done | tee $RELEASEPACKAGE/List
)
echo " * Transfering $PACKAGEDIR to $RELEASEPACKAGE"
cp $PACKAGEDIR/* $RELEASEPACKAGE/
for p in `cat $PACKAGELIST`
do
if [ -f $PACKAGEDIR/$p.tar.bz2 ]
then
cp $PACKAGEDIR/$p.tar.bz2 $RELEASEPACKAGE/
else
echo "Can't copy $PACKAGEDIR/$p.tar.bz2. Missing."
fi
done
echo " * Transfering $PACKAGESOURCEDIR to $RELEASEPACKAGESOURCES"
cp $PACKAGESOURCEDIR/* $RELEASEPACKAGESOURCES/ || true
for p in `cat $PACKAGESOURCELIST`
do
if [ -f $PACKAGESOURCEDIR/$p.tar.bz2 ]
then
cp $PACKAGESOURCEDIR/$p.tar.bz2 $RELEASEPACKAGESOURCES/
else
echo "Can't copy $PACKAGESOURCEDIR/$p.tar.bz2. Missing."
fi
done
fi
# Make sure compilers and libraries are bin-owned
@ -232,10 +250,10 @@ chmod -R u+w $RELEASEDIR/usr/lib
if [ "$COPY" -ne 1 ]
then
echo " * Doing new svn export"
REPO=https://gforge.cs.vu.nl/svn/minix/trunk/$SRC
REPO=https://gforge.cs.vu.nl/svn/minix/branches/$BRANCHNAME
REVISION="`svn info $USERNAME $SVNREV $REPO | grep '^Revision: ' | awk '{ print $2 }'`"
echo "Doing export of revision $REVISION from $REPO."
( cd $RELEASEDIR/usr && svn $USERNAME export -r$REVISION $REPO )
( cd $RELEASEDIR/usr && svn $USERNAME export -r$REVISION $REPO && mv $BRANCHNAME $SRC )
REVTAG=r$REVISION
echo "