Merge pull request #66 from Oichkatzelesfrettschen/kernel-refactor-foundations

Refactor: KASSERT_PLACEHOLDER to KASSERT Migration (Partial)
This commit is contained in:
Eirikr Hinngart 2025-06-06 21:11:28 -07:00 committed by GitHub
commit 57fd337d71
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
21 changed files with 323 additions and 132 deletions

View File

@ -13,6 +13,7 @@
// Added kernel headers
#include <minix/kernel_types.h>
#include <sys/kassert.h>
#include <klib/include/kprintf.h>
#include <klib/include/kstring.h>
#include <klib/include/kmemory.h>
@ -356,7 +357,7 @@ static const char *mtypename(int mtype, int *possible_callname)
if(errname) return errname;
KASSERT_PLACEHOLDER(callname); // MODIFIED
KASSERT(callname);
return callname;
}
@ -475,12 +476,12 @@ static void sortstats(void)
* and should be inserted at position 'w.'
*/
rem = PRINTSLOTS-w-1;
KASSERT_PLACEHOLDER(rem >= 0); // MODIFIED
KASSERT_PLACEHOLDER(rem < PRINTSLOTS); // MODIFIED
KASSERT(rem >= 0);
KASSERT(rem < PRINTSLOTS);
if(rem > 0) {
KASSERT_PLACEHOLDER(w+1 <= PRINTSLOTS-1); // MODIFIED
KASSERT_PLACEHOLDER(w >= 0); // MODIFIED
kmemmove(&winners[w+1], &winners[w], // MODIFIED
KASSERT(w+1 <= PRINTSLOTS-1);
KASSERT(w >= 0);
kmemmove(&winners[w+1], &winners[w],
rem*sizeof(winners[0]));
}
winners[w].src = src_slot;
@ -494,7 +495,7 @@ static void sortstats(void)
#define proc2slot(p, s) { \
if(p) { s = p->p_nr; } \
else { s = KERNELIPC; } \
KASSERT_PLACEHOLDER(s >= 0 && s < IPCPROCS); \
KASSERT(s >= 0 && s < IPCPROCS); \
}
static void statmsg(message *msg, struct proc *srcp, struct proc *dstp)
@ -503,9 +504,9 @@ static void statmsg(message *msg, struct proc *srcp, struct proc *dstp)
static int lastprint;
/* Stat message. */
KASSERT_PLACEHOLDER(src); // This assert was on 'src' which is uninitialized here. Assuming it meant srcp or similar.
KASSERT(src); // This assert was on 'src' which is uninitialized here. Assuming it meant srcp or similar.
// For now, keeping as is, but this is a bug in original code.
// If it meant to assert srcp, it would be KASSERT_PLACEHOLDER(srcp);
// If it meant to assert srcp, it would be KASSERT(srcp);
proc2slot(srcp, src);
proc2slot(dstp, dst);
messages[src][dst]++;
@ -563,7 +564,8 @@ void hook_ipc_clear(struct proc *p)
{
#if DEBUG_IPCSTATS
int slot, i;
KASSERT_PLACEHOLDER(p); // MODIFIED
KASSERT(p);
proc2slot(p, slot);
for(i = 0; i < IPCPROCS; i++)
messages[slot][i] = messages[i][slot] = 0;

View File

@ -51,4 +51,29 @@ typedef unsigned long k_vaddr_t; /* Virtual address type for x86_64 */
#error "Unsupported architecture for k_paddr_t and k_vaddr_t"
#endif
/* Atomic types (architecture might need to provide actual atomic operations) */
/* Atomic types (architecture might need to provide actual atomic operations) */
typedef struct {
	volatile long counter;
} __attribute__((aligned(8))) k_atomic_t; /* Generic atomic type, often for counters */

typedef struct {
	volatile long counter; /* Assuming long is suitable for most atomic ops;
	                        * could also be an arch-specific size. */
} __attribute__((aligned(8))) k_atomic_long_t; /* Explicitly long atomic type */

/* Opaque handle types (forward declarations) */
/* These declare pointer types to incomplete structs, hiding implementation details. */
/* The actual struct definitions would be elsewhere (e.g., proc.h, thread.h). */
typedef struct k_proc_handle *k_proc_handle_t;
typedef struct k_thread_handle *k_thread_handle_t;
typedef struct k_mem_region_handle *k_mem_region_handle_t; /* Renamed from k_mem_handle for clarity */

/* Memory alignment macros */
#define K_CACHE_LINE_SIZE 64 /* Common cache line size, may need arch-specific versions */
#define __k_cacheline_aligned __attribute__((aligned(K_CACHE_LINE_SIZE)))

/*
 * Offset of 'member' within 'type' without pulling in <stddef.h>.
 *
 * Prefer the compiler builtin: it is well-defined (the classic
 * "((type *)0)->member" trick formally dereferences a null pointer, which is
 * undefined behavior) and it yields an integer constant expression, so it is
 * safe to use inside _Static_assert / KSTATIC_ASSERT.  The cast-based form is
 * kept only as a fallback for compilers without the builtin.
 */
#if defined(__GNUC__) || defined(__clang__)
#define K_OFFSETOF(type, member) ((k_size_t)__builtin_offsetof(type, member))
#else
#define K_OFFSETOF(type, member) ((k_size_t)&((type *)0)->member)
#endif
#endif /* _MINIX_KERNEL_TYPES_H */

View File

@ -0,0 +1,35 @@
#ifndef _SYS_BARRIER_H
#define _SYS_BARRIER_H
/*
 * Memory barrier primitives for the kernel.
 *
 * kcompiler_barrier() only constrains the COMPILER: the empty asm with a
 * "memory" clobber forbids reordering or caching memory accesses across this
 * point, but emits no instruction and does not order the CPU itself.
 */
#define kcompiler_barrier() __asm__ __volatile__("" ::: "memory")
#if defined(__i386__) || defined(__x86_64__)
/* Full memory barrier - ensures all prior memory ops complete before subsequent ones */
#define kmb() __asm__ __volatile__("mfence" ::: "memory")
/* Read memory barrier - ensures all prior loads complete before subsequent loads */
#define krmb() __asm__ __volatile__("lfence" ::: "memory")
/* Write memory barrier - ensures all prior stores complete before subsequent stores */
#define kwmb() __asm__ __volatile__("sfence" ::: "memory")
#else
/* Fallback to compiler barriers for other architectures.
   NOTE(review): a compiler barrier alone is generally insufficient for true
   SMP safety on weakly-ordered architectures (e.g. ARM); platform-specific
   barrier instructions must replace these before SMP is enabled there. */
#define kmb() kcompiler_barrier()
#define krmb() kcompiler_barrier()
#define kwmb() kcompiler_barrier()
#endif
/*
 * Acquire/Release semantics for lock-free programming.
 * These ensure that operations before an atomic operation (acquire) are visible
 * before the atomic, and operations after an atomic operation (release) are
 * visible only after the atomic.
 * Using full memory barriers (kmb) provides strong ordering, which is
 * conservative but correct; on some architectures, lighter-weight barriers
 * could implement pure acquire or release semantics if available.
 */
#define k_smp_mb__before_atomic() kmb()
#define k_smp_mb__after_atomic() kmb()
#endif /* _SYS_BARRIER_H */

View File

@ -0,0 +1,29 @@
#ifndef _SYS_KASSERT_H
#define _SYS_KASSERT_H
/*
 * Kernel assertion support.
 *
 * KASSERT(cond)        - runtime check, active only when DEBUG_KERNEL is
 *                        defined by the build system; on failure it calls
 *                        kpanic() with the condition text and location.
 * KSTATIC_ASSERT(cond) - compile-time check (requires C11 _Static_assert).
 */

/* Kernel panic function - defined in kpanic.c.  Never returns.
 * The format attribute lets the compiler check printf-style arguments. */
void kpanic(const char *fmt, ...) __attribute__((noreturn, format (printf, 1, 2) ));

#ifdef DEBUG_KERNEL
#define KASSERT(cond) do { \
		if (!(cond)) { \
			kpanic("KASSERT failed: %s\n at %s:%d in %s()", \
			       #cond, __FILE__, __LINE__, __func__); \
		} \
	} while (0)
#else
/*
 * Release build: the condition must still parse and type-check so that
 * assert-only variables are not reported as unused and the expression cannot
 * silently bit-rot, but "0 && (cond)" guarantees it is never evaluated and
 * the whole expression folds away to nothing at compile time.
 */
#define KASSERT(cond) ((void)(0 && (cond)))
#endif

/* Compile-time assertion (requires C11 or later) */
#define KSTATIC_ASSERT(cond) _Static_assert(cond, #cond)
#endif /* _SYS_KASSERT_H */

View File

@ -20,6 +20,7 @@
// Added kernel headers
#include <minix/kernel_types.h>
#include <sys/kassert.h>
#include <klib/include/kprintf.h> // For KASSERT_PLACEHOLDER and kprintf_stub
#include <klib/include/kstring.h> // Precautionary
#include <klib/include/kmemory.h> // Precautionary
@ -124,7 +125,7 @@ void irq_handle(int irq)
irq_hook_t * hook;
/* here we need not to get this IRQ until all the handlers had a say */
KASSERT_PLACEHOLDER(irq >= 0 && irq < NR_IRQ_VECTORS); // MODIFIED
KASSERT(irq >= 0 && irq < NR_IRQ_VECTORS);
hw_intr_mask(irq);
hook = irq_handlers[irq];

View File

@ -22,32 +22,31 @@ void *kmemset(void *s, int c, k_size_t n) {
}
/*
 * kmemmove - copy n bytes from src to dest, correct even when the two
 * regions overlap (memmove semantics, freestanding kernel version).
 *
 * Returns dest.  A NULL dest or src with n > 0 would be undefined behavior
 * in standard memmove; here we defensively return dest unchanged instead,
 * since kernel callers may not have validated their pointers.
 */
void *kmemmove(void *dest, const void *src, k_size_t n) {
	unsigned char *pd = (unsigned char *)dest;
	const unsigned char *ps = (const unsigned char *)src;

	/* Nothing to do: zero length, identical regions, or invalid pointers. */
	if (n == 0 || pd == ps || dest == NULL || src == NULL) {
		return dest;
	}

	/*
	 * A backward copy is required only when dest lies strictly inside
	 * [src, src + n): a forward copy would then overwrite source bytes
	 * before they are read.  In every other case (dest < src, or the
	 * regions are disjoint) a simple forward copy is safe.
	 */
	if (pd > ps && pd < ps + n) {
		/* Copy backwards, starting from the last byte. */
		pd += n;
		ps += n;
		while (n--) {
			*--pd = *--ps;
		}
	} else {
		/* Forward copy. */
		while (n--) {
			*pd++ = *ps++;
		}
	}
	return dest;
}

View File

@ -0,0 +1,34 @@
#include <sys/kassert.h> // For kpanic's own declaration
#include <klib/include/kprintf.h> // For kprintf_stub
#include <stdarg.h> // For ... in kpanic signature, even if va_list not fully used by stub
// Forward declarations for HAL functions.
// These are expected to be provided by architecture-specific code.
// TODO: Replace these with includes of a proper HAL header when available.
extern void arch_disable_interrupts(void);
extern void arch_halt(void) __attribute__((noreturn));
// Definition of kpanic
/*
 * kpanic - unrecoverable kernel error handler.
 *
 * Sequence is deliberate and must not be reordered:
 *   1. disable interrupts so nothing preempts the panic path,
 *   2. print "KERNEL PANIC: " followed by the caller's message,
 *   3. halt the CPU (arch_halt is declared noreturn; this never returns).
 *
 * 'fmt' is a printf-style format string per the declaration in
 * <sys/kassert.h>, but variadic arguments are currently NOT formatted:
 * the raw format string is printed as-is until a real kvprintf exists.
 */
void kpanic(const char *fmt, ...) {
// Disable interrupts as early as possible so the panic path cannot be preempted.
arch_disable_interrupts();
// Fixed prefix so panics are easy to grep in console output.
kprintf_stub("KERNEL PANIC: ");
// Variadic arguments are dropped for now: kprintf_stub cannot format them,
// so the raw format string is emitted verbatim.
// NOTE(review): if fmt contains conversion specifiers (%s, %d, ...) they are
// printed literally here — presumably harmless with the current stub, but
// confirm kprintf_stub does no % interpretation of its own.
if (fmt) {
kprintf_stub(fmt); // Prints the raw format string, unformatted.
// TODO: When a real kvprintf is available, use it here:
// va_list ap;
// va_start(ap, fmt);
// kvprintf_real(fmt, ap); // Call the real kvprintf
// va_end(ap);
}
kprintf_stub("\n");
// Halt the system; arch_halt() is noreturn, so execution ends here.
arch_halt();
}

View File

@ -35,9 +35,9 @@
#include <klib/include/kmemory.h>
#include <klib/include/kprintf.h>
#include <minix/kernel_types.h>
#include <sys/kassert.h>
/* dummy for linking */
/* FIXME dummy for linking */
char *** _penviron;
/* Prototype declarations for PRIVATE functions. */
@ -129,7 +129,7 @@ void kmain(kinfo_t *local_cbi)
static int bss_test;
/* bss sanity check */
KASSERT_PLACEHOLDER(bss_test == 0); // MODIFIED
KASSERT(bss_test == 0);
bss_test = 1;
/* save a global copy of the boot parameters */
@ -149,7 +149,7 @@ void kmain(kinfo_t *local_cbi)
/* Kernel may use bits of main memory before VM is started */
kernel_may_alloc = 1;
KASSERT_PLACEHOLDER(sizeof(kinfo.boot_procs) == sizeof(image)); // MODIFIED
KASSERT(sizeof(kinfo.boot_procs) == sizeof(image));
kmemcpy(kinfo.boot_procs, image, sizeof(kinfo.boot_procs)); // MODIFIED
cstart();
@ -230,7 +230,7 @@ void kmain(kinfo_t *local_cbi)
}
/* Privileges for the root system process. */
else {
KASSERT_PLACEHOLDER(isrootsysn(proc_nr)); // MODIFIED
KASSERT(isrootsysn(proc_nr));
priv(rp)->s_flags= RSYS_F; /* privilege flags */
priv(rp)->s_init_flags = SRV_I; /* init flags */
priv(rp)->s_trap_mask= SRV_T; /* allowed traps */
@ -283,8 +283,8 @@ void kmain(kinfo_t *local_cbi)
kmemcpy(kinfo.boot_procs, image, sizeof(kinfo.boot_procs)); // MODIFIED
#define IPCNAME(n) { \
KASSERT_PLACEHOLDER((n) >= 0 && (n) <= IPCNO_HIGHEST); \
KASSERT_PLACEHOLDER(!ipc_call_names[n]); \
KASSERT((n) >= 0 && (n) <= IPCNO_HIGHEST); \
KASSERT(!ipc_call_names[n]); \
ipc_call_names[n] = #n; \
}

View File

@ -63,8 +63,9 @@ kernel_includes = include_directories(
klib_sources = files(
'klib/kstring.c',
'klib/kmemory.c',
'klib/kprintf_stub.c'
# Add other klib source files here if created (e.g. kpanic.c, katom.c)
'klib/kprintf_stub.c',
'klib/kpanic.c'
# TODO Add other klib source files here if created (e.g. katom.c)
)
klib = static_library('klib',
@ -154,6 +155,33 @@ elif arch_subdir == 'x86_64'
noop_x86_64 = [] # Placeholder
endif
# Define architecture-specific assembly sources
kernel_asm_sources = []
if arch == 'i386'
kernel_asm_sources = files(
'arch/i386/mpx.S',
'arch/i386/head.S',
'arch/i386/klib.S',
'arch/i386/apic_asm.S'
# Add other essential .S files here if identified later e.g.
# 'arch/i386/debugreg.S',
# 'arch/i386/io_inb.S', 'arch/i386/io_inl.S', 'arch/i386/io_intr.S', 'arch/i386/io_inw.S',
# 'arch/i386/io_outb.S', 'arch/i386/io_outl.S', 'arch/i386/io_outw.S',
# 'arch/i386/trampoline.S',
# 'arch/i386/usermapped_glo_ipc.S'
# For now, using the minimal list from feedback.
)
elif arch == 'x86_64'
# Assuming these files do not exist yet based on previous 'ls' output.
# If they are added, they can be listed here.
# kernel_asm_sources = files(
# 'arch/x86_64/mpx.S',
# 'arch/x86_64/head.S',
# 'arch/x86_64/klib.S'
# )
kernel_asm_sources = [] # Empty list for x86_64 if files are not present
endif
# Need to handle Assembly files separately and link them.
# For now, focusing on C files. This will be a FIXME for the build.
@ -170,7 +198,7 @@ kernel_link_args += ['-T', linker_script, '-Wl,--build-id=none']
# Build kernel executable
kernel = executable('kernel',
all_c_sources,
all_c_sources + kernel_asm_sources,
link_with: [klib],
link_args: kernel_link_args,
c_args: kernel_c_args,

View File

@ -43,6 +43,7 @@
// Added kernel headers
#include <minix/kernel_types.h>
#include <sys/kassert.h>
#include <klib/include/kprintf.h>
#include <klib/include/kstring.h>
#include <klib/include/kmemory.h>
@ -245,8 +246,8 @@ void vm_suspend(struct proc *caller, const struct proc *target,
/* This range is not OK for this process. Set parameters
* of the request and notify VM about the pending request.
*/
KASSERT_PLACEHOLDER(!RTS_ISSET(caller, RTS_VMREQUEST)); // MODIFIED
KASSERT_PLACEHOLDER(!RTS_ISSET(target, RTS_VMREQUEST)); // MODIFIED
KASSERT(!RTS_ISSET(caller, RTS_VMREQUEST));
KASSERT(!RTS_ISSET(target, RTS_VMREQUEST));
RTS_SET(caller, RTS_VMREQUEST);
@ -269,9 +270,9 @@ void vm_suspend(struct proc *caller, const struct proc *target,
*===========================================================================*/
static void delivermsg(struct proc *rp)
{
KASSERT_PLACEHOLDER(!RTS_ISSET(rp, RTS_VMREQUEST)); // MODIFIED
KASSERT_PLACEHOLDER(rp->p_misc_flags & MF_DELIVERMSG); // MODIFIED
KASSERT_PLACEHOLDER(rp->p_delivermsg.m_source != NONE); // MODIFIED
KASSERT(!RTS_ISSET(rp, RTS_VMREQUEST));
KASSERT(rp->p_misc_flags & MF_DELIVERMSG);
KASSERT(rp->p_delivermsg.m_source != NONE);
if (copy_msg_to_user(&rp->p_delivermsg,
(message *) rp->p_delivermsg_vir)) {
@ -357,13 +358,15 @@ not_runnable_pick_new:
check_misc_flags:
KASSERT_PLACEHOLDER(p); // MODIFIED
KASSERT_PLACEHOLDER(proc_is_runnable(p)); // MODIFIED
KASSERT(p);
KASSERT(proc_is_runnable(p));
while (p->p_misc_flags &
(MF_KCALL_RESUME | MF_DELIVERMSG |
MF_SC_DEFER | MF_SC_TRACE | MF_SC_ACTIVE)) {
KASSERT_PLACEHOLDER(proc_is_runnable(p)); // MODIFIED
KASSERT(proc_is_runnable(p));
if (p->p_misc_flags & MF_KCALL_RESUME) {
kernel_call_resume(p);
}
@ -375,7 +378,7 @@ check_misc_flags:
else if (p->p_misc_flags & MF_SC_DEFER) {
/* Perform the system call that we deferred earlier. */
KASSERT_PLACEHOLDER (!(p->p_misc_flags & MF_SC_ACTIVE)); // MODIFIED
KASSERT (!(p->p_misc_flags & MF_SC_ACTIVE));
arch_do_syscall(p);
@ -442,7 +445,8 @@ check_misc_flags:
#endif
p = arch_finish_switch_to_user();
KASSERT_PLACEHOLDER(p->p_cpu_time_left); // MODIFIED
KASSERT(p->p_cpu_time_left);
context_stop(proc_addr(KERNEL));
@ -458,10 +462,11 @@ check_misc_flags:
p->p_misc_flags &= ~MF_CONTEXT_SET;
#if defined(__i386__)
KASSERT_PLACEHOLDER(p->p_seg.p_cr3 != 0); // MODIFIED
KASSERT(p->p_seg.p_cr3 != 0);
#elif defined(__arm__)
KASSERT_PLACEHOLDER(p->p_seg.p_ttbr != 0); // MODIFIED
KASSERT(p->p_seg.p_ttbr != 0);
#endif
#ifdef CONFIG_SMP
if (p->p_misc_flags & MF_FLUSH_TLB) {
if (tlb_must_refresh)
@ -497,7 +502,7 @@ static int do_sync_ipc(struct proc * caller_ptr, /* who made the call */
* endpoint to corresponds to a process. In addition, it is necessary to check
* whether a process is allowed to send to a given destination.
*/
KASSERT_PLACEHOLDER(call_nr != SENDA); // MODIFIED
KASSERT(call_nr != SENDA);
/* Only allow non-negative call_nr values less than 32 */
if (call_nr < 0 || call_nr > IPCNO_HIGHEST || call_nr >= 32
@ -608,7 +613,7 @@ int do_ipc(reg_t r1, reg_t r2, reg_t r3)
struct proc *const caller_ptr = get_cpulocal_var(proc_ptr); /* get pointer to caller */
int call_nr = (int) r1;
KASSERT_PLACEHOLDER(!RTS_ISSET(caller_ptr, RTS_SLOT_FREE)); // MODIFIED
KASSERT(!RTS_ISSET(caller_ptr, RTS_SLOT_FREE));
/* bill kernel time to this process. */
kbill_ipc = caller_ptr;
@ -623,7 +628,9 @@ int do_ipc(reg_t r1, reg_t r2, reg_t r3)
* input message. Postpone the entire system call.
*/
caller_ptr->p_misc_flags &= ~MF_SC_TRACE;
KASSERT_PLACEHOLDER(!(caller_ptr->p_misc_flags & MF_SC_DEFER)); // MODIFIED
KASSERT(!(caller_ptr->p_misc_flags & MF_SC_DEFER));
caller_ptr->p_misc_flags |= MF_SC_DEFER;
caller_ptr->p_defer.r1 = r1;
caller_ptr->p_defer.r2 = r2;
@ -639,7 +646,8 @@ int do_ipc(reg_t r1, reg_t r2, reg_t r3)
/* If the MF_SC_DEFER flag is set, the syscall is now being resumed. */
caller_ptr->p_misc_flags &= ~MF_SC_DEFER;
KASSERT_PLACEHOLDER (!(caller_ptr->p_misc_flags & MF_SC_ACTIVE)); // MODIFIED
KASSERT (!(caller_ptr->p_misc_flags & MF_SC_ACTIVE));
/* Set a flag to allow reliable tracing of leaving the system call. */
caller_ptr->p_misc_flags |= MF_SC_ACTIVE;
@ -730,8 +738,9 @@ static int deadlock(
int src_dst_slot;
okendpt(src_dst_e, &src_dst_slot);
xp = proc_addr(src_dst_slot); /* follow chain of processes */
KASSERT_PLACEHOLDER(proc_ptr_ok(xp)); // MODIFIED
KASSERT_PLACEHOLDER(!RTS_ISSET(xp, RTS_SLOT_FREE)); // MODIFIED
KASSERT(proc_ptr_ok(xp));
KASSERT(!RTS_ISSET(xp, RTS_SLOT_FREE));
#if DEBUG_ENABLE_IPC_WARNINGS
processes[group_size] = xp;
#endif
@ -902,7 +911,7 @@ int mini_send(
if (WILLRECEIVE(caller_ptr->p_endpoint, dst_ptr, (vir_bytes)m_ptr, NULL)) {
int call;
/* Destination is indeed waiting for this message. */
KASSERT_PLACEHOLDER(!(dst_ptr->p_misc_flags & MF_DELIVERMSG)); // MODIFIED
KASSERT(!(dst_ptr->p_misc_flags & MF_DELIVERMSG));
if (!(flags & FROM_KERNEL)) {
if(copy_msg_from_user(m_ptr, &dst_ptr->p_delivermsg))
@ -956,7 +965,8 @@ int mini_send(
caller_ptr->p_sendto_e = dst_e;
/* Process is now blocked. Put in on the destination's queue. */
KASSERT_PLACEHOLDER(caller_ptr->p_q_link == NULL); // MODIFIED (NULL can be an issue if stddef.h is truly gone)
KASSERT(caller_ptr->p_q_link == NULL); /* NULL can be an issue if stddef.h is truly gone */
xpp = &dst_ptr->p_caller_q; /* find end of list */
while (*xpp) xpp = &(*xpp)->p_q_link;
*xpp = caller_ptr; /* add caller to end */
@ -984,7 +994,7 @@ static int mini_receive(struct proc * caller_ptr,
int r, src_id, found, src_proc_nr, src_p;
endpoint_t sender_e;
KASSERT_PLACEHOLDER(!(caller_ptr->p_misc_flags & MF_DELIVERMSG)); // MODIFIED
KASSERT(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
/* This is where we want our message. */
caller_ptr->p_delivermsg_vir = (vir_bytes) m_buff_usr;
@ -1025,12 +1035,13 @@ static int mini_receive(struct proc * caller_ptr,
kprintf_stub("mini_receive: sending notify from NONE\n"); // MODIFIED
}
#endif
KASSERT_PLACEHOLDER(src_proc_nr != NONE); // MODIFIED
KASSERT(src_proc_nr != NONE);
unset_notify_pending(caller_ptr, src_id); /* no longer pending */
/* Found a suitable source, deliver the notification message. */
KASSERT_PLACEHOLDER(!(caller_ptr->p_misc_flags & MF_DELIVERMSG)); // MODIFIED
KASSERT_PLACEHOLDER(src_e == ANY || sender_e == src_e); // MODIFIED
KASSERT(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
KASSERT(src_e == ANY || sender_e == src_e);
/* assemble message */
BuildNotifyMessage(&caller_ptr->p_delivermsg, src_proc_nr, caller_ptr);
@ -1064,11 +1075,12 @@ static int mini_receive(struct proc * caller_ptr,
if (CANRECEIVE(src_e, sender_e, caller_ptr, 0, &sender->p_sendmsg)) {
int call;
KASSERT_PLACEHOLDER(!RTS_ISSET(sender, RTS_SLOT_FREE)); // MODIFIED
KASSERT_PLACEHOLDER(!RTS_ISSET(sender, RTS_NO_ENDPOINT)); // MODIFIED
KASSERT(!RTS_ISSET(sender, RTS_SLOT_FREE));
KASSERT(!RTS_ISSET(sender, RTS_NO_ENDPOINT));
/* Found acceptable message. Copy it and update status. */
KASSERT_PLACEHOLDER(!(caller_ptr->p_misc_flags & MF_DELIVERMSG)); // MODIFIED
KASSERT(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
caller_ptr->p_delivermsg = sender->p_sendmsg;
caller_ptr->p_delivermsg.m_source = sender->p_endpoint;
caller_ptr->p_misc_flags |= MF_DELIVERMSG;
@ -1152,7 +1164,7 @@ int mini_notify(
* message and deliver it. Copy from pseudo-source HARDWARE, since the
* message is in the kernel's address space.
*/
KASSERT_PLACEHOLDER(!(dst_ptr->p_misc_flags & MF_DELIVERMSG)); // MODIFIED
KASSERT(!(dst_ptr->p_misc_flags & MF_DELIVERMSG));
BuildNotifyMessage(&dst_ptr->p_delivermsg, proc_nr(caller_ptr), dst_ptr);
dst_ptr->p_delivermsg.m_source = caller_ptr->p_endpoint;
@ -1382,7 +1394,8 @@ static int try_async(struct proc * caller_ptr)
}
#endif
KASSERT_PLACEHOLDER(!(caller_ptr->p_misc_flags & MF_DELIVERMSG)); // MODIFIED
KASSERT(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
if ((r = try_one(ANY, src_ptr, caller_ptr)) == OK)
return(r);
}
@ -1462,7 +1475,8 @@ static int try_one(endpoint_t receive_e, struct proc *src_ptr,
if (dst != dst_ptr->p_endpoint) continue;
if (!CANRECEIVE(receive_e, src_e, dst_ptr,
table_v + i*sizeof(asynmsg_t) + offsetof(struct asynmsg,msg), // offsetof may be an issue
table_v + i*sizeof(asynmsg_t) + K_OFFSETOF(struct asynmsg,msg),
NULL)) { // MODIFIED (NULL)
continue;
}
@ -1614,9 +1628,9 @@ void enqueue(
int q = rp->p_priority; /* scheduling queue to use */
struct proc **rdy_head, **rdy_tail;
KASSERT_PLACEHOLDER(proc_is_runnable(rp)); // MODIFIED
KASSERT(proc_is_runnable(rp));
KASSERT_PLACEHOLDER(q >= 0); // MODIFIED
KASSERT(q >= 0);
rdy_head = get_cpu_var(rp->p_cpu, run_q_head);
rdy_tail = get_cpu_var(rp->p_cpu, run_q_tail);
@ -1640,7 +1654,9 @@ void enqueue(
*/
struct proc * p;
p = get_cpulocal_var(proc_ptr);
KASSERT_PLACEHOLDER(p); // MODIFIED
KASSERT(p);
if((p->p_priority > rp->p_priority) &&
(priv(p)->s_flags & PREEMPTIBLE))
RTS_SET(p, RTS_PREEMPTED); /* calls dequeue() */
@ -1661,7 +1677,7 @@ void enqueue(
#if DEBUG_SANITYCHECKS
KASSERT_PLACEHOLDER(runqueues_ok_local()); // MODIFIED
KASSERT(runqueues_ok_local());
#endif
}
@ -1680,17 +1696,16 @@ static void enqueue_head(struct proc *rp)
struct proc **rdy_head, **rdy_tail;
KASSERT_PLACEHOLDER(proc_ptr_ok(rp)); // MODIFIED
KASSERT_PLACEHOLDER(proc_is_runnable(rp)); // MODIFIED
KASSERT(proc_ptr_ok(rp));
KASSERT(proc_is_runnable(rp));
/*
* the process was runnable without its quantum expired when dequeued. A
* process with no time left should have been handled else and differently
*/
KASSERT_PLACEHOLDER(rp->p_cpu_time_left); // MODIFIED
KASSERT_PLACEHOLDER(q >= 0); // MODIFIED
KASSERT(rp->p_cpu_time_left);
KASSERT(q >= 0);
rdy_head = get_cpu_var(rp->p_cpu, run_q_head);
rdy_tail = get_cpu_var(rp->p_cpu, run_q_tail);
@ -1713,7 +1728,8 @@ static void enqueue_head(struct proc *rp)
rp->p_accounting.preempted++;
#if DEBUG_SANITYCHECKS
KASSERT_PLACEHOLDER(runqueues_ok_local()); // MODIFIED
KASSERT(runqueues_ok_local());
#endif
}
@ -1737,11 +1753,11 @@ void dequeue(struct proc *rp)
struct proc **rdy_tail;
KASSERT_PLACEHOLDER(proc_ptr_ok(rp)); // MODIFIED
KASSERT_PLACEHOLDER(!proc_is_runnable(rp)); // MODIFIED
KASSERT(proc_ptr_ok(rp));
KASSERT(!proc_is_runnable(rp));
/* Side-effect for kernel: check if the task's stack still is ok? */
KASSERT_PLACEHOLDER (!iskernelp(rp) || *priv(rp)->s_stack_guard == STACK_GUARD); // MODIFIED
KASSERT (!iskernelp(rp) || *priv(rp)->s_stack_guard == STACK_GUARD);
rdy_tail = get_cpu_var(rp->p_cpu, run_q_tail);
@ -1782,7 +1798,8 @@ void dequeue(struct proc *rp)
rp->p_dequeued = get_monotonic();
#if DEBUG_SANITYCHECKS
KASSERT_PLACEHOLDER(runqueues_ok_local()); // MODIFIED
KASSERT(runqueues_ok_local());
#endif
}
@ -1811,7 +1828,8 @@ static struct proc * pick_proc(void)
TRACE(VF_PICKPROC, kprintf_stub("cpu %d queue %d empty\n", cpuid, q);); // MODIFIED
continue;
}
KASSERT_PLACEHOLDER(proc_is_runnable(rp)); // MODIFIED
KASSERT(proc_is_runnable(rp));
if (priv(rp)->s_flags & BILLABLE)
get_cpulocal_var(bill_ptr) = rp; /* bill for system time */
return rp;
@ -1869,7 +1887,7 @@ static void notify_scheduler(struct proc *p)
message m_no_quantum;
int err;
KASSERT_PLACEHOLDER(!proc_kernel_scheduler(p)); // MODIFIED
KASSERT(!proc_kernel_scheduler(p));
/* dequeue the process */
RTS_SET(p, RTS_NO_QUANTUM);
@ -1942,7 +1960,8 @@ void copr_not_available_handler(void)
/* if FPU is not owned by anyone, do not store anything */
local_fpu_owner = get_cpulocal_var_ptr(fpu_owner);
if (*local_fpu_owner != NULL) { // MODIFIED (NULL)
KASSERT_PLACEHOLDER(*local_fpu_owner != p); // MODIFIED
KASSERT(*local_fpu_owner != p);
save_local_fpu(*local_fpu_owner, FALSE /*retain*/);
}

View File

@ -2,6 +2,7 @@
// Added kernel headers
#include <minix/kernel_types.h>
#include <sys/kassert.h>
#include <klib/include/kprintf.h>
#include <klib/include/kstring.h>
#include <klib/include/kmemory.h>
@ -83,7 +84,8 @@ static void smp_schedule_sync(struct proc * p, unsigned task)
unsigned cpu = p->p_cpu;
unsigned mycpu = cpuid;
KASSERT_PLACEHOLDER(cpu != mycpu); // MODIFIED
KASSERT(cpu != mycpu);
/*
* if some other cpu made a request to the same cpu, wait until it is
* done before proceeding
@ -123,7 +125,7 @@ void smp_schedule_stop_proc(struct proc * p)
smp_schedule_sync(p, SCHED_IPI_STOP_PROC);
else
RTS_SET(p, RTS_PROC_STOP);
KASSERT_PLACEHOLDER(RTS_ISSET(p, RTS_PROC_STOP)); // MODIFIED
KASSERT(RTS_ISSET(p, RTS_PROC_STOP));
}
void smp_schedule_vminhibit(struct proc * p)
@ -132,7 +134,7 @@ void smp_schedule_vminhibit(struct proc * p)
smp_schedule_sync(p, SCHED_IPI_VM_INHIBIT);
else
RTS_SET(p, RTS_VMINHIBIT);
KASSERT_PLACEHOLDER(RTS_ISSET(p, RTS_VMINHIBIT)); // MODIFIED
KASSERT(RTS_ISSET(p, RTS_VMINHIBIT));
}
void smp_schedule_stop_proc_save_ctx(struct proc * p)
@ -142,7 +144,8 @@ void smp_schedule_stop_proc_save_ctx(struct proc * p)
* be saved (i.e. including FPU state and such)
*/
smp_schedule_sync(p, SCHED_IPI_STOP_PROC | SCHED_IPI_SAVE_CTX);
KASSERT_PLACEHOLDER(RTS_ISSET(p, RTS_PROC_STOP)); // MODIFIED
KASSERT(RTS_ISSET(p, RTS_PROC_STOP));
}
void smp_schedule_migrate_proc(struct proc * p, unsigned dest_cpu)
@ -152,7 +155,7 @@ void smp_schedule_migrate_proc(struct proc * p, unsigned dest_cpu)
* be saved (i.e. including FPU state and such)
*/
smp_schedule_sync(p, SCHED_IPI_STOP_PROC | SCHED_IPI_SAVE_CTX);
KASSERT_PLACEHOLDER(RTS_ISSET(p, RTS_PROC_STOP)); // MODIFIED
KASSERT(RTS_ISSET(p, RTS_PROC_STOP));
/* assign the new cpu and let the process run again */
p->p_cpu = dest_cpu;

View File

@ -45,6 +45,7 @@
// Added kernel headers
#include <minix/kernel_types.h>
#include <sys/kassert.h>
#include <klib/include/kprintf.h>
#include <klib/include/kstring.h>
#include <klib/include/kmemory.h>
@ -60,7 +61,7 @@ static int (*call_vec[NR_SYS_CALLS])(struct proc * caller, message *m_ptr);
#define map(call_nr, handler) \
{ int call_index = call_nr-KERNEL_CALL; \
KASSERT_PLACEHOLDER(call_index >= 0 && call_index < NR_SYS_CALLS); \
KASSERT(call_index >= 0 && call_index < NR_SYS_CALLS); \
call_vec[call_index] = (handler) ; }
static void kernel_call_finish(struct proc * caller, message *msg, int result)
@ -70,8 +71,8 @@ static void kernel_call_finish(struct proc * caller, message *msg, int result)
* until VM tells us it's allowed. VM has been notified
* and we must wait for its reply to restart the call.
*/
KASSERT_PLACEHOLDER(RTS_ISSET(caller, RTS_VMREQUEST)); // MODIFIED
KASSERT_PLACEHOLDER(caller->p_vmrequest.type == VMSTYPE_KERNELCALL); // MODIFIED
KASSERT(RTS_ISSET(caller, RTS_VMREQUEST));
KASSERT(caller->p_vmrequest.type == VMSTYPE_KERNELCALL);
caller->p_vmrequest.saved.reqmsg = *msg;
caller->p_misc_flags |= MF_KCALL_RESUME;
} else {
@ -620,10 +621,9 @@ void kernel_call_resume(struct proc *caller)
{
int result;
KASSERT_PLACEHOLDER(!RTS_ISSET(caller, RTS_SLOT_FREE)); // MODIFIED
KASSERT_PLACEHOLDER(!RTS_ISSET(caller, RTS_VMREQUEST)); // MODIFIED
KASSERT_PLACEHOLDER(caller->p_vmrequest.saved.reqmsg.m_source == caller->p_endpoint); // MODIFIED
KASSERT(!RTS_ISSET(caller, RTS_SLOT_FREE));
KASSERT(!RTS_ISSET(caller, RTS_VMREQUEST));
KASSERT(caller->p_vmrequest.saved.reqmsg.m_source == caller->p_endpoint);
/*
kprintf_stub("KERNEL_CALL restart from %s / %d rts 0x%08x misc 0x%08x\n", // MODIFIED
@ -820,7 +820,7 @@ int allow_ipc_filtered_msg(struct proc *rp, endpoint_t src_e,
return TRUE; /* no IPC filters, always allow */
if (m_src_p == NULL) { // MODIFIED (NULL)
KASSERT_PLACEHOLDER(m_src_v != 0); // MODIFIED
KASSERT(m_src_v != 0);
/* Should we copy in the message type? */
get_mtype = FALSE;
@ -841,7 +841,7 @@ int allow_ipc_filtered_msg(struct proc *rp, endpoint_t src_e,
if (get_mtype) {
/* FIXME: offsetof may be undefined */
r = data_copy(src_e,
m_src_v + offsetof(message, m_type), KERNEL,
m_src_v + K_OFFSETOF(message, m_type), KERNEL,
(vir_bytes)&m_buff.m_type, sizeof(m_buff.m_type));
if (r != OK) {
/* allow for now, this will fail later anyway */

View File

@ -16,6 +16,7 @@
// Added kernel headers
#include <minix/kernel_types.h> // For k_errno_t or similar if error codes are mapped
#include <sys/kassert.h>
#include <klib/include/kprintf.h>
#include <klib/include/kstring.h>
#include <klib/include/kmemory.h>
@ -86,7 +87,8 @@ int do_copy(struct proc * caller, message * m_ptr)
/* Now try to make the actual virtual copy. */
if(m_ptr->m_lsys_krn_sys_copy.flags & CP_FLAG_TRY) {
int r;
KASSERT_PLACEHOLDER(caller->p_endpoint == VFS_PROC_NR); // MODIFIED
KASSERT(caller->p_endpoint == VFS_PROC_NR);
r = virtual_copy(&vir_addr[_SRC_], &vir_addr[_DST_], bytes);
if(r == EFAULT_SRC || r == EFAULT_DST) return r = EFAULT; // EFAULT* might be undefined
return r;

View File

@ -20,6 +20,7 @@
// Added kernel headers
#include <minix/kernel_types.h> // For k_errno_t, k_sigset_t
#include <sys/kassert.h>
#include <klib/include/kprintf.h>
#include <klib/include/kstring.h>
#include <klib/include/kmemory.h>
@ -52,7 +53,7 @@ int do_fork(struct proc * caller, message * m_ptr)
rpc = proc_addr(m_ptr->m_lsys_krn_sys_fork.slot);
if (isemptyp(rpp) || ! isemptyp(rpc)) return(EINVAL); // EINVAL might be undefined
KASSERT_PLACEHOLDER(!(rpp->p_misc_flags & MF_DELIVERMSG)); // MODIFIED
KASSERT(!(rpp->p_misc_flags & MF_DELIVERMSG));
/* needs to be receiving so we know where the message buffer is */
if(!RTS_ISSET(rpp, RTS_RECEIVING)) {

View File

@ -18,6 +18,7 @@
// Added kernel headers
#include <minix/kernel_types.h> // For k_errno_t or similar if error codes are mapped
#include <sys/kassert.h>
#include <klib/include/kprintf.h>
#include <klib/include/kstring.h>
#include <klib/include/kmemory.h>
@ -58,7 +59,8 @@ int do_getmcontext(struct proc * caller, message * m_ptr)
/* make sure that the FPU context is saved into proc structure first */
save_fpu(rp);
mc.mc_flags = (rp->p_misc_flags & MF_FPU_INITIALIZED) ? _MC_FPU_SAVED : 0;
KASSERT_PLACEHOLDER(sizeof(mc.__fpregs.__fp_reg_set) == FPU_XFP_SIZE); // MODIFIED
KASSERT(sizeof(mc.__fpregs.__fp_reg_set) == FPU_XFP_SIZE);
kmemcpy(&(mc.__fpregs.__fp_reg_set), rp->p_seg.fpu_state, FPU_XFP_SIZE); // MODIFIED
}
#endif
@ -99,7 +101,8 @@ int do_setmcontext(struct proc * caller, message * m_ptr)
/* Copy FPU state */
if (mc.mc_flags & _MC_FPU_SAVED) {
rp->p_misc_flags |= MF_FPU_INITIALIZED;
KASSERT_PLACEHOLDER(sizeof(mc.__fpregs.__fp_reg_set) == FPU_XFP_SIZE); // MODIFIED
KASSERT(sizeof(mc.__fpregs.__fp_reg_set) == FPU_XFP_SIZE);
kmemcpy(rp->p_seg.fpu_state, &(mc.__fpregs.__fp_reg_set), FPU_XFP_SIZE); // MODIFIED
} else
rp->p_misc_flags &= ~MF_FPU_INITIALIZED;

View File

@ -12,6 +12,7 @@
// Added kernel headers
#include <minix/kernel_types.h> // For k_errno_t or similar if error codes are mapped
#include <sys/kassert.h>
#include <klib/include/kprintf.h>
#include <klib/include/kstring.h>
#include <klib/include/kmemory.h>
@ -69,7 +70,8 @@ int do_runctl(struct proc * caller, message * m_ptr)
RTS_SET(rp, RTS_PROC_STOP);
break;
case RC_RESUME:
KASSERT_PLACEHOLDER(RTS_ISSET(rp, RTS_PROC_STOP)); // MODIFIED
KASSERT(RTS_ISSET(rp, RTS_PROC_STOP));
RTS_UNSET(rp, RTS_PROC_STOP);
break;
default:

View File

@ -20,6 +20,7 @@
// Added kernel headers
#include <minix/kernel_types.h> // For k_size_t, k_errno_t
#include <sys/kassert.h>
#include <klib/include/kprintf.h>
#include <klib/include/kstring.h>
#include <klib/include/kmemory.h>
@ -266,7 +267,7 @@ int verify_grant(
sfinfo->endpt = granter;
/* FIXME: offsetof may be undefined */
sfinfo->addr = priv(granter_proc)->s_grant_table +
sizeof(g) * grant_idx + offsetof(cp_grant_t, cp_faulted);
sizeof(g) * grant_idx + K_OFFSETOF(cp_grant_t, cp_faulted);
sfinfo->value = grant;
}
@ -413,7 +414,8 @@ int do_vsafecopy(struct proc * caller, message * m_ptr)
/* Set vector copy parameters. */
src.proc_nr_e = caller->p_endpoint;
KASSERT_PLACEHOLDER(src.proc_nr_e != NONE); // MODIFIED
KASSERT(src.proc_nr_e != NONE);
src.offset = (vir_bytes) m_ptr->m_lsys_krn_vsafecopy.vec_addr;
dst.proc_nr_e = KERNEL;
dst.offset = (vir_bytes) vec;

View File

@ -151,21 +151,22 @@ int do_trace(struct proc * caller, message * m_ptr)
* tries to load them prior to restarting a process, so do
* not allow it.
*/
if (i == (int) &((struct proc *) 0)->p_reg.cs ||
i == (int) &((struct proc *) 0)->p_reg.ds ||
i == (int) &((struct proc *) 0)->p_reg.es ||
i == (int) &((struct proc *) 0)->p_reg.gs ||
i == (int) &((struct proc *) 0)->p_reg.fs ||
i == (int) &((struct proc *) 0)->p_reg.ss)
if (i == K_OFFSETOF(struct proc, p_reg.cs) ||
i == K_OFFSETOF(struct proc, p_reg.ds) ||
i == K_OFFSETOF(struct proc, p_reg.es) ||
i == K_OFFSETOF(struct proc, p_reg.gs) ||
i == K_OFFSETOF(struct proc, p_reg.fs) ||
i == K_OFFSETOF(struct proc, p_reg.ss))
return(EFAULT); // EFAULT might be undefined
if (i == (int) &((struct proc *) 0)->p_reg.psw)
if (i == K_OFFSETOF(struct proc, p_reg.psw))
/* only selected bits are changeable */
SETPSW(rp, tr_data);
else
*(reg_t *) ((char *) &rp->p_reg + i) = (reg_t) tr_data;
#elif defined(__arm__)
if (i == (int) &((struct proc *) 0)->p_reg.psr) {
if (i == K_OFFSETOF(struct proc, p_reg.psr)) {
/* only selected bits are changeable */
SET_USR_PSR(rp, tr_data);
} else {

View File

@ -13,11 +13,11 @@
// Added kernel headers
#include <minix/kernel_types.h> // For k_errno_t
#include <sys/kassert.h>
#include <klib/include/kprintf.h>
#include <klib/include/kstring.h>
#include <klib/include/kmemory.h>
#if USE_UPDATE
#define DEBUG 0
@ -78,7 +78,7 @@ int do_update(struct proc * caller, message * m_ptr)
return EPERM; // EPERM might be undefined
}
KASSERT_PLACEHOLDER(!proc_is_runnable(src_rp) && !proc_is_runnable(dst_rp)); // MODIFIED
KASSERT(!proc_is_runnable(src_rp) && !proc_is_runnable(dst_rp));
/* Check if processes are updatable. */
if(!proc_is_updatable(src_rp) || !proc_is_updatable(dst_rp)) {

View File

@ -13,6 +13,7 @@
// Added kernel headers
#include <minix/kernel_types.h> // For k_errno_t
#include <sys/kassert.h>
#include <klib/include/kprintf.h>
#include <klib/include/kstring.h>
#include <klib/include/kmemory.h>
@ -38,7 +39,8 @@ int do_vmctl(struct proc * caller, message * m_ptr)
switch(m_ptr->SVMCTL_PARAM) {
case VMCTL_CLEAR_PAGEFAULT:
KASSERT_PLACEHOLDER(RTS_ISSET(p,RTS_PAGEFAULT)); // MODIFIED
KASSERT(RTS_ISSET(p,RTS_PAGEFAULT));
RTS_UNSET(p, RTS_PAGEFAULT);
return OK;
case VMCTL_MEMREQ_GET:
@ -51,7 +53,7 @@ int do_vmctl(struct proc * caller, message * m_ptr)
rpp = &(*rpp)->p_vmrequest.nextrequestor) {
rp = *rpp;
KASSERT_PLACEHOLDER(RTS_ISSET(rp, RTS_VMREQUEST)); // MODIFIED
KASSERT(RTS_ISSET(rp, RTS_VMREQUEST));
okendpt(rp->p_vmrequest.target, &proc_nr);
target = proc_addr(proc_nr);
@ -86,12 +88,12 @@ int do_vmctl(struct proc * caller, message * m_ptr)
return ENOENT; // ENOENT might be undefined
case VMCTL_MEMREQ_REPLY:
KASSERT_PLACEHOLDER(RTS_ISSET(p, RTS_VMREQUEST)); // MODIFIED
KASSERT_PLACEHOLDER(p->p_vmrequest.vmresult == VMSUSPEND); // MODIFIED
KASSERT(RTS_ISSET(p, RTS_VMREQUEST));
KASSERT(p->p_vmrequest.vmresult == VMSUSPEND);
okendpt(p->p_vmrequest.target, &proc_nr);
target = proc_addr(proc_nr);
p->p_vmrequest.vmresult = m_ptr->SVMCTL_VALUE;
KASSERT_PLACEHOLDER(p->p_vmrequest.vmresult != VMSUSPEND); // MODIFIED
KASSERT(p->p_vmrequest.vmresult != VMSUSPEND);
switch(p->p_vmrequest.type) {
case VMSTYPE_KERNELCALL:
@ -102,12 +104,12 @@ int do_vmctl(struct proc * caller, message * m_ptr)
p->p_misc_flags |= MF_KCALL_RESUME;
break;
case VMSTYPE_DELIVERMSG:
KASSERT_PLACEHOLDER(p->p_misc_flags & MF_DELIVERMSG); // MODIFIED
KASSERT_PLACEHOLDER(p == target); // MODIFIED
KASSERT_PLACEHOLDER(RTS_ISSET(p, RTS_VMREQUEST)); // MODIFIED
KASSERT(p->p_misc_flags & MF_DELIVERMSG);
KASSERT(p == target);
KASSERT(RTS_ISSET(p, RTS_VMREQUEST));
break;
case VMSTYPE_MAP:
KASSERT_PLACEHOLDER(RTS_ISSET(p, RTS_VMREQUEST)); // MODIFIED
KASSERT(RTS_ISSET(p, RTS_VMREQUEST));
break;
default:
panic("strange request type: %d",p->p_vmrequest.type);
@ -142,7 +144,8 @@ int do_vmctl(struct proc * caller, message * m_ptr)
#endif
return OK;
case VMCTL_VMINHIBIT_CLEAR:
KASSERT_PLACEHOLDER(RTS_ISSET(p, RTS_VMINHIBIT)); // MODIFIED
KASSERT(RTS_ISSET(p, RTS_VMINHIBIT));
/*
* the processes is certainly not runnable, no need to tell its
* cpu

View File

@ -18,6 +18,7 @@
// Added kernel headers
#include <minix/kernel_types.h> // For k_size_t, k_errno_t
#include <sys/kassert.h>
#include <klib/include/kprintf.h>
#include <klib/include/kstring.h>
#include <klib/include/kmemory.h>
@ -126,7 +127,8 @@ int do_vumap(struct proc *caller, message *m_ptr)
}
/* Copy out the resulting vector of physical addresses. */
KASSERT_PLACEHOLDER(pcount > 0); // MODIFIED
KASSERT(pcount > 0);
size = pcount * sizeof(pvec[0]);