From dece0b4c70f9200333bf2ef7bb454eb5b1faddfe Mon Sep 17 00:00:00 2001 From: Philippe Aubertin <39178965+phaubertin@users.noreply.github.com> Date: Thu, 21 Nov 2024 17:02:10 -0500 Subject: [PATCH] Single ready queue scheduler (#91) First scheduler implementation with a single shared ready queue. Allow user thread to be preempted on a tick interrupt. --- include/kernel/domain/entities/thread.h | 10 - .../kernel/domain/services/asm/scheduler.h | 37 +++ include/kernel/domain/services/scheduler.h | 55 ++++ include/kernel/interface/i686/trap.h | 9 + include/kernel/types.h | 1 + kernel/Makefile | 2 + kernel/application/interrupts/tick.c | 3 +- kernel/application/syscalls/yield_thread.c | 4 +- kernel/domain/entities/thread.c | 215 +------------- kernel/domain/services/ipc.c | 16 +- kernel/domain/services/scheduler.c | 273 ++++++++++++++++++ kernel/interface/i686/interrupts.c | 7 +- kernel/interface/i686/trap.asm | 44 +-- kernel/interface/i686/trap.c | 46 +++ 14 files changed, 466 insertions(+), 256 deletions(-) create mode 100644 include/kernel/domain/services/asm/scheduler.h create mode 100644 include/kernel/domain/services/scheduler.h create mode 100644 kernel/domain/services/scheduler.c create mode 100644 kernel/interface/i686/trap.c diff --git a/include/kernel/domain/entities/thread.h b/include/kernel/domain/entities/thread.h index c7c18b60..f7f72256 100644 --- a/include/kernel/domain/entities/thread.h +++ b/include/kernel/domain/entities/thread.h @@ -44,22 +44,12 @@ thread_t *thread_new(process_t *process); void thread_prepare(thread_t *thread, const thread_params_t *params); -void thread_ready(thread_t *thread); - void thread_run_first(thread_t *thread); void thread_run(thread_t *thread); -void thread_yield_current(void); - void thread_terminate_current(void); -void thread_switch_to(thread_t *to); - -void thread_switch_to_and_block(thread_t *to); - -void thread_block_current_and_unlock(spinlock_t *lock); - int thread_await(thread_t *thread); void 
thread_set_local_storage(thread_t *thread, addr_t addr, size_t size); diff --git a/include/kernel/domain/services/asm/scheduler.h b/include/kernel/domain/services/asm/scheduler.h new file mode 100644 index 00000000..5aaac235 --- /dev/null +++ b/include/kernel/domain/services/asm/scheduler.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2019-2024 Philippe Aubertin. + * All rights reserved. + + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the author nor the names of other contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef JINUE_KERNEL_SERVICES_ASM_SCHEDULER_H +#define JINUE_KERNEL_SERVICES_ASM_SCHEDULER_H + +#define SCHEDULER_BASE_CREDITS 3 + +#endif diff --git a/include/kernel/domain/services/scheduler.h b/include/kernel/domain/services/scheduler.h new file mode 100644 index 00000000..ff9e47c6 --- /dev/null +++ b/include/kernel/domain/services/scheduler.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2019-2024 Philippe Aubertin. + * All rights reserved. + + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the author nor the names of other contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef JINUE_KERNEL_SERVICES_SCHEDULER_H +#define JINUE_KERNEL_SERVICES_SCHEDULER_H + +#include +#include +#include + +void reschedule(void); + +void scheduler_tick(void); + +void ready_thread(thread_t *thread); + +void yield_current_thread(void); + +void switch_to_thread(thread_t *to); + +void switch_to_thread_and_block(thread_t *to); + +void block_current_thread_and_unlock(spinlock_t *lock); + +void switch_from_exiting_thread(void); + +#endif diff --git a/include/kernel/interface/i686/trap.h b/include/kernel/interface/i686/trap.h index 0fe94939..86fb3e3e 100644 --- a/include/kernel/interface/i686/trap.h +++ b/include/kernel/interface/i686/trap.h @@ -32,8 +32,13 @@ #ifndef JINUE_KERNEL_INTERFACE_I686_TRAP_H #define JINUE_KERNEL_INTERFACE_I686_TRAP_H +#include +#include + extern int syscall_implementation; +void handle_trap(trapframe_t *trapframe); + /** entry point for Intel fast system call implementation (SYSENTER/SYSEXIT) */ void fast_intel_entry(void); @@ -44,4 +49,8 @@ void fast_amd_entry(void); * first time. See thread_page_create(). 
*/ void return_from_interrupt(void); +static inline jinue_syscall_args_t *trapframe_syscall_args(trapframe_t *trapframe) { + return (jinue_syscall_args_t *)&trapframe->msg_arg0; +} + #endif diff --git a/include/kernel/types.h b/include/kernel/types.h index 5b43cfac..14071b39 100644 --- a/include/kernel/types.h +++ b/include/kernel/types.h @@ -109,6 +109,7 @@ struct thread_t { machine_thread_t machine_thread; list_node_t thread_list; thread_state_t state; + int cpu_credits; process_t *process; struct thread_t *sender; struct thread_t *awaiter; diff --git a/kernel/Makefile b/kernel/Makefile index 28f083fd..e7d1b4cf 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -98,6 +98,7 @@ sources.kernel.c = \ domain/services/ipc.c \ domain/services/logging.c \ domain/services/panic.c \ + domain/services/scheduler.c \ domain/config.c \ infrastructure/i686/drivers/vga.c \ infrastructure/i686/drivers/pic8259.c \ @@ -121,6 +122,7 @@ sources.kernel.c = \ interface/i686/auxv.c \ interface/i686/bootinfo.c \ interface/i686/interrupts.c \ + interface/i686/trap.c \ interface/syscalls.c sources.kernel.nasm = \ diff --git a/kernel/application/interrupts/tick.c b/kernel/application/interrupts/tick.c index 80842499..1887e3fb 100644 --- a/kernel/application/interrupts/tick.c +++ b/kernel/application/interrupts/tick.c @@ -30,7 +30,8 @@ */ #include +#include void tick_interrupt(void) { - /* TODO implement something here */ + scheduler_tick(); } diff --git a/kernel/application/syscalls/yield_thread.c b/kernel/application/syscalls/yield_thread.c index 22f58e2a..7302bbb9 100644 --- a/kernel/application/syscalls/yield_thread.c +++ b/kernel/application/syscalls/yield_thread.c @@ -30,8 +30,8 @@ */ #include -#include +#include void yield_thread(void) { - thread_yield_current(); + yield_current_thread(); } diff --git a/kernel/domain/entities/thread.c b/kernel/domain/entities/thread.c index 454c8397..4d494bde 100644 --- a/kernel/domain/entities/thread.c +++ b/kernel/domain/entities/thread.c @@ 
-31,16 +31,14 @@ #include #include -#include #include #include #include #include -#include +#include #include #include #include -#include static void free_op(object_header_t *object); @@ -59,15 +57,6 @@ static const object_type_t object_type = { /** runtime type definition for a thread */ const object_type_t *object_type_thread = &object_type; -/** ready threads queue with lock */ -static struct { - list_t queue; - spinlock_t lock; -} ready_queue = { - .queue = STATIC_LIST, - .lock = SPINLOCK_STATIC -}; - /** * Thread constructor * @@ -138,45 +127,15 @@ void thread_prepare(thread_t *thread, const thread_params_t *params) { spin_lock(&thread->await_lock); - thread->awaiter = NULL; - thread->state = THREAD_STATE_STARTING; + thread->awaiter = NULL; + thread->state = THREAD_STATE_STARTING; + thread->cpu_credits = 0; spin_unlock(&thread->await_lock); machine_prepare_thread(thread, params); } -/** - * Add a thread to the ready queue (without locking) - * - * This funtion contains the business logic for thread_ready() without the - * locking. Some functions beside thread_ready() that need to block and then - * unlock call it, hence why it is a separate function. 
- * - * @param thread the thread - * - */ -static void thread_ready_locked(thread_t *thread) { - thread->state = THREAD_STATE_READY; - - /* add thread to the tail of the ready list to give other threads a chance to run */ - list_enqueue(&ready_queue.queue, &thread->thread_list); -} - -/** - * Add a thread to the ready queue - * - * @param thread the thread - * - */ -void thread_ready(thread_t *thread) { - spin_lock(&ready_queue.lock); - - thread_ready_locked(thread); - - spin_unlock(&ready_queue.lock); -} - /** * Common logic for a starting thread * @@ -224,78 +183,7 @@ void thread_run_first(thread_t *thread) { void thread_run(thread_t *thread) { thread_is_starting(thread); - thread_ready(thread); -} - -/** - * Get the thread at the head of the ready queue - * - * @return thread ready to run, NULL if there are none - * - */ -static thread_t *dequeue_ready_thread(void) { - spin_lock(&ready_queue.lock); - - thread_t *thread = list_dequeue(&ready_queue.queue, thread_t, thread_list); - - spin_unlock(&ready_queue.lock); - - return thread; -} - -/** - * Get the next thread to run - * - * @param current_can_run whether the current thread can continue running - * @return thread ready to run - * - */ -static thread_t *reschedule(bool current_can_run) { - thread_t *to = dequeue_ready_thread(); - - if(to == NULL) { - /* Special case to take into account: when scheduling the first thread, - * there is no current thread. We should not call get_current_thread() - * in that case. */ - if(current_can_run) { - return get_current_thread(); - } - - /* Currently, scheduling is purely cooperative and only one CPU is - * supported (so, there are no threads currently running on other - * CPUs). What this means is that, once there are no more threads - * running or ready to run, this situation will never change. */ - panic("No thread to schedule"); - } - - return to; -} - -/** - * Yield the current thread - * - * The current thread is added at the tail of the ready queue. 
It continues - * running if no other thread is ready to run. - */ -void thread_yield_current(void) { - thread_t *current = get_current_thread(); - thread_t *to = reschedule(true); - - if(to == current) { - return; - } - - to->state = THREAD_STATE_RUNNING; - - if(current->process != to->process) { - process_switch_to(to->process); - } - - spin_lock(&ready_queue.lock); - - thread_ready_locked(current); - - machine_switch_thread_and_unlock(current, to, &ready_queue.lock); + ready_thread(thread); } /** @@ -315,7 +203,7 @@ void thread_terminate_current(void) { current->state = THREAD_STATE_ZOMBIE; if(current->awaiter != NULL) { - thread_ready(current->awaiter); + ready_thread(current->awaiter); } spin_unlock(¤t->await_lock); @@ -325,94 +213,7 @@ void thread_terminate_current(void) { current->sender = NULL; } - thread_t *to = reschedule(false); - to->state = THREAD_STATE_RUNNING; - - if(current->process != to->process) { - process_switch_to(to->process); - } - - /* This must be done after switching process since it will destroy the process - * if the current thread is the last one. We don't want to destroy the address - * space we are still running in... */ - process_remove_running_thread(current->process); - - /* This function takes care of safely decrementing the reference count on - * the thread after having switched to the other one. We cannot just do it - * here because that will possibly free the current thread, which we don't - * want to do while it is still running. */ - machine_switch_and_unref_thread(current, to); -} - -/** - * Switch to another thread - * - * The current thread remains ready to run and is added to the ready queue. 
- * - * @param to thread to switch to - * - */ -void thread_switch_to(thread_t *to) { - thread_t *current = get_current_thread(); - - to->state = THREAD_STATE_RUNNING; - - if(current->process != to->process) { - process_switch_to(to->process); - } - - spin_lock(&ready_queue.lock); - - thread_ready_locked(current); - - machine_switch_thread_and_unlock(current, to, &ready_queue.lock); -} - -/** - * Switch to another thread and block the current thread - * - * @param to thread to switch to - * - */ -void thread_switch_to_and_block(thread_t *to) { - thread_t *current = get_current_thread(); - current->state = THREAD_STATE_BLOCKED; - to->state = THREAD_STATE_RUNNING; - - if(current->process != to->process) { - process_switch_to(to->process); - } - - machine_switch_thread(current, to); -} - -/** - * Block the current thread and then unlock a lock - * - * The lock is unlocked *after* the switch to another thread. This function - * eliminates race conditions when enqueuing the current thread to a queue, - * setting it as the awaiter of another thread, etc. and then blocking, if - * the following sequence is followed: - * - * 1. Take the lock (e.g. the lock protecting a queue). - * 2. Add the thread (e.g. to the queue). - * 3. Call this function to block the thread and release the lock atomically. 
- * - * @param lock the lock to unlock after switching thread - * - */ -void thread_block_current_and_unlock(spinlock_t *lock) { - thread_t *current = get_current_thread(); - current->state = THREAD_STATE_BLOCKED; - - thread_t *to = reschedule(false); - to->state = THREAD_STATE_RUNNING; - - if(current->process != to->process) { - process_switch_to(to->process); - } - - machine_switch_thread_and_unlock(current, to, lock); + switch_from_exiting_thread(); } /** @@ -441,7 +242,7 @@ int thread_await(thread_t *thread) { if(thread->state == THREAD_STATE_ZOMBIE) { spin_unlock(&thread->await_lock); } else { - thread_block_current_and_unlock(&thread->await_lock); + block_current_thread_and_unlock(&thread->await_lock); } return 0; diff --git a/kernel/domain/services/ipc.c b/kernel/domain/services/ipc.c index 647b9fc8..87adad60 100644 --- a/kernel/domain/services/ipc.c +++ b/kernel/domain/services/ipc.c @@ -34,8 +34,8 @@ #include #include #include -#include #include +#include #include #include #include @@ -241,14 +241,14 @@ int send_message( if(receiver == NULL) { /* No thread is waiting to receive this message, so we must wait on the sender list. */ list_enqueue(&endpoint->send_list, &sender->thread_list); - thread_block_current_and_unlock(&endpoint->lock); + block_current_thread_and_unlock(&endpoint->lock); } else { spin_unlock(&endpoint->lock); receiver->sender = sender; /* switch to receiver thread, which will resume inside syscall_receive() */ - thread_switch_to_and_block(receiver); + switch_to_thread_and_block(receiver); } if(sender->message_errno == JINUE_EPROTO) { @@ -307,7 +307,7 @@ int receive_message(ipc_endpoint_t *endpoint, thread_t *receiver, jinue_message_ if(sender == NULL) { /* No thread is waiting to send a message, so we must wait on the receive list. 
*/ list_enqueue(&endpoint->recv_list, &receiver->thread_list); - thread_block_current_and_unlock(&endpoint->lock); + block_current_thread_and_unlock(&endpoint->lock); /* set by sending thread */ sender = receiver->sender; @@ -327,7 +327,7 @@ int receive_message(ipc_endpoint_t *endpoint, thread_t *receiver, jinue_message_ sender->message_errno = JINUE_E2BIG; receiver->sender = NULL; - thread_ready(sender); + ready_thread(sender); continue; } @@ -382,7 +382,7 @@ int reply_to_message(thread_t *replier, const jinue_message_t *message) { replier->sender = NULL; /* switch back to sender thread to return from call immediately */ - thread_switch_to(replyto); + switch_to_thread(replyto); return 0; } @@ -410,7 +410,7 @@ int reply_error_to_message(thread_t *replier, uintptr_t errcode) { replier->sender = NULL; /* switch back to sender thread to return from call immediately */ - thread_switch_to(replyto); + switch_to_thread(replyto); return 0; } @@ -431,5 +431,5 @@ int reply_error_to_message(thread_t *replier, uintptr_t errcode) { */ void abort_message(thread_t *thread) { thread->message_errno = JINUE_EIO; - thread_ready(thread); + ready_thread(thread); } diff --git a/kernel/domain/services/scheduler.c b/kernel/domain/services/scheduler.c new file mode 100644 index 00000000..287c4692 --- /dev/null +++ b/kernel/domain/services/scheduler.c @@ -0,0 +1,273 @@ +/* + * Copyright (C) 2019-2024 Philippe Aubertin. + * All rights reserved. + + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the author nor the names of other contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include + +/** ready threads queue with lock */ +static struct { + list_t queue; + spinlock_t lock; +} ready_queue = { + .queue = STATIC_LIST, + .lock = SPINLOCK_STATIC +}; + +/** + * Get the thread at the head of the ready queue + * + * @return thread ready to run, NULL if there are none + * + */ +static thread_t *dequeue_ready_thread(void) { + spin_lock(&ready_queue.lock); + + thread_t *thread = list_dequeue(&ready_queue.queue, thread_t, thread_list); + + spin_unlock(&ready_queue.lock); + + return thread; +} + +/** + * Get the next thread to run + * + * @param current_can_run whether the current thread can continue running + * @return thread ready to run + * + */ +static thread_t *select_next_ready_thread(bool current_can_run) { + thread_t *to = dequeue_ready_thread(); + + if(to == NULL && current_can_run) { + to = get_current_thread(); + } + + if(to == NULL) { + /* Currently, scheduling is purely cooperative and only one CPU is + * supported (so, there are 
no threads currently running on other + * CPUs). What this means is that, once there are no more threads + * running or ready to run, this situation will never change. */ + panic("No thread to schedule"); + } + + to->cpu_credits += SCHEDULER_BASE_CREDITS; + + return to; +} + +/** + * Add a thread to the ready queue (without locking) + * + * This function contains the business logic for ready_thread() without the + * locking. Some functions beside ready_thread() that need to block and then + * unlock call it, hence why it is a separate function. + * + * @param thread the thread + * + */ +static void thread_ready_locked(thread_t *thread) { + thread->state = THREAD_STATE_READY; + + /* add thread to the tail of the ready list to give other threads a chance to run */ + list_enqueue(&ready_queue.queue, &thread->thread_list); +} + + +/** + * Preempt the current thread if it's time + */ +void reschedule(void) { + thread_t *current = get_current_thread(); + + if(current->cpu_credits > 0) { + return; + } + + thread_t *to = select_next_ready_thread(true); + + if(to == current) { + return; + } + + to->state = THREAD_STATE_RUNNING; + + if(current->process != to->process) { + process_switch_to(to->process); + } + + spin_lock(&ready_queue.lock); + + thread_ready_locked(current); + + machine_switch_thread_and_unlock(current, to, &ready_queue.lock); +} + +/** + * Decrement current thread CPU credits due to timer tick + */ +void scheduler_tick(void) { + thread_t *current = get_current_thread(); + + if(current->cpu_credits > 0) { + --current->cpu_credits; + } +} + +/** + * Add a thread to the ready queue + * + * @param thread the thread + * + */ +void ready_thread(thread_t *thread) { + spin_lock(&ready_queue.lock); + + thread_ready_locked(thread); + + spin_unlock(&ready_queue.lock); +} + +/** + * Yield the current thread + * + * The current thread is added at the tail of the ready queue. It continues + * running if no other thread is ready to run. 
+ */ +void yield_current_thread(void) { + /* This defers the thread switch to the next time reschedule() is called, + * which will happen at the end of the system call. */ + thread_t *current = get_current_thread(); + current->cpu_credits = 0; +} + +/** + * Switch to another thread + * + * The current thread remains ready to run and is added to the ready queue. + * + * @param to thread to switch to + * + */ +void switch_to_thread(thread_t *to) { + thread_t *current = get_current_thread(); + + to->state = THREAD_STATE_RUNNING; + + if(current->process != to->process) { + process_switch_to(to->process); + } + + spin_lock(&ready_queue.lock); + + thread_ready_locked(current); + + machine_switch_thread_and_unlock(current, to, &ready_queue.lock); +} + +/** + * Switch to another thread and block the current thread + * + * @param to thread to switch to + * + */ +void switch_to_thread_and_block(thread_t *to) { + thread_t *current = get_current_thread(); + current->state = THREAD_STATE_BLOCKED; + to->state = THREAD_STATE_RUNNING; + + if(current->process != to->process) { + process_switch_to(to->process); + } + + machine_switch_thread(current, to); +} + +/** + * Block the current thread and then unlock a lock + * + * The lock is unlocked *after* the switch to another thread. This function + * eliminates race conditions when enqueuing the current thread to a queue, + * setting it as the awaiter of another thread, etc. and then blocking, if + * the following sequence is followed: + * + * 1. Take the lock (e.g. the lock protecting a queue). + * 2. Add the thread (e.g. to the queue). + * 3. Call this function to block the thread and release the lock atomically. 
+ * + * @param lock the lock to unlock after switching thread + * + */ +void block_current_thread_and_unlock(spinlock_t *lock) { + thread_t *current = get_current_thread(); + current->state = THREAD_STATE_BLOCKED; + + thread_t *to = select_next_ready_thread(false); + to->state = THREAD_STATE_RUNNING; + + if(current->process != to->process) { + process_switch_to(to->process); + } + + machine_switch_thread_and_unlock(current, to, lock); +} + +/** + * Switch away from exiting thread + * + * This must be done with care since both the current process and thread might + * be destroyed and/or freed while doing this. + */ +void switch_from_exiting_thread(void) { + thread_t *current = get_current_thread(); + + thread_t *to = select_next_ready_thread(false); + to->state = THREAD_STATE_RUNNING; + + if(current->process != to->process) { + process_switch_to(to->process); + } + + /* This must be done after switching process since it will destroy the process + * if the current thread is the last one. We don't want to destroy the address + * space we are still running in... */ + process_remove_running_thread(current->process); + + /* This function takes care of safely decrementing the reference count on + * the thread after having switched to the other one. We cannot just do it + * here because that will possibly free the current thread, which we don't + * want to do while it is still running. */ + machine_switch_and_unref_thread(current, to); +} diff --git a/kernel/interface/i686/interrupts.c b/kernel/interface/i686/interrupts.c index e741c85d..bf7ca7ac 100644 --- a/kernel/interface/i686/interrupts.c +++ b/kernel/interface/i686/interrupts.c @@ -29,7 +29,6 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#include #include #include #include @@ -39,7 +38,6 @@ #include #include #include -#include #include @@ -82,10 +80,7 @@ static void handle_unexpected_interrupt(unsigned int ivt) { void handle_interrupt(trapframe_t *trapframe) { unsigned int ivt = trapframe->ivt; - if(ivt == JINUE_I686_SYSCALL_INTERRUPT) { - jinue_syscall_args_t *args = (jinue_syscall_args_t *)&trapframe->msg_arg0; - handle_syscall(args); - } else if(ivt <= IDT_LAST_EXCEPTION) { + if(ivt <= IDT_LAST_EXCEPTION) { handle_exception(ivt, trapframe->eip, trapframe->errcode); } else if(ivt >= IDT_PIC8259_BASE && ivt < IDT_PIC8259_BASE + PIC8259_IRQ_COUNT) { handle_hardware_interrupt(ivt); diff --git a/kernel/interface/i686/trap.asm b/kernel/interface/i686/trap.asm index 9d1c8b2a..020545c5 100644 --- a/kernel/interface/i686/trap.asm +++ b/kernel/interface/i686/trap.asm @@ -27,6 +27,7 @@ ; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +#include #include #include #include @@ -36,8 +37,7 @@ bits 32 - extern handle_interrupt - extern handle_syscall + extern handle_trap ; ------------------------------------------------------------------------------ ; FUNCTION: interrupt_entry @@ -131,11 +131,11 @@ interrupt_entry: mov eax, SEG_SELECTOR(GDT_PER_CPU_DATA, RPL_KERNEL) mov gs, ax - ; set handle_interrupt() function argument + ; set handle_trap() function argument push esp ; First argument: trapframe ; call interrupt dispatching function - call handle_interrupt + call handle_trap ; remove argument(s) from stack add esp, 4 @@ -185,7 +185,11 @@ fast_intel_entry: mov ebp, 0 ; setup dummy frame pointer push byte 0 ; 48 ebp (caller-saved by kernel calling convention) - push byte 0 ; 44 interrupt vector (unused) + + ; 44 interrupt vector + ; + ; This interrupt vector value tells handle_trap() this is a system call. 
+ push dword JINUE_I686_SYSCALL_INTERRUPT push byte 0 ; 40 error code (unused) push gs ; 36 push fs ; 32 @@ -211,16 +215,12 @@ fast_intel_entry: mov eax, SEG_SELECTOR(GDT_PER_CPU_DATA, RPL_KERNEL) mov gs, ax - ; set handle_syscall() function argument - ; - ; The message arguments, a ponter to which handle_syscall() takes - ; as argument are at the beginning of the trap frame, so we can just - ; pass the address of the trap frame. - push esp ; First argument: message arguments + ; set handle_trap() function argument + push esp ; First argument: trapframe - call handle_syscall + call handle_trap - ; cleanup handle_syscall() argument + ; cleanup handle_trap() argument add esp, 4 pop eax ; 0 @@ -287,7 +287,11 @@ fast_amd_entry: mov ebp, 0 ; setup dummy frame pointer push byte 0 ; 48 ebp (caller-saved by kernel calling convention) - push byte 0 ; 44 interrupt vector (unused) + + ; 44 interrupt vector + ; + ; This interrupt vector value tells handle_trap() this is a system call. + push dword JINUE_I686_SYSCALL_INTERRUPT push byte 0 ; 40 error code (unused) push byte 0 ; 36 gs (caller-saved by kernel calling convention) push fs ; 32 @@ -309,16 +313,12 @@ fast_amd_entry: mov ds, cx mov es, cx - ; set handle_syscall() function argument - ; - ; The message arguments, a ponter to which handle_syscall() takes - ; as argument are at the beginning of the trap frame, so we can just - ; pass the address of the trap frame. - push esp ; First argument: message arguments + ; set handle_trap() function argument + push esp ; First argument: trapframe - call handle_syscall + call handle_trap - ; cleanup handle_syscall() argument + ; cleanup handle_trap() argument add esp, 4 pop eax ; 0 diff --git a/kernel/interface/i686/trap.c b/kernel/interface/i686/trap.c new file mode 100644 index 00000000..c0b049c8 --- /dev/null +++ b/kernel/interface/i686/trap.c @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2024 Philippe Aubertin. + * All rights reserved. 
+ + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the author nor the names of other contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include + +void handle_trap(trapframe_t *trapframe) { + if(trapframe->ivt == JINUE_I686_SYSCALL_INTERRUPT) { + handle_syscall(trapframe_syscall_args(trapframe)); + } else { + handle_interrupt(trapframe); + } + + reschedule(); +}