From 4dd42c367fa159117cf7528dae99b65004aa79c1 Mon Sep 17 00:00:00 2001
From: llenotre
Date: Sat, 28 Feb 2026 15:46:42 +0100
Subject: [PATCH] feat: `user_zero`

---
 kernel/arch/x86/src/raw_copy.s    | 23 ++++++++++++++++++++---
 kernel/arch/x86_64/src/raw_copy.s | 15 ++++++++++++---
 kernel/src/device/default.rs      |  7 +------
 kernel/src/memory/user.rs         | 41 +++++++++++++++++++++++++++++++++++++++--
 kernel/src/process/exec/elf.rs    | 12 +++++-------
 kernel/src/process/mod.rs         |  8 ++++----
 6 files changed, 81 insertions(+), 25 deletions(-)

diff --git a/kernel/arch/x86/src/raw_copy.s b/kernel/arch/x86/src/raw_copy.s
index f11b34629..156172b65 100644
--- a/kernel/arch/x86/src/raw_copy.s
+++ b/kernel/arch/x86/src/raw_copy.s
@@ -23,9 +23,11 @@
 .section .text
 
 .global raw_copy
-.global copy_fault
+.global raw_zero
+.global raw_fault
+
+// The order of functions is important for bound checking in the exception handler
 
-// TODO can be optimized
 raw_copy:
 	push esi
 	push edi
@@ -41,7 +43,22 @@ raw_copy:
 	mov eax, 1
 	ret
-copy_fault:
+raw_zero:
+	push esi
+	push edi
+
+	mov edi, 12[esp]
+	mov ecx, 16[esp]
+
+	xor eax, eax
+	rep stosb
+
+	pop edi
+	pop esi
+	mov eax, 1
+	ret
+
+raw_fault:
 	pop edi
 	pop esi
 	xor eax, eax
 	ret
diff --git a/kernel/arch/x86_64/src/raw_copy.s b/kernel/arch/x86_64/src/raw_copy.s
index 47967df24..79b2030dd 100644
--- a/kernel/arch/x86_64/src/raw_copy.s
+++ b/kernel/arch/x86_64/src/raw_copy.s
@@ -23,15 +23,24 @@
 .section .text
 
 .global raw_copy
-.global copy_fault
+.global raw_zero
+.global raw_fault
+
+// The order of functions is important for bound checking in the exception handler
 
-// TODO can be optimized
 raw_copy:
 	mov rcx, rdx
 	rep movsb
 	mov rax, 1
 	ret
 
-copy_fault:
+raw_zero:
+	mov rcx, rsi
+	xor rax, rax
+	rep stosb
+	mov rax, 1
+	ret
+
+raw_fault:
 	xor rax, rax
 	ret
diff --git a/kernel/src/device/default.rs b/kernel/src/device/default.rs
index a491c0a9d..83200257d 100644
--- a/kernel/src/device/default.rs
+++ b/kernel/src/device/default.rs
@@ -50,12 +50,7 @@ pub struct ZeroDeviceHandle;
 
 impl FileOps for ZeroDeviceHandle {
 	fn read(&self, _file: &File, _: u64, buf: UserSlice<u8>) -> EResult<usize> {
-		let b: [u8; 128] = [0; 128];
-		let mut i = 0;
-		while i < buf.len() {
-			i += buf.copy_to_user(i, &b)?;
-		}
-		Ok(buf.len())
+		buf.zero(0, buf.len())
 	}
 
 	fn write(&self, _file: &File, _: u64, buf: UserSlice<u8>) -> EResult<usize> {
diff --git a/kernel/src/memory/user.rs b/kernel/src/memory/user.rs
index bb8c4ade8..aff46330d 100644
--- a/kernel/src/memory/user.rs
+++ b/kernel/src/memory/user.rs
@@ -38,8 +38,11 @@ use utils::{
 unsafe extern "C" {
 	/// Copy, with access check. On success, the function returns `true`.
 	pub fn raw_copy(dst: *mut u8, src: *const u8, n: usize) -> bool;
-	/// Function to be called back when a page fault occurs while using [`raw_copy`].
-	pub fn copy_fault();
+	/// Zero a range of memory, with page fault handling. On success, the function returns `true`.
+	pub fn raw_zero(dst: *mut u8, n: usize) -> bool;
+
+	/// Function called back when a page fault occurs while using [`raw_copy`] or [`raw_zero`].
+	pub fn raw_fault();
 }
 
 /// Low level function to copy data from userspace to kernelspace, with access check.
@@ -54,6 +57,22 @@ unsafe fn user_copy(src: *const u8, dst: *mut u8, n: usize) -> EResult<()> {
 	}
 }
 
+/// Low level function to zero a range of memory, with page fault handling.
+///
+/// If the access check fails, the function returns [`EFAULT`].
+///
+/// # Safety
+///
+/// The caller must ensure that `dst` points to valid memory that can be written to.
+unsafe fn user_zero(dst: *mut u8, n: usize) -> EResult<()> {
+	let res = vmem::smap_disable(|| raw_zero(dst, n));
+	if likely(res) {
+		Ok(())
+	} else {
+		Err(errno!(EFAULT))
+	}
+}
+
 /// Wrapper for an userspace pointer.
 #[derive(Clone, Copy)]
 pub struct UserPtr<T: Sized>(pub Option<NonNull<T>>);
@@ -321,6 +340,24 @@ impl<'a, T: Sized + fmt::Debug> UserSlice<'a, T> {
 	pub fn copy_to_user(&self, off: usize, buf: &[T]) -> EResult<usize> {
 		unsafe { self.copy_to_user_raw(off, buf.as_ptr(), buf.len()) }
 	}
+
+	/// Zeros the portion of the slice starting at offset `off`, with length `len`.
+	///
+	/// The function returns the number of elements written.
+	///
+	/// If the pointer is null, the function does nothing and returns `0`.
+	///
+	/// If the slice is not accessible, the function returns an error.
+	pub fn zero(&self, off: usize, len: usize) -> EResult<usize> {
+		let Some(ptr) = self.ptr else {
+			return Ok(0);
+		};
+		let len = min(len, self.len.saturating_sub(off));
+		unsafe {
+			user_zero(ptr.as_ptr().add(off) as *mut _, size_of::<T>() * len)?;
+		}
+		Ok(len)
+	}
 }
 
 impl<T: Sized + fmt::Debug> fmt::Debug for UserSlice<'_, T> {
diff --git a/kernel/src/process/exec/elf.rs b/kernel/src/process/exec/elf.rs
index c80772224..3658ba8b5 100644
--- a/kernel/src/process/exec/elf.rs
+++ b/kernel/src/process/exec/elf.rs
@@ -30,7 +30,7 @@ use crate::{
 		perm::{AccessProfile, can_execute_file},
 		vfs,
 	},
-	memory::{COMPAT_PROCESS_END, PROCESS_END, VirtAddr, vmem},
+	memory::{COMPAT_PROCESS_END, PROCESS_END, VirtAddr, user::UserSlice, vmem},
 	process::{
 		USER_STACK_SIZE,
 		exec::{ProgramImage, vdso::MappedVDSO},
@@ -40,7 +40,7 @@ use crate::{
 		},
 	},
 };
-use core::{cmp::max, hint::unlikely, num::NonZeroUsize, ops::Add, ptr, slice};
+use core::{cmp::max, hint::unlikely, num::NonZeroUsize, ops::Add, ptr};
 use utils::{
 	collections::{path::Path, string::String, vec::Vec},
 	errno,
@@ -277,11 +277,9 @@ fn map_segment(
 		// Zero the end of the last page if needed
 		let begin = load_base + seg.p_vaddr as usize + seg.p_filesz as usize;
 		let len = begin.next_multiple_of(PAGE_SIZE) - begin.0;
-		if len > 0 {
-			unsafe {
-				let slice = slice::from_raw_parts_mut(begin.as_ptr::<u8>(), len);
-				vmem::write_ro(|| vmem::smap_disable(|| slice.fill(0)));
-			}
+		unsafe {
+			let slice = UserSlice::from_user(begin.as_ptr::<u8>(), len)?;
+			vmem::write_ro(|| slice.zero(0, len))?;
 		}
 	}
 	// Add zero pages at the end if needed
diff --git a/kernel/src/process/mod.rs b/kernel/src/process/mod.rs
index 3ec3bf4c0..f5ac71f5e 100644
--- a/kernel/src/process/mod.rs
+++ b/kernel/src/process/mod.rs
@@ -425,10 +425,10 @@ pub(crate) fn init() -> EResult<()> {
 			Ok(true) => {}
 			Ok(false) => {
 				if ring < 3 {
-					// Check if the fault was caused by a user <-> kernel copy
-					if (user::raw_copy as usize..user::copy_fault as usize).contains(&pc) {
-						// Jump to `copy_fault`
-						frame.set_program_counter(user::copy_fault as usize);
+					// Check if the fault was caused by a user <-> kernel copy/zero
+					if (user::raw_copy as usize..user::raw_fault as usize).contains(&pc) {
+						// Jump to `raw_fault`
+						frame.set_program_counter(user::raw_fault as usize);
 					} else {
 						return CallbackResult::Panic;
 					}