
Commit

Merge branch 'patch-add-file-mapping' into feat-dynamic-link
MemoryShore committed Aug 28, 2024
2 parents 1a627e4 + d6a5341 commit baae979
Showing 33 changed files with 1,659 additions and 340 deletions.
5 changes: 3 additions & 2 deletions kernel/Cargo.toml
@@ -57,7 +57,8 @@ wait_queue_macros = { path = "crates/wait_queue_macros" }
paste = "=1.0.14"
slabmalloc = { path = "crates/rust-slabmalloc" }
log = "0.4.21"

xarray = "0.1.0"
lru = "0.12.3"

# The dependencies below are used when the target architecture is x86_64
[target.'cfg(target_arch = "x86_64")'.dependencies]
@@ -88,4 +89,4 @@ debug = true # Controls whether the compiler passes `-g`

# The release profile, used for `cargo build --release`
[profile.release]
debug = false
debug = true
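
The Cargo.toml hunk above pulls in two new crates, xarray and lru, presumably backing the file-mapping/page-cache work merged from this branch. As a minimal, illustrative sketch of the third-party lru crate's 0.12 API — not code from this commit, and using std for brevity where the kernel itself is no_std:

    use std::num::NonZeroUsize;
    use lru::LruCache;

    fn main() {
        // A cache that keeps at most two entries, evicting the least recently used one.
        let mut cache: LruCache<u64, &str> = LruCache::new(NonZeroUsize::new(2).unwrap());
        cache.put(1, "page A");
        cache.put(2, "page B");
        cache.get(&1);          // touch key 1 so it becomes most recently used
        cache.put(3, "page C"); // evicts key 2, the least recently used entry
        assert!(cache.get(&2).is_none());
    }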
74 changes: 70 additions & 4 deletions kernel/src/arch/riscv64/mm/mod.rs
@@ -12,9 +12,9 @@ use crate::{
page_frame::{FrameAllocator, PageFrameCount, PageFrameUsage, PhysPageFrame},
},
kernel_mapper::KernelMapper,
page::{PageEntry, PageFlags, PAGE_1G_SHIFT},
page::{EntryFlags, PageEntry, PAGE_1G_SHIFT},
ucontext::UserMapper,
MemoryManagementArch, PageTableKind, PhysAddr, VirtAddr,
MemoryManagementArch, PageTableKind, PhysAddr, VirtAddr, VmFlags,
},
smp::cpu::ProcessorId,
};
@@ -256,8 +256,74 @@ impl MemoryManagementArch for RiscV64MMArch {
) -> bool {
true
}

const PAGE_NONE: usize = Self::ENTRY_FLAG_GLOBAL | Self::ENTRY_FLAG_READONLY;

const PAGE_READ: usize = PAGE_ENTRY_BASE | Self::ENTRY_FLAG_READONLY;

const PAGE_WRITE: usize =
PAGE_ENTRY_BASE | Self::ENTRY_FLAG_READONLY | Self::ENTRY_FLAG_WRITEABLE;

const PAGE_EXEC: usize = PAGE_ENTRY_BASE | Self::ENTRY_FLAG_EXEC;

const PAGE_READ_EXEC: usize =
PAGE_ENTRY_BASE | Self::ENTRY_FLAG_READONLY | Self::ENTRY_FLAG_EXEC;

const PAGE_WRITE_EXEC: usize = PAGE_ENTRY_BASE
| Self::ENTRY_FLAG_READONLY
| Self::ENTRY_FLAG_EXEC
| Self::ENTRY_FLAG_WRITEABLE;

const PAGE_COPY: usize = Self::PAGE_READ;
const PAGE_COPY_EXEC: usize = Self::PAGE_READ_EXEC;
const PAGE_SHARED: usize = Self::PAGE_WRITE;
const PAGE_SHARED_EXEC: usize = Self::PAGE_WRITE_EXEC;

const PAGE_COPY_NOEXEC: usize = 0;
const PAGE_READONLY: usize = 0;
const PAGE_READONLY_EXEC: usize = 0;

const PROTECTION_MAP: [EntryFlags<MMArch>; 16] = protection_map();
}

const fn protection_map() -> [EntryFlags<MMArch>; 16] {
let mut map = [0; 16];
map[VmFlags::VM_NONE.bits()] = MMArch::PAGE_NONE;
map[VmFlags::VM_READ.bits()] = MMArch::PAGE_READONLY;
map[VmFlags::VM_WRITE.bits()] = MMArch::PAGE_COPY;
map[VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] = MMArch::PAGE_COPY;
map[VmFlags::VM_EXEC.bits()] = MMArch::PAGE_READONLY_EXEC;
map[VmFlags::VM_EXEC.bits() | VmFlags::VM_READ.bits()] = MMArch::PAGE_READONLY_EXEC;
map[VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits()] = MMArch::PAGE_COPY_EXEC;
map[VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
MMArch::PAGE_COPY_EXEC;
map[VmFlags::VM_SHARED.bits()] = MMArch::PAGE_NONE;
map[VmFlags::VM_SHARED.bits() | VmFlags::VM_READ.bits()] = MMArch::PAGE_READONLY;
map[VmFlags::VM_SHARED.bits() | VmFlags::VM_WRITE.bits()] = MMArch::PAGE_SHARED;
map[VmFlags::VM_SHARED.bits() | VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
MMArch::PAGE_SHARED;
map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits()] = MMArch::PAGE_READONLY_EXEC;
map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits() | VmFlags::VM_READ.bits()] =
MMArch::PAGE_READONLY_EXEC;
map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits()] =
MMArch::PAGE_SHARED_EXEC;
map[VmFlags::VM_SHARED.bits()
| VmFlags::VM_EXEC.bits()
| VmFlags::VM_WRITE.bits()
| VmFlags::VM_READ.bits()] = MMArch::PAGE_SHARED_EXEC;
let mut ret = [unsafe { EntryFlags::from_data(0) }; 16];
let mut index = 0;
while index < 16 {
ret[index] = unsafe { EntryFlags::from_data(map[index]) };
index += 1;
}
ret
}

const PAGE_ENTRY_BASE: usize = RiscV64MMArch::ENTRY_FLAG_PRESENT
| RiscV64MMArch::ENTRY_FLAG_ACCESSED
| RiscV64MMArch::ENTRY_FLAG_USER;

impl VirtAddr {
/// Check whether the virtual address is valid
#[inline(always)]
@@ -270,8 +336,8 @@ impl VirtAddr {
}

/// Get the default page entry flags for a kernel address
pub unsafe fn kernel_page_flags<A: MemoryManagementArch>(_virt: VirtAddr) -> PageFlags<A> {
PageFlags::from_data(RiscV64MMArch::ENTRY_FLAG_DEFAULT_PAGE)
pub unsafe fn kernel_page_flags<A: MemoryManagementArch>(_virt: VirtAddr) -> EntryFlags<A> {
EntryFlags::from_data(RiscV64MMArch::ENTRY_FLAG_DEFAULT_PAGE)
.set_user(false)
.set_execute(true)
}
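The protection_map() added above for RISC-V precomputes, at compile time, an EntryFlags value for each of the 16 combinations of the basic VM permission bits. A self-contained sketch of how such a table is typically consulted — the bit positions and the vm_get_page_prot helper below are assumptions for illustration, not definitions from this commit:

    // Assumed bit positions, mirroring the 16-entry table indexed above.
    const VM_READ: usize = 1 << 0;
    const VM_WRITE: usize = 1 << 1;
    const VM_EXEC: usize = 1 << 2;
    const VM_SHARED: usize = 1 << 3;

    // Placeholder for the per-arch MMArch::PROTECTION_MAP (raw usize entries keep the sketch standalone).
    const PROTECTION_MAP: [usize; 16] = [0; 16];

    // Hypothetical lookup: only the low four permission bits select an entry.
    fn vm_get_page_prot(vm_flags: usize) -> usize {
        PROTECTION_MAP[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]
    }

    fn main() {
        // A shared, writable, readable mapping selects entry 0b1011 = 11.
        assert_eq!(VM_SHARED | VM_WRITE | VM_READ, 11);
        let _entry = vm_get_page_prot(VM_SHARED | VM_WRITE | VM_READ);
    }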
4 changes: 2 additions & 2 deletions kernel/src/arch/x86_64/kvm/vmx/ept.rs
@@ -1,7 +1,7 @@
use crate::arch::mm::LockedFrameAllocator;
use crate::arch::mm::PageMapper;
use crate::arch::MMArch;
use crate::mm::page::PageFlags;
use crate::mm::page::EntryFlags;
use crate::mm::{PageTableKind, PhysAddr, VirtAddr};
use crate::smp::core::smp_get_processor_id;
use crate::smp::cpu::AtomicProcessorId;
@@ -92,7 +92,7 @@ impl EptMapper {
&mut self,
gpa: u64,
hpa: u64,
flags: PageFlags<MMArch>,
flags: EntryFlags<MMArch>,
) -> Result<(), SystemError> {
if self.readonly {
return Err(SystemError::EAGAIN_OR_EWOULDBLOCK);
4 changes: 2 additions & 2 deletions kernel/src/arch/x86_64/kvm/vmx/mmu.rs
@@ -1,7 +1,7 @@
use crate::{
arch::kvm::vmx::ept::EptMapper,
libs::mutex::Mutex,
mm::{page::PageFlags, syscall::ProtFlags},
mm::{page::EntryFlags, syscall::ProtFlags},
virt::kvm::host_mem::{__gfn_to_pfn, kvm_vcpu_gfn_to_memslot, PAGE_MASK, PAGE_SHIFT},
};
use bitfield_struct::bitfield;
@@ -218,7 +218,7 @@ pub fn __direct_map(
}
// Map the gpa to the hpa
let mut ept_mapper = EptMapper::lock();
let page_flags = PageFlags::from_prot_flags(ProtFlags::from_bits_truncate(0x7_u64), false);
let page_flags = EntryFlags::from_prot_flags(ProtFlags::from_bits_truncate(0x7_u64), false);
unsafe {
assert!(ept_mapper.walk(gpa, pfn << PAGE_SHIFT, page_flags).is_ok());
}
12 changes: 5 additions & 7 deletions kernel/src/arch/x86_64/mm/fault.rs
@@ -28,7 +28,7 @@ pub type PageMapper =

impl X86_64MMArch {
pub fn vma_access_error(vma: Arc<LockedVMA>, error_code: X86PfErrorCode) -> bool {
let vm_flags = *vma.lock().vm_flags();
let vm_flags = *vma.lock_irqsave().vm_flags();
let foreign = false;
if error_code.contains(X86PfErrorCode::X86_PF_PK) {
return true;
@@ -223,7 +223,7 @@ impl X86_64MMArch {
}

let current_address_space: Arc<AddressSpace> = AddressSpace::current().unwrap();
let mut space_guard = current_address_space.write();
let mut space_guard = current_address_space.write_irqsave();
let mut fault;
loop {
let vma = space_guard.mappings.find_nearest(address);
@@ -236,7 +236,7 @@
address.data(),
)
});
let guard = vma.lock();
let guard = vma.lock_irqsave();
let region = *guard.region();
let vm_flags = *guard.vm_flags();
drop(guard);
@@ -269,11 +269,9 @@
);
}
let mapper = &mut space_guard.user_mapper.utable;
let message = PageFaultMessage::new(vma.clone(), address, flags, mapper);

fault = PageFaultHandler::handle_mm_fault(
PageFaultMessage::new(vma.clone(), address, flags),
mapper,
);
fault = PageFaultHandler::handle_mm_fault(message);

if fault.contains(VmFaultReason::VM_FAULT_COMPLETED) {
return;
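The fault.rs hunk above folds the page-table mapper into PageFaultMessage, so PageFaultHandler::handle_mm_fault now takes a single argument. A rough sketch of the shape this implies — the types, field names, and lifetime below are placeholders inferred from the call site, not the kernel's actual definitions:

    // Placeholders inferred from the call site above; not the kernel's real types.
    struct Mapper;                         // stands in for the arch page-table mapper
    struct VmaRef;                         // stands in for Arc<LockedVMA>
    #[allow(dead_code)]
    struct FaultFlags(u32);                // stands in for the fault flag bits

    #[allow(dead_code)]
    struct PageFaultMessage<'a> {
        vma: VmaRef,
        address: usize,
        flags: FaultFlags,
        mapper: &'a mut Mapper,            // previously passed to handle_mm_fault separately
    }

    fn handle_mm_fault(_msg: PageFaultMessage<'_>) {
        // Resolve the fault here using _msg.mapper; omitted in this sketch.
    }

    fn main() {
        let mut mapper = Mapper;
        let message = PageFaultMessage {
            vma: VmaRef,
            address: 0x1000,
            flags: FaultFlags(0),
            mapper: &mut mapper,
        };
        handle_mm_fault(message);
    }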
99 changes: 93 additions & 6 deletions kernel/src/arch/x86_64/mm/mod.rs
@@ -28,8 +28,8 @@ use crate::{
};

use crate::mm::kernel_mapper::KernelMapper;
use crate::mm::page::{PageEntry, PageFlags, PAGE_1G_SHIFT};
use crate::mm::{MemoryManagementArch, PageTableKind, PhysAddr, VirtAddr};
use crate::mm::page::{EntryFlags, PageEntry, PAGE_1G_SHIFT};
use crate::mm::{MemoryManagementArch, PageTableKind, PhysAddr, VirtAddr, VmFlags};

use system_error::SystemError;

@@ -326,6 +326,93 @@ impl MemoryManagementArch for X86_64MMArch {
}
pkru::pkru_allows_pkey(pkru::vma_pkey(vma), write)
}

const PROTECTION_MAP: [EntryFlags<MMArch>; 16] = protection_map();

const PAGE_NONE: usize =
Self::ENTRY_FLAG_PRESENT | Self::ENTRY_FLAG_ACCESSED | Self::ENTRY_FLAG_GLOBAL;

const PAGE_SHARED: usize = Self::ENTRY_FLAG_PRESENT
| Self::ENTRY_FLAG_READWRITE
| Self::ENTRY_FLAG_USER
| Self::ENTRY_FLAG_ACCESSED
| Self::ENTRY_FLAG_NO_EXEC;

const PAGE_SHARED_EXEC: usize = Self::ENTRY_FLAG_PRESENT
| Self::ENTRY_FLAG_READWRITE
| Self::ENTRY_FLAG_USER
| Self::ENTRY_FLAG_ACCESSED;

const PAGE_COPY_NOEXEC: usize = Self::ENTRY_FLAG_PRESENT
| Self::ENTRY_FLAG_USER
| Self::ENTRY_FLAG_ACCESSED
| Self::ENTRY_FLAG_NO_EXEC;

const PAGE_COPY_EXEC: usize =
Self::ENTRY_FLAG_PRESENT | Self::ENTRY_FLAG_USER | Self::ENTRY_FLAG_ACCESSED;

const PAGE_COPY: usize = Self::ENTRY_FLAG_PRESENT
| Self::ENTRY_FLAG_USER
| Self::ENTRY_FLAG_ACCESSED
| Self::ENTRY_FLAG_NO_EXEC;

const PAGE_READONLY: usize = Self::ENTRY_FLAG_PRESENT
| Self::ENTRY_FLAG_USER
| Self::ENTRY_FLAG_ACCESSED
| Self::ENTRY_FLAG_NO_EXEC;

const PAGE_READONLY_EXEC: usize =
Self::ENTRY_FLAG_PRESENT | Self::ENTRY_FLAG_USER | Self::ENTRY_FLAG_ACCESSED;

const PAGE_READ: usize = 0;
const PAGE_READ_EXEC: usize = 0;
const PAGE_WRITE: usize = 0;
const PAGE_WRITE_EXEC: usize = 0;
const PAGE_EXEC: usize = 0;
}

/// Build the protection-flag mapping table
///
/// ## Returns
/// - `[EntryFlags<MMArch>; 16]`: a mapping table with 16 entries
const fn protection_map() -> [EntryFlags<MMArch>; 16] {
let mut map = [unsafe { EntryFlags::from_data(0) }; 16];
unsafe {
map[VmFlags::VM_NONE.bits()] = EntryFlags::from_data(MMArch::PAGE_NONE);
map[VmFlags::VM_READ.bits()] = EntryFlags::from_data(MMArch::PAGE_READONLY);
map[VmFlags::VM_WRITE.bits()] = EntryFlags::from_data(MMArch::PAGE_COPY);
map[VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
EntryFlags::from_data(MMArch::PAGE_COPY);
map[VmFlags::VM_EXEC.bits()] = EntryFlags::from_data(MMArch::PAGE_READONLY_EXEC);
map[VmFlags::VM_EXEC.bits() | VmFlags::VM_READ.bits()] =
EntryFlags::from_data(MMArch::PAGE_READONLY_EXEC);
map[VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits()] =
EntryFlags::from_data(MMArch::PAGE_COPY_EXEC);
map[VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
EntryFlags::from_data(MMArch::PAGE_COPY_EXEC);
map[VmFlags::VM_SHARED.bits()] = EntryFlags::from_data(MMArch::PAGE_NONE);
map[VmFlags::VM_SHARED.bits() | VmFlags::VM_READ.bits()] =
EntryFlags::from_data(MMArch::PAGE_READONLY);
map[VmFlags::VM_SHARED.bits() | VmFlags::VM_WRITE.bits()] =
EntryFlags::from_data(MMArch::PAGE_SHARED);
map[VmFlags::VM_SHARED.bits() | VmFlags::VM_WRITE.bits() | VmFlags::VM_READ.bits()] =
EntryFlags::from_data(MMArch::PAGE_SHARED);
map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits()] =
EntryFlags::from_data(MMArch::PAGE_READONLY_EXEC);
map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits() | VmFlags::VM_READ.bits()] =
EntryFlags::from_data(MMArch::PAGE_READONLY_EXEC);
map[VmFlags::VM_SHARED.bits() | VmFlags::VM_EXEC.bits() | VmFlags::VM_WRITE.bits()] =
EntryFlags::from_data(MMArch::PAGE_SHARED_EXEC);
map[VmFlags::VM_SHARED.bits()
| VmFlags::VM_EXEC.bits()
| VmFlags::VM_WRITE.bits()
| VmFlags::VM_READ.bits()] = EntryFlags::from_data(MMArch::PAGE_SHARED_EXEC);
}
// if X86_64MMArch::is_xd_reserved() {
// map.iter_mut().for_each(|x| *x &= !Self::ENTRY_FLAG_NO_EXEC)
// }
map
}

impl X86_64MMArch {
@@ -650,17 +737,17 @@ impl FrameAllocator for LockedFrameAllocator {
}

/// Get the default page entry flags for a kernel address
pub unsafe fn kernel_page_flags<A: MemoryManagementArch>(virt: VirtAddr) -> PageFlags<A> {
pub unsafe fn kernel_page_flags<A: MemoryManagementArch>(virt: VirtAddr) -> EntryFlags<A> {
let info: X86_64MMBootstrapInfo = BOOTSTRAP_MM_INFO.unwrap();

if virt.data() >= info.kernel_code_start && virt.data() < info.kernel_code_end {
// Remap kernel code execute
return PageFlags::new().set_execute(true).set_write(true);
return EntryFlags::new().set_execute(true).set_write(true);
} else if virt.data() >= info.kernel_data_end && virt.data() < info.kernel_rodata_end {
// Remap kernel rodata read only
return PageFlags::new().set_execute(true);
return EntryFlags::new().set_execute(true);
} else {
return PageFlags::new().set_write(true).set_execute(true);
return EntryFlags::new().set_write(true).set_execute(true);
}
}

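The commented-out is_xd_reserved() block in the x86_64 protection_map() above points at a follow-up: on CPUs where the XD (NX) bit is reserved, the NO_EXEC flag must not be set in page entries. A standalone sketch of that post-processing step — the flag value below is a placeholder, not the kernel's real encoding:

    // Placeholder for ENTRY_FLAG_NO_EXEC (the real bit is the architecture's XD/NX bit).
    const ENTRY_FLAG_NO_EXEC: usize = 1 << 63;

    // Strip the no-exec bit from every table entry when the CPU reserves XD.
    fn strip_nx_if_reserved(map: &mut [usize; 16], xd_reserved: bool) {
        if xd_reserved {
            map.iter_mut().for_each(|x| *x &= !ENTRY_FLAG_NO_EXEC);
        }
    }

    fn main() {
        let mut map = [ENTRY_FLAG_NO_EXEC; 16];
        strip_nx_if_reserved(&mut map, true);
        assert!(map.iter().all(|&entry| entry & ENTRY_FLAG_NO_EXEC == 0));
    }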
4 changes: 2 additions & 2 deletions kernel/src/arch/x86_64/mm/pkru.rs
@@ -16,8 +16,8 @@ const PKEY_MASK: usize = 1 << 32 | 1 << 33 | 1 << 34 | 1 << 35;
/// ## Returns
/// - `u16`: the protection_key of the vma
pub fn vma_pkey(vma: Arc<LockedVMA>) -> u16 {
let guard = vma.lock();
((guard.vm_flags().bits() & PKEY_MASK as u64) >> VM_PKEY_SHIFT) as u16
let guard = vma.lock_irqsave();
((guard.vm_flags().bits() & PKEY_MASK) >> VM_PKEY_SHIFT) as u16
}

// TODO pkru实现参考:https://code.dragonos.org.cn/xref/linux-6.6.21/arch/x86/include/asm/pkru.h
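The vma_pkey() change above switches to lock_irqsave() and drops the `as u64` cast on PKEY_MASK. A small worked sketch of the key extraction itself, assuming VM_PKEY_SHIFT is 32 (consistent with PKEY_MASK covering bits 32–35); the constants are restated here for illustration rather than imported from the kernel:

    // Protection-key bits occupy vm_flags bits 32..=35 (a four-bit key).
    const PKEY_MASK: usize = 1 << 32 | 1 << 33 | 1 << 34 | 1 << 35;
    const VM_PKEY_SHIFT: usize = 32; // assumed: the mask's lowest bit

    fn pkey_from_vm_flags(vm_flags: usize) -> u16 {
        ((vm_flags & PKEY_MASK) >> VM_PKEY_SHIFT) as u16
    }

    fn main() {
        // A VMA carrying protection key 5 has bits 32 and 34 set.
        let vm_flags = 5usize << VM_PKEY_SHIFT;
        assert_eq!(pkey_from_vm_flags(vm_flags), 5);
    }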
4 changes: 2 additions & 2 deletions kernel/src/driver/net/dma.rs
@@ -3,7 +3,7 @@ use crate::arch::mm::kernel_page_flags;
use crate::arch::MMArch;

use crate::mm::kernel_mapper::KernelMapper;
use crate::mm::page::{page_manager_lock_irqsave, PageFlags};
use crate::mm::page::{page_manager_lock_irqsave, EntryFlags};
use crate::mm::{
allocator::page_frame::{
allocate_page_frames, deallocate_page_frames, PageFrameCount, PhysPageFrame,
@@ -25,7 +25,7 @@ pub fn dma_alloc(pages: usize) -> (usize, NonNull<u8>) {
// Zero this region to avoid stale data
core::ptr::write_bytes(virt.data() as *mut u8, 0, count.data() * MMArch::PAGE_SIZE);

let dma_flags: PageFlags<MMArch> = PageFlags::mmio_flags();
let dma_flags: EntryFlags<MMArch> = EntryFlags::mmio_flags();

let mut kernel_mapper = KernelMapper::lock();
let kernel_mapper = kernel_mapper.as_mut().unwrap();
4 changes: 2 additions & 2 deletions kernel/src/driver/video/mod.rs
@@ -10,7 +10,7 @@ use crate::{
spinlock::SpinLock,
},
mm::{
allocator::page_frame::PageFrameCount, kernel_mapper::KernelMapper, page::PageFlags,
allocator::page_frame::PageFrameCount, kernel_mapper::KernelMapper, page::EntryFlags,
MemoryManagementArch,
},
time::timer::{Timer, TimerFunction},
@@ -95,7 +95,7 @@ impl VideoRefreshManager {
let count = PageFrameCount::new(
page_align_up(frame_buffer_info_guard.buf_size()) / MMArch::PAGE_SIZE,
);
let page_flags: PageFlags<MMArch> = PageFlags::new().set_execute(true).set_write(true);
let page_flags: EntryFlags<MMArch> = EntryFlags::new().set_execute(true).set_write(true);

let mut kernel_mapper = KernelMapper::lock();
let mut kernel_mapper = kernel_mapper.as_mut();
4 changes: 2 additions & 2 deletions kernel/src/driver/virtio/virtio_impl.rs
@@ -3,7 +3,7 @@ use crate::arch::mm::kernel_page_flags;
use crate::arch::MMArch;

use crate::mm::kernel_mapper::KernelMapper;
use crate::mm::page::{page_manager_lock_irqsave, PageFlags};
use crate::mm::page::{page_manager_lock_irqsave, EntryFlags};
use crate::mm::{
allocator::page_frame::{
allocate_page_frames, deallocate_page_frames, PageFrameCount, PhysPageFrame,
@@ -32,7 +32,7 @@ unsafe impl Hal for HalImpl {
// Zero this region to avoid stale data
core::ptr::write_bytes(virt.data() as *mut u8, 0, count.data() * MMArch::PAGE_SIZE);

let dma_flags: PageFlags<MMArch> = PageFlags::mmio_flags();
let dma_flags: EntryFlags<MMArch> = EntryFlags::mmio_flags();

let mut kernel_mapper = KernelMapper::lock();
let kernel_mapper = kernel_mapper.as_mut().unwrap();
