diff --git a/Documentation/Patterns.md b/Documentation/Patterns.md index bd82fedc98c..9dc84b4a699 100644 --- a/Documentation/Patterns.md +++ b/Documentation/Patterns.md @@ -53,7 +53,7 @@ ErrorOr AddressSpace::allocate_region(VirtualRange const& range, String if (!name.is_null()) region_name = TRY(KString::try_create(name)); auto vmobject = TRY(AnonymousVMObject::try_create_with_size(range.size(), strategy)); - auto region = TRY(Region::try_create_user_accessible(range, move(vmobject), 0, move(region_name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, false)); + auto region = TRY(Region::try_create_user_accessible(range, move(vmobject), 0, move(region_name), prot_to_region_access_flags(prot), MemoryType::Normal, false)); TRY(region->map(page_directory())); return add_region(move(region)); } diff --git a/Kernel/Arch/aarch64/PageDirectory.h b/Kernel/Arch/aarch64/PageDirectory.h index 249db8fe3b1..7ee0c1ba659 100644 --- a/Kernel/Arch/aarch64/PageDirectory.h +++ b/Kernel/Arch/aarch64/PageDirectory.h @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -82,8 +83,7 @@ public: bool is_write_through() const { TODO_AARCH64(); } void set_write_through(bool) { } - bool is_cache_disabled() const { TODO_AARCH64(); } - void set_cache_disabled(bool) { } + void set_memory_type(MemoryType) { } bool is_global() const { TODO_AARCH64(); } void set_global(bool) { } @@ -135,8 +135,7 @@ public: bool is_write_through() const { TODO_AARCH64(); } void set_write_through(bool) { } - bool is_cache_disabled() const { TODO_AARCH64(); } - void set_cache_disabled(bool) { } + void set_memory_type(MemoryType) { } bool is_global() const { TODO_AARCH64(); } void set_global(bool) { } diff --git a/Kernel/Arch/riscv64/PageDirectory.h b/Kernel/Arch/riscv64/PageDirectory.h index 9970ce51bc8..cda47a0ff38 100644 --- a/Kernel/Arch/riscv64/PageDirectory.h +++ b/Kernel/Arch/riscv64/PageDirectory.h @@ -12,6 +12,7 @@ #include #include +#include #include #include @@ -120,8 
+121,7 @@ public: bool is_writable() const { return (m_raw & to_underlying(PageTableEntryBits::Writeable)) != 0; } void set_writable(bool b) { set_bit(PageTableEntryBits::Writeable, b); } - bool is_cache_disabled() const { TODO_RISCV64(); } - void set_cache_disabled(bool) { } + void set_memory_type(MemoryType) { } bool is_global() const { TODO_RISCV64(); } void set_global(bool b) { set_bit(PageTableEntryBits::Global, b); } diff --git a/Kernel/Arch/x86_64/PageDirectory.h b/Kernel/Arch/x86_64/PageDirectory.h index 992108d7330..8192bc75a73 100644 --- a/Kernel/Arch/x86_64/PageDirectory.h +++ b/Kernel/Arch/x86_64/PageDirectory.h @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -61,8 +62,7 @@ public: bool is_write_through() const { return (raw() & WriteThrough) == WriteThrough; } void set_write_through(bool b) { set_bit(WriteThrough, b); } - bool is_cache_disabled() const { return (raw() & CacheDisabled) == CacheDisabled; } - void set_cache_disabled(bool b) { set_bit(CacheDisabled, b); } + void set_memory_type(MemoryType t) { set_bit(CacheDisabled, t != MemoryType::Normal); } bool is_global() const { return (raw() & Global) == Global; } void set_global(bool b) { set_bit(Global, b); } @@ -117,8 +117,7 @@ public: bool is_write_through() const { return (raw() & WriteThrough) == WriteThrough; } void set_write_through(bool b) { set_bit(WriteThrough, b); } - bool is_cache_disabled() const { return (raw() & CacheDisabled) == CacheDisabled; } - void set_cache_disabled(bool b) { set_bit(CacheDisabled, b); } + void set_memory_type(MemoryType t) { set_bit(CacheDisabled, t != MemoryType::Normal); } bool is_global() const { return (raw() & Global) == Global; } void set_global(bool b) { set_bit(Global, b); } diff --git a/Kernel/Devices/GPU/Console/ContiguousFramebufferConsole.cpp b/Kernel/Devices/GPU/Console/ContiguousFramebufferConsole.cpp index ff1762c482d..1b0e77d54ea 100644 --- a/Kernel/Devices/GPU/Console/ContiguousFramebufferConsole.cpp +++ 
b/Kernel/Devices/GPU/Console/ContiguousFramebufferConsole.cpp @@ -29,7 +29,7 @@ void ContiguousFramebufferConsole::set_resolution(size_t width, size_t height, s size_t size = Memory::page_round_up(pitch * height).release_value_but_fixme_should_propagate_errors(); dbgln("Framebuffer Console: taking {} bytes", size); - auto region_or_error = MM.allocate_mmio_kernel_region(m_framebuffer_address, size, "Framebuffer Console"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No); + auto region_or_error = MM.allocate_mmio_kernel_region(m_framebuffer_address, size, "Framebuffer Console"sv, Memory::Region::Access::ReadWrite, Memory::MemoryType::NonCacheable); VERIFY(!region_or_error.is_error()); m_framebuffer_region = region_or_error.release_value(); diff --git a/Kernel/Devices/Storage/AHCI/Port.cpp b/Kernel/Devices/Storage/AHCI/Port.cpp index 36dd9d752a7..36ab4fbd3d2 100644 --- a/Kernel/Devices/Storage/AHCI/Port.cpp +++ b/Kernel/Devices/Storage/AHCI/Port.cpp @@ -507,7 +507,7 @@ bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64 dbgln_if(AHCI_DEBUG, "AHCI Port {}: CLE: ctba={:#08x}, ctbau={:#08x}, prdbc={:#08x}, prdtl={:#04x}, attributes={:#04x}", representative_port_index(), (u32)command_list_entries[unused_command_header.value()].ctba, (u32)command_list_entries[unused_command_header.value()].ctbau, (u32)command_list_entries[unused_command_header.value()].prdbc, (u16)command_list_entries[unused_command_header.value()].prdtl, (u16)command_list_entries[unused_command_header.value()].attributes); - auto command_table_region = MM.allocate_kernel_region_with_physical_pages({ &m_command_table_pages[unused_command_header.value()], 1 }, "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value(); + auto command_table_region = MM.allocate_kernel_region_with_physical_pages({ &m_command_table_pages[unused_command_header.value()], 1 }, "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, 
Memory::MemoryType::IO).release_value(); auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr(); dbgln_if(AHCI_DEBUG, "AHCI Port {}: Allocated command table at {}", representative_port_index(), command_table_region->vaddr()); @@ -591,7 +591,7 @@ bool AHCIPort::identify_device() // QEMU doesn't care if we don't set the correct CFL field in this register, real hardware will set an handshake error bit in PxSERR register. command_list_entries[unused_command_header.value()].attributes = (size_t)FIS::DwordCount::RegisterHostToDevice | AHCI::CommandHeaderAttributes::P; - auto command_table_region = MM.allocate_kernel_region_with_physical_pages({ &m_command_table_pages[unused_command_header.value()], 1 }, "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value(); + auto command_table_region = MM.allocate_kernel_region_with_physical_pages({ &m_command_table_pages[unused_command_header.value()], 1 }, "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, Memory::MemoryType::IO).release_value(); auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr(); memset(const_cast(command_table.command_fis), 0, 64); command_table.descriptors[0].base_high = 0; diff --git a/Kernel/FileSystem/ProcFS/ProcessExposed.cpp b/Kernel/FileSystem/ProcFS/ProcessExposed.cpp index 89ed9e014ce..10c5512b74e 100644 --- a/Kernel/FileSystem/ProcFS/ProcessExposed.cpp +++ b/Kernel/FileSystem/ProcFS/ProcessExposed.cpp @@ -311,7 +311,7 @@ ErrorOr Process::procfs_get_virtual_memory_stats(KBufferBuilder& builder) if (region.vmobject().is_anonymous()) { TRY(region_object.add("volatile"sv, static_cast(region.vmobject()).is_volatile())); } - TRY(region_object.add("cacheable"sv, region.is_cacheable())); + TRY(region_object.add("memory_type"sv, Memory::memory_type_to_string(region.memory_type()))); TRY(region_object.add("address"sv, region.vaddr().get())); TRY(region_object.add("size"sv, 
region.size())); TRY(region_object.add("amount_resident"sv, region.amount_resident())); diff --git a/Kernel/Memory/AddressSpace.cpp b/Kernel/Memory/AddressSpace.cpp index e96406e8b19..ff5a33dfad6 100644 --- a/Kernel/Memory/AddressSpace.cpp +++ b/Kernel/Memory/AddressSpace.cpp @@ -144,7 +144,7 @@ ErrorOr AddressSpace::try_allocate_split_region(Region const& source_re region_name = TRY(KString::try_create(source_region.name())); auto new_region = TRY(Region::create_unplaced( - source_region.vmobject(), offset_in_vmobject, move(region_name), source_region.access(), source_region.is_cacheable() ? Region::Cacheable::Yes : Region::Cacheable::No, source_region.is_shared())); + source_region.vmobject(), offset_in_vmobject, move(region_name), source_region.access(), source_region.memory_type(), source_region.is_shared())); new_region->set_syscall_region(source_region.is_syscall_region()); new_region->set_mmap(source_region.is_mmap(), source_region.mmapped_from_readable(), source_region.mmapped_from_writable()); new_region->set_stack(source_region.is_stack()); @@ -201,7 +201,7 @@ ErrorOr AddressSpace::allocate_region_with_vmobject(RandomizeVirtualAdd if (!name.is_null()) region_name = TRY(KString::try_create(name)); - auto region = TRY(Region::create_unplaced(move(vmobject), offset_in_vmobject, move(region_name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, shared)); + auto region = TRY(Region::create_unplaced(move(vmobject), offset_in_vmobject, move(region_name), prot_to_region_access_flags(prot), MemoryType::Normal, shared)); if (requested_address.is_null()) TRY(m_region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment)); diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp index 72df5c45c38..85368d80206 100644 --- a/Kernel/Memory/MemoryManager.cpp +++ b/Kernel/Memory/MemoryManager.cpp @@ -1058,14 +1058,14 @@ PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault) return response; } -ErrorOr> 
MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable) +ErrorOr> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, MemoryType memory_type) { VERIFY(!(size % PAGE_SIZE)); OwnPtr name_kstring; if (!name.is_null()) name_kstring = TRY(KString::try_create(name)); auto vmobject = TRY(AnonymousVMObject::try_create_physically_contiguous_with_size(size)); - auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable)); + auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, memory_type)); TRY(m_global_data.with([&](auto& global_data) { return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); })); TRY(region->map(kernel_page_directory())); return region; @@ -1076,7 +1076,7 @@ ErrorOr> MemoryManager::allocate_dma_buffer_page(S auto page = TRY(allocate_physical_page()); dma_buffer_page = page; // Do not enable Cache for this region as physical memory transfers are performed (Most architectures have this behavior by default) - return allocate_kernel_region_with_physical_pages({ &page, 1 }, name, access, Region::Cacheable::No); + return allocate_kernel_region_with_physical_pages({ &page, 1 }, name, access, MemoryType::NonCacheable); } ErrorOr> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access) @@ -1091,7 +1091,7 @@ ErrorOr> MemoryManager::allocate_dma_buffer_pages( VERIFY(!(size % PAGE_SIZE)); dma_buffer_pages = TRY(allocate_contiguous_physical_pages(size)); // Do not enable Cache for this region as physical memory transfers are performed (Most architectures have this behavior by default) - return allocate_kernel_region_with_physical_pages(dma_buffer_pages, name, access, Region::Cacheable::No); + return allocate_kernel_region_with_physical_pages(dma_buffer_pages, name, access, MemoryType::NonCacheable); } 
ErrorOr> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access) @@ -1102,45 +1102,45 @@ ErrorOr> MemoryManager::allocate_dma_buffer_pages( return allocate_dma_buffer_pages(size, name, access, dma_buffer_pages); } -ErrorOr> MemoryManager::allocate_kernel_region(size_t size, StringView name, Region::Access access, AllocationStrategy strategy, Region::Cacheable cacheable) +ErrorOr> MemoryManager::allocate_kernel_region(size_t size, StringView name, Region::Access access, AllocationStrategy strategy, MemoryType memory_type) { VERIFY(!(size % PAGE_SIZE)); OwnPtr name_kstring; if (!name.is_null()) name_kstring = TRY(KString::try_create(name)); auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy)); - auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable)); + auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, memory_type)); TRY(m_global_data.with([&](auto& global_data) { return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); })); TRY(region->map(kernel_page_directory())); return region; } -ErrorOr> MemoryManager::allocate_kernel_region_with_physical_pages(Span> pages, StringView name, Region::Access access, Region::Cacheable cacheable) +ErrorOr> MemoryManager::allocate_kernel_region_with_physical_pages(Span> pages, StringView name, Region::Access access, MemoryType memory_type) { auto vmobject = TRY(AnonymousVMObject::try_create_with_physical_pages(pages)); OwnPtr name_kstring; if (!name.is_null()) name_kstring = TRY(KString::try_create(name)); - auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable)); + auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, memory_type)); TRY(m_global_data.with([&](auto& global_data) { return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, 
pages.size() * PAGE_SIZE, PAGE_SIZE); })); TRY(region->map(kernel_page_directory())); return region; } -ErrorOr> MemoryManager::allocate_mmio_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable) +ErrorOr> MemoryManager::allocate_mmio_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, MemoryType memory_type) { VERIFY(!(size % PAGE_SIZE)); auto vmobject = TRY(MMIOVMObject::try_create_for_physical_range(paddr, size)); OwnPtr name_kstring; if (!name.is_null()) name_kstring = TRY(KString::try_create(name)); - auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable)); + auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, memory_type)); TRY(m_global_data.with([&](auto& global_data) { return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, PAGE_SIZE); })); TRY(region->map(kernel_page_directory(), paddr)); return region; } -ErrorOr> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable) +ErrorOr> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, StringView name, Region::Access access, MemoryType memory_type) { VERIFY(!(size % PAGE_SIZE)); @@ -1148,7 +1148,7 @@ ErrorOr> MemoryManager::allocate_kernel_region_with_vmobje if (!name.is_null()) name_kstring = TRY(KString::try_create(name)); - auto region = TRY(Region::create_unplaced(vmobject, 0, move(name_kstring), access, cacheable)); + auto region = TRY(Region::create_unplaced(vmobject, 0, move(name_kstring), access, memory_type)); TRY(m_global_data.with([&](auto& global_data) { return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); })); TRY(region->map(kernel_page_directory())); return region; diff --git a/Kernel/Memory/MemoryManager.h 
b/Kernel/Memory/MemoryManager.h index deba6dfd91c..150c805bcd3 100644 --- a/Kernel/Memory/MemoryManager.h +++ b/Kernel/Memory/MemoryManager.h @@ -168,15 +168,15 @@ public: ErrorOr>> allocate_contiguous_physical_pages(size_t size); void deallocate_physical_page(PhysicalAddress); - ErrorOr> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes); + ErrorOr> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, MemoryType = MemoryType::Normal); ErrorOr> allocate_dma_buffer_page(StringView name, Region::Access access, RefPtr& dma_buffer_page); ErrorOr> allocate_dma_buffer_page(StringView name, Region::Access access); ErrorOr> allocate_dma_buffer_pages(size_t size, StringView name, Region::Access access, Vector>& dma_buffer_pages); ErrorOr> allocate_dma_buffer_pages(size_t size, StringView name, Region::Access access); - ErrorOr> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes); - ErrorOr> allocate_mmio_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::No); - ErrorOr> allocate_kernel_region_with_physical_pages(Span>, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes); - ErrorOr> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes); + ErrorOr> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, MemoryType = MemoryType::Normal); + ErrorOr> allocate_mmio_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, MemoryType = MemoryType::IO); + ErrorOr> allocate_kernel_region_with_physical_pages(Span>, StringView name, Region::Access access, MemoryType = MemoryType::Normal); 
+ ErrorOr> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, MemoryType = MemoryType::Normal); ErrorOr> allocate_unbacked_region_anywhere(size_t size, size_t alignment); ErrorOr> create_identity_mapped_region(PhysicalAddress, size_t); diff --git a/Kernel/Memory/MemoryType.h b/Kernel/Memory/MemoryType.h new file mode 100644 index 00000000000..c975be3a02a --- /dev/null +++ b/Kernel/Memory/MemoryType.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2024, Sönke Holz + * + * SPDX-License-Identifier: BSD-2-Clause + */ + +#pragma once + +#include + +namespace Kernel::Memory { + +// This enum is used to control how memory accesses to mapped regions are handled by the hardware. +// NOTE: Memory types may be ignored if the architecture/platform does not support specifying memory types in page tables. +enum class MemoryType : u8 { + // Used for normal main memory mappings + // - Speculative accesses are allowed + // - Accesses can be cached + // - Accesses can be reordered + // - Accesses can be merged + Normal, + + // Used for framebuffers, DMA buffers etc. 
+ // - Speculative accesses are allowed + // - Accesses are *not* cached + // - Accesses can be reordered + // - Accesses can be merged + NonCacheable, + + // Used for MMIO (with side effects on accesses) + // - Speculative accesses are *not* allowed + // - Accesses are *not* cached + // - Accesses are *not* reordered + // - Accesses are *not* merged + IO, +}; + +constexpr StringView memory_type_to_string(MemoryType memory_type) +{ + switch (memory_type) { + case MemoryType::Normal: + return "Normal"sv; + case MemoryType::NonCacheable: + return "NonCacheable"sv; + case MemoryType::IO: + return "IO"sv; + } + + VERIFY_NOT_REACHED(); +} + +} diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp index b6d98f160a1..5374dcd3508 100644 --- a/Kernel/Memory/Region.cpp +++ b/Kernel/Memory/Region.cpp @@ -27,26 +27,26 @@ Region::Region() { } -Region::Region(NonnullLockRefPtr vmobject, size_t offset_in_vmobject, OwnPtr name, Region::Access access, Cacheable cacheable, bool shared) +Region::Region(NonnullLockRefPtr vmobject, size_t offset_in_vmobject, OwnPtr name, Region::Access access, MemoryType memory_type, bool shared) : m_range(VirtualRange({}, 0)) , m_offset_in_vmobject(offset_in_vmobject) , m_vmobject(move(vmobject)) , m_name(move(name)) , m_access(access | ((access & 0x7) << 4)) , m_shared(shared) - , m_cacheable(cacheable == Cacheable::Yes) + , m_memory_type(memory_type) { m_vmobject->add_region(*this); } -Region::Region(VirtualRange const& range, NonnullLockRefPtr vmobject, size_t offset_in_vmobject, OwnPtr name, Region::Access access, Cacheable cacheable, bool shared) +Region::Region(VirtualRange const& range, NonnullLockRefPtr vmobject, size_t offset_in_vmobject, OwnPtr name, Region::Access access, MemoryType memory_type, bool shared) : m_range(range) , m_offset_in_vmobject(offset_in_vmobject) , m_vmobject(move(vmobject)) , m_name(move(name)) , m_access(access | ((access & 0x7) << 4)) , m_shared(shared) - , m_cacheable(cacheable == Cacheable::Yes) + , 
m_memory_type(memory_type) { VERIFY(m_range.base().is_page_aligned()); VERIFY(m_range.size()); @@ -91,9 +91,9 @@ ErrorOr> Region::create_unbacked() return adopt_nonnull_own_or_enomem(new (nothrow) Region); } -ErrorOr> Region::create_unplaced(NonnullLockRefPtr vmobject, size_t offset_in_vmobject, OwnPtr name, Region::Access access, Cacheable cacheable, bool shared) +ErrorOr> Region::create_unplaced(NonnullLockRefPtr vmobject, size_t offset_in_vmobject, OwnPtr name, Region::Access access, MemoryType memory_type, bool shared) { - return adopt_nonnull_own_or_enomem(new (nothrow) Region(move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared)); + return adopt_nonnull_own_or_enomem(new (nothrow) Region(move(vmobject), offset_in_vmobject, move(name), access, memory_type, shared)); } ErrorOr> Region::try_clone() @@ -112,7 +112,7 @@ ErrorOr> Region::try_clone() region_name = TRY(m_name->try_clone()); auto region = TRY(Region::try_create_user_accessible( - m_range, vmobject(), m_offset_in_vmobject, move(region_name), access(), m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared)); + m_range, vmobject(), m_offset_in_vmobject, move(region_name), access(), m_memory_type, m_shared)); region->set_mmap(m_mmap, m_mmapped_from_readable, m_mmapped_from_writable); region->set_shared(m_shared); region->set_syscall_region(is_syscall_region()); @@ -133,7 +133,7 @@ ErrorOr> Region::try_clone() clone_region_name = TRY(m_name->try_clone()); auto clone_region = TRY(Region::try_create_user_accessible( - m_range, move(vmobject_clone), m_offset_in_vmobject, move(clone_region_name), access(), m_cacheable ? 
Cacheable::Yes : Cacheable::No, m_shared)); + m_range, move(vmobject_clone), m_offset_in_vmobject, move(clone_region_name), access(), m_memory_type, m_shared)); if (m_stack) { VERIFY(vmobject().is_anonymous()); @@ -189,9 +189,9 @@ size_t Region::amount_shared() const return bytes; } -ErrorOr> Region::try_create_user_accessible(VirtualRange const& range, NonnullLockRefPtr vmobject, size_t offset_in_vmobject, OwnPtr name, Region::Access access, Cacheable cacheable, bool shared) +ErrorOr> Region::try_create_user_accessible(VirtualRange const& range, NonnullLockRefPtr vmobject, size_t offset_in_vmobject, OwnPtr name, Region::Access access, MemoryType memory_type, bool shared) { - return adopt_nonnull_own_or_enomem(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared)); + return adopt_nonnull_own_or_enomem(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, memory_type, shared)); } bool Region::should_cow(size_t page_index) const @@ -243,7 +243,7 @@ bool Region::map_individual_page_impl(size_t page_index, PhysicalAddress paddr, bool is_writable = writable && !(should_cow(page_index) || should_dirty_on_write(page_index)); - pte->set_cache_disabled(!m_cacheable); + pte->set_memory_type(m_memory_type); pte->set_physical_page_base(paddr.get()); pte->set_present(true); pte->set_writable(is_writable); diff --git a/Kernel/Memory/Region.h b/Kernel/Memory/Region.h index 82b08d0fe79..12f8079d0f0 100644 --- a/Kernel/Memory/Region.h +++ b/Kernel/Memory/Region.h @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -50,14 +51,9 @@ public: ReadWriteExecute = Read | Write | Execute, }; - enum class Cacheable { - No = 0, - Yes, - }; - - static ErrorOr> try_create_user_accessible(VirtualRange const&, NonnullLockRefPtr, size_t offset_in_vmobject, OwnPtr name, Region::Access access, Cacheable, bool shared); + static ErrorOr> try_create_user_accessible(VirtualRange const&, 
NonnullLockRefPtr, size_t offset_in_vmobject, OwnPtr name, Region::Access access, MemoryType, bool shared); static ErrorOr> create_unbacked(); - static ErrorOr> create_unplaced(NonnullLockRefPtr, size_t offset_in_vmobject, OwnPtr name, Region::Access access, Cacheable = Cacheable::Yes, bool shared = false); + static ErrorOr> create_unplaced(NonnullLockRefPtr, size_t offset_in_vmobject, OwnPtr name, Region::Access access, MemoryType = MemoryType::Normal, bool shared = false); ~Region(); @@ -72,7 +68,7 @@ public: [[nodiscard]] bool has_been_writable() const { return m_has_been_writable.was_set(); } [[nodiscard]] bool has_been_executable() const { return m_has_been_executable.was_set(); } - [[nodiscard]] bool is_cacheable() const { return m_cacheable; } + [[nodiscard]] MemoryType memory_type() const { return m_memory_type; } [[nodiscard]] StringView name() const { return m_name ? m_name->view() : StringView {}; } [[nodiscard]] OwnPtr take_name() { return move(m_name); } [[nodiscard]] Region::Access access() const { return static_cast(m_access); } @@ -232,8 +228,8 @@ public: private: Region(); - Region(NonnullLockRefPtr, size_t offset_in_vmobject, OwnPtr, Region::Access access, Cacheable, bool shared); - Region(VirtualRange const&, NonnullLockRefPtr, size_t offset_in_vmobject, OwnPtr, Region::Access access, Cacheable, bool shared); + Region(NonnullLockRefPtr, size_t offset_in_vmobject, OwnPtr, Region::Access access, MemoryType, bool shared); + Region(VirtualRange const&, NonnullLockRefPtr, size_t offset_in_vmobject, OwnPtr, Region::Access access, MemoryType, bool shared); [[nodiscard]] bool remap_vmobject_page(size_t page_index, NonnullRefPtr); @@ -263,7 +259,6 @@ private: Atomic m_in_progress_page_faults; u8 m_access { Region::None }; bool m_shared : 1 { false }; - bool m_cacheable : 1 { false }; bool m_stack : 1 { false }; bool m_mmap : 1 { false }; bool m_syscall_region : 1 { false }; @@ -271,6 +266,8 @@ private: bool m_mmapped_from_readable : 1 { false }; bool 
m_mmapped_from_writable : 1 { false }; + MemoryType m_memory_type { MemoryType::Normal }; + SetOnce m_immutable; SetOnce m_initially_loaded_executable_segment; SetOnce m_has_been_readable; diff --git a/Kernel/Memory/ScatterGatherList.cpp b/Kernel/Memory/ScatterGatherList.cpp index f5521d5369a..fa8275a8d3a 100644 --- a/Kernel/Memory/ScatterGatherList.cpp +++ b/Kernel/Memory/ScatterGatherList.cpp @@ -12,7 +12,7 @@ ErrorOr> ScatterGatherList::try_create(AsyncBlockD { auto vm_object = TRY(AnonymousVMObject::try_create_with_physical_pages(allocated_pages)); auto size = TRY(page_round_up((request.block_count() * device_block_size))); - auto region = TRY(MM.allocate_kernel_region_with_vmobject(vm_object, size, region_name, Region::Access::Read | Region::Access::Write, Region::Cacheable::Yes)); + auto region = TRY(MM.allocate_kernel_region_with_vmobject(vm_object, size, region_name, Region::Access::Read | Region::Access::Write, MemoryType::Normal)); return adopt_lock_ref_if_nonnull(new (nothrow) ScatterGatherList(vm_object, move(region))); }