mirror of
https://github.com/SerenityOS/serenity
synced 2026-05-13 02:16:39 +02:00
Kernel/x86: Set the WC PAT memory type for MemoryType::NonCacheable
This removes the old Region::set_write_combine function and replaces all usages of it with setting the MemoryType to NonCacheable instead.
This commit is contained in:
@@ -143,9 +143,6 @@ public:
|
||||
bool is_execute_disabled() const { TODO_AARCH64(); }
|
||||
void set_execute_disabled(bool) { }
|
||||
|
||||
bool is_pat() const { TODO_AARCH64(); }
|
||||
void set_pat(bool) { }
|
||||
|
||||
bool is_null() const { return m_raw == 0; }
|
||||
void clear() { m_raw = 0; }
|
||||
|
||||
|
||||
@@ -129,9 +129,6 @@ public:
|
||||
bool is_execute_disabled() const { TODO_RISCV64(); }
|
||||
void set_execute_disabled(bool b) { set_bit(PageTableEntryBits::Executable, !b); }
|
||||
|
||||
bool is_pat() const { TODO_RISCV64(); }
|
||||
void set_pat(bool) { }
|
||||
|
||||
bool is_null() const { return m_raw == 0; }
|
||||
void clear() { m_raw = 0; }
|
||||
|
||||
|
||||
@@ -117,7 +117,30 @@ public:
|
||||
bool is_write_through() const { return (raw() & WriteThrough) == WriteThrough; }
|
||||
void set_write_through(bool b) { set_bit(WriteThrough, b); }
|
||||
|
||||
void set_memory_type(MemoryType t) { set_bit(CacheDisabled, t != MemoryType::Normal); }
|
||||
void set_memory_type(MemoryType t)
|
||||
{
|
||||
// Encode the requested Kernel MemoryType into this page-table entry's caching bits.
// The PAT is indexed through the PWT (as bit 0), PCD (as bit 1), and PAT (as bit 2) bits.
|
||||
// Clear all three index bits first so the switch below only has to set bits.
m_raw &= ~(WriteThrough | CacheDisabled | PAT);
|
||||
|
||||
// We use the default PAT entries combined with a custom entry for PAT=b100, which maps to WC.
|
||||
// The default entries are backwards-compatible with systems without PAT (which only use the PWT and PCD bits for their original purpose).
|
||||
|
||||
switch (t) {
|
||||
case MemoryType::Normal: // WB (write back) => PAT=0b000
|
||||
// All index bits already cleared above; nothing to set.
break;
|
||||
case MemoryType::NonCacheable: // WC (write combining) => PAT=0b100
|
||||
if (Processor::current().has_pat()) {
|
||||
m_raw |= PAT;
|
||||
break;
|
||||
}
|
||||
|
||||
// Fall back to MemoryType::IO if PAT is not supported.
|
||||
// TODO: Implement a MTRR fallback?
|
||||
[[fallthrough]];
|
||||
case MemoryType::IO: // UC- (uncacheable, can be overridden by WC in MTRRs) => PAT=0b010
|
||||
m_raw |= CacheDisabled;
|
||||
}
|
||||
}
|
||||
|
||||
bool is_global() const { return (raw() & Global) == Global; }
|
||||
void set_global(bool b) { set_bit(Global, b); }
|
||||
@@ -125,9 +148,6 @@ public:
|
||||
bool is_execute_disabled() const { return (raw() & NoExecute) == NoExecute; }
|
||||
void set_execute_disabled(bool b) { set_bit(NoExecute, b); }
|
||||
|
||||
bool is_pat() const { return (raw() & PAT) == PAT; }
|
||||
void set_pat(bool b) { set_bit(PAT, b); }
|
||||
|
||||
bool is_null() const { return m_raw == 0; }
|
||||
void clear() { m_raw = 0; }
|
||||
|
||||
|
||||
@@ -15,9 +15,8 @@ BootFramebufferConsole::BootFramebufferConsole(PhysicalAddress framebuffer_addr,
|
||||
{
|
||||
// NOTE: We're very early in the boot process, memory allocations shouldn't really fail
|
||||
auto framebuffer_end = Memory::page_round_up(framebuffer_addr.offset(height * pitch).get()).release_value();
|
||||
m_framebuffer = MM.allocate_mmio_kernel_region(framebuffer_addr.page_base(), framebuffer_end - framebuffer_addr.page_base().get(), "Boot Framebuffer"sv, Memory::Region::Access::ReadWrite).release_value();
|
||||
m_framebuffer = MM.allocate_mmio_kernel_region(framebuffer_addr.page_base(), framebuffer_end - framebuffer_addr.page_base().get(), "Boot Framebuffer"sv, Memory::Region::Access::ReadWrite, Memory::MemoryType::NonCacheable).release_value();
|
||||
|
||||
[[maybe_unused]] auto result = m_framebuffer->set_write_combine(true);
|
||||
m_framebuffer_data = m_framebuffer->vaddr().offset(framebuffer_addr.offset_in_page()).as_ptr();
|
||||
memset(m_framebuffer_data, 0, height * pitch);
|
||||
}
|
||||
|
||||
@@ -81,10 +81,10 @@ ErrorOr<void> DisplayConnector::allocate_framebuffer_resources(size_t rounded_si
|
||||
if (!m_framebuffer_at_arbitrary_physical_range) {
|
||||
VERIFY(m_framebuffer_address.value().page_base() == m_framebuffer_address.value());
|
||||
m_shared_framebuffer_vmobject = TRY(Memory::SharedFramebufferVMObject::try_create_for_physical_range(m_framebuffer_address.value(), rounded_size));
|
||||
m_framebuffer_region = TRY(MM.allocate_mmio_kernel_region(m_framebuffer_address.value().page_base(), rounded_size, "Framebuffer"sv, Memory::Region::Access::ReadWrite));
|
||||
m_framebuffer_region = TRY(MM.allocate_mmio_kernel_region(m_framebuffer_address.value().page_base(), rounded_size, "Framebuffer"sv, Memory::Region::Access::ReadWrite, m_enable_write_combine_optimization ? Memory::MemoryType::NonCacheable : Memory::MemoryType::IO));
|
||||
} else {
|
||||
m_shared_framebuffer_vmobject = TRY(Memory::SharedFramebufferVMObject::try_create_at_arbitrary_physical_range(rounded_size));
|
||||
m_framebuffer_region = TRY(MM.allocate_kernel_region_with_vmobject(m_shared_framebuffer_vmobject->real_writes_framebuffer_vmobject(), rounded_size, "Framebuffer"sv, Memory::Region::Access::ReadWrite));
|
||||
m_framebuffer_region = TRY(MM.allocate_kernel_region_with_vmobject(m_shared_framebuffer_vmobject->real_writes_framebuffer_vmobject(), rounded_size, "Framebuffer"sv, Memory::Region::Access::ReadWrite, m_enable_write_combine_optimization ? Memory::MemoryType::NonCacheable : Memory::MemoryType::IO));
|
||||
}
|
||||
|
||||
m_framebuffer_data = m_framebuffer_region->vaddr().as_ptr();
|
||||
@@ -134,9 +134,6 @@ ErrorOr<void> DisplayConnector::after_inserting()
|
||||
clean_symlink_to_device_identifier_directory.disarm();
|
||||
|
||||
GraphicsManagement::the().attach_new_display_connector({}, *this);
|
||||
if (m_enable_write_combine_optimization) {
|
||||
[[maybe_unused]] auto result = m_framebuffer_region->set_write_combine(true);
|
||||
}
|
||||
after_inserting_add_to_device_management();
|
||||
return {};
|
||||
}
|
||||
|
||||
@@ -249,8 +249,6 @@ bool Region::map_individual_page_impl(size_t page_index, PhysicalAddress paddr,
|
||||
pte->set_writable(is_writable);
|
||||
if (Processor::current().has_nx())
|
||||
pte->set_execute_disabled(!is_executable());
|
||||
if (Processor::current().has_pat())
|
||||
pte->set_pat(is_write_combine());
|
||||
pte->set_user_allowed(user_allowed);
|
||||
|
||||
return true;
|
||||
@@ -365,18 +363,6 @@ void Region::remap()
|
||||
TODO();
|
||||
}
|
||||
|
||||
ErrorOr<void> Region::set_write_combine(bool enable)
|
||||
{
|
||||
// Write-combining relies on the x86 PAT; without PAT support we cannot
// honor the request (no MTRR fallback is implemented — see dbgln below).
if (enable && !Processor::current().has_pat()) {
|
||||
dbgln("PAT is not supported, implement MTRR fallback if available");
|
||||
return Error::from_errno(ENOTSUP);
|
||||
}
|
||||
|
||||
// Record the new setting, then remap the region so its page-table
// entries pick up the changed cacheability.
m_write_combine = enable;
|
||||
remap();
|
||||
return {};
|
||||
}
|
||||
|
||||
void Region::clear_to_zero()
|
||||
{
|
||||
VERIFY(vmobject().is_anonymous());
|
||||
|
||||
@@ -100,9 +100,6 @@ public:
|
||||
[[nodiscard]] bool is_initially_loaded_executable_segment() const { return m_initially_loaded_executable_segment.was_set(); }
|
||||
void set_initially_loaded_executable_segment() { m_initially_loaded_executable_segment.set(); }
|
||||
|
||||
[[nodiscard]] bool is_write_combine() const { return m_write_combine; }
|
||||
ErrorOr<void> set_write_combine(bool);
|
||||
|
||||
[[nodiscard]] bool is_user() const { return !is_kernel(); }
|
||||
[[nodiscard]] bool is_kernel() const { return vaddr().get() < USER_RANGE_BASE || vaddr().get() >= g_boot_info.kernel_mapping_base; }
|
||||
|
||||
@@ -262,7 +259,6 @@ private:
|
||||
bool m_stack : 1 { false };
|
||||
bool m_mmap : 1 { false };
|
||||
bool m_syscall_region : 1 { false };
|
||||
bool m_write_combine : 1 { false };
|
||||
bool m_mmapped_from_readable : 1 { false };
|
||||
bool m_mmapped_from_writable : 1 { false };
|
||||
|
||||
|
||||
Reference in New Issue
Block a user