This essentially reverts 5ada38f9c3.
Previously, two threads could end up trying to allocate a committed
page at the same time, possibly resulting in a panic because we tried
to allocate more pages than had been committed.
Another problem was that a thread could incorrectly conclude that the
page fault had already been handled: whether a fault was already
processed is determined from the contents of the physical page slot,
so this can happen if the thread handling the fault had already set
the slot to the newly allocated page but had not remapped the page
yet.
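
A minimal sketch of that window, with hypothetical names (PageSlot,
handle_zero_fault_racy, g_vmobject_lock) standing in for the kernel's
actual types; not the real code, just the shape of the race:

#include <atomic>
#include <mutex>

struct PhysicalPage { };

struct PageSlot {
    std::atomic<PhysicalPage*> physical_page { nullptr };
};

std::mutex g_vmobject_lock;

PhysicalPage* allocate_committed_page() { return new PhysicalPage; } // stub
void remap_page(PageSlot&) { /* would update the page tables */ }    // stub

// Racy shape: the lock is dropped between publishing the page and
// remapping it, so a second thread faulting on the same page can see a
// populated slot and return before the mapping actually exists.
void handle_zero_fault_racy(PageSlot& slot)
{
    {
        std::scoped_lock lock(g_vmobject_lock);
        if (slot.physical_page.load() != nullptr)
            return; // Looks handled, but the remap below may not have run yet.
        slot.physical_page.store(allocate_committed_page());
    } // Lock released here.
    remap_page(slot);
}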
This issue is currently harmless: a thread that wrongly concludes the
fault was handled and returns early will simply fault again until the
other thread has finished remapping the page. However, a future commit
will add assertions that a fault was actually handled whenever no
other reason for the fault can be found, and those assertions would
trip on this race.
Prevent these issues by holding the lock across the entire
fault-handling sequence. There might be a better solution to this, but
that would likely require more complex code changes.
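
Reusing the hypothetical names from the sketch above, holding the lock
for longer collapses the whole sequence into one critical section:

// Fixed shape: the slot check, the allocation, and the remap all happen
// under the same lock, so a concurrent fault on this page either runs
// first or observes a page that is already fully mapped.
void handle_zero_fault_fixed(PageSlot& slot)
{
    std::scoped_lock lock(g_vmobject_lock);
    if (slot.physical_page.load() != nullptr)
        return; // Genuinely handled: allocation and remap both completed.
    slot.physical_page.store(allocate_committed_page());
    remap_page(slot);
}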
Also modify the code in handle_fault() a bit to avoid using
should_cow() for zero faults: the checks in should_cow() can end up
referring to a different physical page if another thread handles the
fault immediately after the check.
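
Continuing with the same hypothetical names, a sketch of the safer
shape reads the slot once under the lock and decides the zero-fault
case from that value, rather than calling a should_cow()-style helper
that would re-read the slot:

void handle_fault_sketch(PageSlot& slot)
{
    std::scoped_lock lock(g_vmobject_lock);
    auto* page = slot.physical_page.load();
    if (page == nullptr) {
        // Zero fault: act on the value we just read under the lock.
        slot.physical_page.store(allocate_committed_page());
        remap_page(slot);
        return;
    }
    // A CoW fault on an existing page would be handled here, still
    // under the same lock.
}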
/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/FixedArray.h>
#include <AK/IntrusiveList.h>
#include <AK/RefPtr.h>
#include <Kernel/Forward.h>
#include <Kernel/Library/ListedRefCounted.h>
#include <Kernel/Library/LockWeakable.h>
#include <Kernel/Locking/Mutex.h>
#include <Kernel/Memory/Region.h>

namespace Kernel::Memory {

class VMObject
    : public ListedRefCounted<VMObject, LockType::Spinlock>
    , public LockWeakable<VMObject> {
    friend class MemoryManager;
    friend class Region;

public:
    virtual ~VMObject();

    virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() = 0;

    virtual bool is_anonymous() const { return false; }
    virtual bool is_inode() const { return false; }
    virtual bool is_shared_inode() const { return false; }
    virtual bool is_private_inode() const { return false; }
    virtual bool is_mmio() const { return false; }

    size_t page_count() const { return m_physical_pages.size(); }

    virtual ReadonlySpan<RefPtr<PhysicalRAMPage>> physical_pages() const { return m_physical_pages.span(); }
    virtual Span<RefPtr<PhysicalRAMPage>> physical_pages() { return m_physical_pages.span(); }

    size_t size() const { return m_physical_pages.size() * PAGE_SIZE; }

    virtual StringView class_name() const = 0;

    ALWAYS_INLINE void add_region(Region& region)
    {
        SpinlockLocker locker(m_lock);
        m_regions.append(region);
    }

    ALWAYS_INLINE void remove_region(Region& region)
    {
        SpinlockLocker locker(m_lock);
        m_regions.remove(region);
    }

protected:
    static ErrorOr<FixedArray<RefPtr<PhysicalRAMPage>>> try_create_physical_pages(size_t);
    ErrorOr<FixedArray<RefPtr<PhysicalRAMPage>>> try_clone_physical_pages() const;
    explicit VMObject(FixedArray<RefPtr<PhysicalRAMPage>>&&);

    // Takes m_lock internally before iterating over the regions.
    template<typename Callback>
    void for_each_region(Callback);

    // Expects the caller to already hold m_lock.
    template<typename Callback>
    void for_each_region_locked(Callback);

    void remap_regions_locked();
    void remap_regions();
    bool remap_regions_one_page(size_t page_index, NonnullRefPtr<PhysicalRAMPage> page);

    IntrusiveListNode<VMObject> m_list_node;
    FixedArray<RefPtr<PhysicalRAMPage>> m_physical_pages;

    // Guards m_regions; for_each_region_locked() verifies that it is held.
    mutable Spinlock<LockRank::None> m_lock {};

private:
    VMObject& operator=(VMObject const&) = delete;
    VMObject& operator=(VMObject&&) = delete;
    VMObject(VMObject&&) = delete;

    Region::ListInVMObject m_regions;

public:
    using AllInstancesList = IntrusiveList<&VMObject::m_list_node>;
    static RecursiveSpinlockProtected<VMObject::AllInstancesList, LockRank::None>& all_instances();
};

template<typename Callback>
inline void VMObject::for_each_region(Callback callback)
{
    SpinlockLocker lock(m_lock);
    for (auto& region : m_regions) {
        callback(region);
    }
}

template<typename Callback>
inline void VMObject::for_each_region_locked(Callback callback)
{
    VERIFY(m_lock.is_locked());
    for (auto& region : m_regions) {
        callback(region);
    }
}

}
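
As a usage sketch of the two iteration helpers (the MemoryManager
member below is hypothetical, and Region::remap() stands in for
whatever per-region operation the caller needs): for_each_region()
takes m_lock itself, while for_each_region_locked() verifies that the
caller already holds it.

// Hypothetical friend-side caller; MemoryManager is a friend of
// VMObject, so it may use the protected iteration helpers.
void MemoryManager::remap_all_regions_of(VMObject& vmobject)
{
    // Safe without holding m_lock: for_each_region() locks internally.
    vmobject.for_each_region([](Region& region) {
        region.remap(); // assumed per-region remap operation
    });
}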