author     Andreas Kling <kling@serenityos.org>    2021-08-06 10:45:34 +0200
committer  Andreas Kling <kling@serenityos.org>    2021-08-06 14:05:58 +0200
commit     a1d7ebf85adca1550b5d61c8b7ab7fe95217e0e2
tree       dd2e9c08a07ca98694a040ff2c4bd4c86f2741f8 /Kernel/VM
parent     4e8e1b7b3a2cd25ab4b69cebea32232496f4a5d3
download   serenity-a1d7ebf85adca1550b5d61c8b7ab7fe95217e0e2.zip
Kernel: Rename Kernel/VM/ to Kernel/Memory/
This directory isn't just about virtual memory, it's about all kinds of memory management.
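In practice the rename also moves every include path under the new directory. A minimal before/after sketch (illustrative; the full list of updated call sites is in the complete commit, not in this Kernel/VM-limited view):

// Before this commit:
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/AnonymousVMObject.h>

// After this commit:
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/AnonymousVMObject.h>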
Diffstat (limited to 'Kernel/VM')
-rw-r--r--  Kernel/VM/AllocationStrategy.h        17
-rw-r--r--  Kernel/VM/AnonymousVMObject.cpp      383
-rw-r--r--  Kernel/VM/AnonymousVMObject.h         90
-rw-r--r--  Kernel/VM/InodeVMObject.cpp           92
-rw-r--r--  Kernel/VM/InodeVMObject.h             44
-rw-r--r--  Kernel/VM/MappedROM.h                 36
-rw-r--r--  Kernel/VM/MemoryManager.cpp         1147
-rw-r--r--  Kernel/VM/MemoryManager.h            325
-rw-r--r--  Kernel/VM/PageDirectory.cpp          168
-rw-r--r--  Kernel/VM/PageDirectory.h             70
-rw-r--r--  Kernel/VM/PageFaultResponse.h         17
-rw-r--r--  Kernel/VM/PhysicalPage.cpp            43
-rw-r--r--  Kernel/VM/PhysicalPage.h              71
-rw-r--r--  Kernel/VM/PhysicalRegion.cpp         141
-rw-r--r--  Kernel/VM/PhysicalRegion.h            54
-rw-r--r--  Kernel/VM/PhysicalZone.cpp           198
-rw-r--r--  Kernel/VM/PhysicalZone.h              95
-rw-r--r--  Kernel/VM/PrivateInodeVMObject.cpp    36
-rw-r--r--  Kernel/VM/PrivateInodeVMObject.h      35
-rw-r--r--  Kernel/VM/ProcessPagingScope.cpp      27
-rw-r--r--  Kernel/VM/ProcessPagingScope.h        23
-rw-r--r--  Kernel/VM/Range.cpp                   56
-rw-r--r--  Kernel/VM/Range.h                     68
-rw-r--r--  Kernel/VM/RangeAllocator.cpp         194
-rw-r--r--  Kernel/VM/RangeAllocator.h            48
-rw-r--r--  Kernel/VM/Region.cpp                 458
-rw-r--r--  Kernel/VM/Region.h                   247
-rw-r--r--  Kernel/VM/RingBuffer.cpp              66
-rw-r--r--  Kernel/VM/RingBuffer.h                40
-rw-r--r--  Kernel/VM/ScatterGatherList.cpp       25
-rw-r--r--  Kernel/VM/ScatterGatherList.h         32
-rw-r--r--  Kernel/VM/SharedInodeVMObject.cpp     39
-rw-r--r--  Kernel/VM/SharedInodeVMObject.h       33
-rw-r--r--  Kernel/VM/Space.cpp                  439
-rw-r--r--  Kernel/VM/Space.h                     87
-rw-r--r--  Kernel/VM/TypedMapping.h              48
-rw-r--r--  Kernel/VM/VMObject.cpp                37
-rw-r--r--  Kernel/VM/VMObject.h                 122
38 files changed, 0 insertions(+), 5151 deletions(-)
diff --git a/Kernel/VM/AllocationStrategy.h b/Kernel/VM/AllocationStrategy.h
deleted file mode 100644
index 28cff4c9c0..0000000000
--- a/Kernel/VM/AllocationStrategy.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (c) 2020, the SerenityOS developers.
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-namespace Kernel {
-
-enum class AllocationStrategy {
- Reserve = 0,
- AllocateNow,
- None
-};
-
-}
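The three values map to how AnonymousVMObject (later in this diff) sources its pages: AllocateNow takes committed physical pages immediately, Reserve commits them up front but populates lazily on first touch, and None commits nothing and may fail on a write fault. A minimal caller-side sketch (the call site and variable names are assumed; only the API names come from this diff):

// Commit the pages now, but let them be populated lazily on first access.
auto vmobject = AnonymousVMObject::try_create_with_size(region_size, AllocationStrategy::Reserve);
if (!vmobject)
    return ENOMEM; // commitment failed; no physical pages were reserved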
diff --git a/Kernel/VM/AnonymousVMObject.cpp b/Kernel/VM/AnonymousVMObject.cpp
deleted file mode 100644
index 8602567693..0000000000
--- a/Kernel/VM/AnonymousVMObject.cpp
+++ /dev/null
@@ -1,383 +0,0 @@
-/*
- * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <Kernel/Arch/x86/SmapDisabler.h>
-#include <Kernel/Debug.h>
-#include <Kernel/Process.h>
-#include <Kernel/VM/AnonymousVMObject.h>
-#include <Kernel/VM/MemoryManager.h>
-#include <Kernel/VM/PhysicalPage.h>
-
-namespace Kernel {
-
-RefPtr<VMObject> AnonymousVMObject::try_clone()
-{
- // We need to acquire our lock so we copy a sane state
- ScopedSpinLock lock(m_lock);
-
- if (is_purgeable() && is_volatile()) {
- // If this object is purgeable+volatile, create a new zero-filled purgeable+volatile
- // object, effectively "pre-purging" it in the child process.
- auto clone = try_create_purgeable_with_size(size(), AllocationStrategy::None);
- if (!clone)
- return {};
- clone->m_volatile = true;
- return clone;
- }
-
- // We're the parent. Since we're about to become COW we need to
- // commit the number of pages that we need to potentially allocate
- // so that the parent is still guaranteed to be able to have all
- // non-volatile memory available.
- size_t new_cow_pages_needed = page_count();
-
- dbgln_if(COMMIT_DEBUG, "Cloning {:p}, need {} committed cow pages", this, new_cow_pages_needed);
-
- auto committed_pages = MM.commit_user_physical_pages(new_cow_pages_needed);
- if (!committed_pages.has_value())
- return {};
-
- // Create or replace the committed cow pages. When cloning a previously
- // cloned vmobject, we want to essentially "fork", leaving us and the
- // new clone with one set of shared committed cow pages, and the original
- // one would keep the one it still has. This ensures that the original
- // one and this one, as well as the clone have sufficient resources
- // to cow all pages as needed
- auto new_shared_committed_cow_pages = try_create<SharedCommittedCowPages>(committed_pages.release_value());
-
- if (!new_shared_committed_cow_pages)
- return {};
-
- auto clone = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(*this, *new_shared_committed_cow_pages));
- if (!clone)
- return {};
-
- m_shared_committed_cow_pages = move(new_shared_committed_cow_pages);
-
- // Both original and clone become COW. So create a COW map for ourselves
- // or reset all pages to be copied again if we were previously cloned
- ensure_or_reset_cow_map();
-
- if (m_unused_committed_pages.has_value() && !m_unused_committed_pages->is_empty()) {
- // The parent vmobject didn't use up all committed pages. When
- // cloning (fork) we will overcommit. For this purpose we drop all
- // lazy-commit references and replace them with shared zero pages.
- for (size_t i = 0; i < page_count(); i++) {
- auto& page = clone->m_physical_pages[i];
- if (page && page->is_lazy_committed_page()) {
- page = MM.shared_zero_page();
- }
- }
- }
-
- return clone;
-}
-
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_with_size(size_t size, AllocationStrategy strategy)
-{
- Optional<CommittedPhysicalPageSet> committed_pages;
- if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
- committed_pages = MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
- if (!committed_pages.has_value())
- return {};
- }
- return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
-}
-
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_physically_contiguous_with_size(size_t size)
-{
- auto contiguous_physical_pages = MM.allocate_contiguous_supervisor_physical_pages(size);
- if (contiguous_physical_pages.is_empty())
- return {};
- return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(contiguous_physical_pages.span()));
-}
-
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_purgeable_with_size(size_t size, AllocationStrategy strategy)
-{
- Optional<CommittedPhysicalPageSet> committed_pages;
- if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
- committed_pages = MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
- if (!committed_pages.has_value())
- return {};
- }
- auto vmobject = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
- if (!vmobject)
- return {};
- vmobject->m_purgeable = true;
- return vmobject;
-}
-
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
-{
- return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(physical_pages));
-}
-
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_for_physical_range(PhysicalAddress paddr, size_t size)
-{
- if (paddr.offset(size) < paddr) {
- dbgln("Shenanigans! try_create_for_physical_range({}, {}) would wrap around", paddr, size);
- return nullptr;
- }
- return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(paddr, size));
-}
-
-AnonymousVMObject::AnonymousVMObject(size_t size, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
- : VMObject(size)
- , m_unused_committed_pages(move(committed_pages))
-{
- if (strategy == AllocationStrategy::AllocateNow) {
- // Allocate all pages right now. We know we can get all because we committed the amount needed
- for (size_t i = 0; i < page_count(); ++i)
- physical_pages()[i] = m_unused_committed_pages->take_one();
- } else {
- auto& initial_page = (strategy == AllocationStrategy::Reserve) ? MM.lazy_committed_page() : MM.shared_zero_page();
- for (size_t i = 0; i < page_count(); ++i)
- physical_pages()[i] = initial_page;
- }
-}
-
-AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, size_t size)
- : VMObject(size)
-{
- VERIFY(paddr.page_base() == paddr);
- for (size_t i = 0; i < page_count(); ++i)
- physical_pages()[i] = PhysicalPage::create(paddr.offset(i * PAGE_SIZE), MayReturnToFreeList::No);
-}
-
-AnonymousVMObject::AnonymousVMObject(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
- : VMObject(physical_pages.size() * PAGE_SIZE)
-{
- for (size_t i = 0; i < physical_pages.size(); ++i) {
- m_physical_pages[i] = physical_pages[i];
- }
-}
-
-AnonymousVMObject::AnonymousVMObject(AnonymousVMObject const& other, NonnullRefPtr<SharedCommittedCowPages> shared_committed_cow_pages)
- : VMObject(other)
- , m_shared_committed_cow_pages(move(shared_committed_cow_pages))
- , m_purgeable(other.m_purgeable)
-{
- ensure_cow_map();
-}
-
-AnonymousVMObject::~AnonymousVMObject()
-{
-}
-
-size_t AnonymousVMObject::purge()
-{
- ScopedSpinLock lock(m_lock);
-
- if (!is_purgeable() || !is_volatile())
- return 0;
-
- size_t total_pages_purged = 0;
-
- for (auto& page : m_physical_pages) {
- VERIFY(page);
- if (page->is_shared_zero_page())
- continue;
- page = MM.shared_zero_page();
- ++total_pages_purged;
- }
-
- m_was_purged = true;
-
- for_each_region([](Region& region) {
- region.remap();
- });
-
- return total_pages_purged;
-}
-
-KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)
-{
- VERIFY(is_purgeable());
-
- ScopedSpinLock locker(m_lock);
-
- was_purged = m_was_purged;
- if (m_volatile == is_volatile)
- return KSuccess;
-
- if (is_volatile) {
- // When a VMObject is made volatile, it gives up all of its committed memory.
- // Any physical pages already allocated remain in the VMObject for now, but the kernel is free to take them at any moment.
- for (auto& page : m_physical_pages) {
- if (page && page->is_lazy_committed_page())
- page = MM.shared_zero_page();
- }
-
- m_unused_committed_pages = {};
- m_shared_committed_cow_pages = nullptr;
-
- if (!m_cow_map.is_null())
- m_cow_map = {};
-
- m_volatile = true;
- m_was_purged = false;
-
- for_each_region([&](auto& region) { region.remap(); });
- return KSuccess;
- }
- // When a VMObject is made non-volatile, we try to commit however many pages are not currently available.
- // If that fails, we return false to indicate that memory allocation failed.
- size_t committed_pages_needed = 0;
- for (auto& page : m_physical_pages) {
- VERIFY(page);
- if (page->is_shared_zero_page())
- ++committed_pages_needed;
- }
-
- if (!committed_pages_needed) {
- m_volatile = false;
- return KSuccess;
- }
-
- m_unused_committed_pages = MM.commit_user_physical_pages(committed_pages_needed);
- if (!m_unused_committed_pages.has_value())
- return ENOMEM;
-
- for (auto& page : m_physical_pages) {
- if (page->is_shared_zero_page())
- page = MM.lazy_committed_page();
- }
-
- m_volatile = false;
- m_was_purged = false;
- for_each_region([&](auto& region) { region.remap(); });
- return KSuccess;
-}
-
-NonnullRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Region>)
-{
- return m_unused_committed_pages->take_one();
-}
-
-Bitmap& AnonymousVMObject::ensure_cow_map()
-{
- if (m_cow_map.is_null())
- m_cow_map = Bitmap { page_count(), true };
- return m_cow_map;
-}
-
-void AnonymousVMObject::ensure_or_reset_cow_map()
-{
- if (m_cow_map.is_null())
- ensure_cow_map();
- else
- m_cow_map.fill(true);
-}
-
-bool AnonymousVMObject::should_cow(size_t page_index, bool is_shared) const
-{
- auto& page = physical_pages()[page_index];
- if (page && (page->is_shared_zero_page() || page->is_lazy_committed_page()))
- return true;
- if (is_shared)
- return false;
- return !m_cow_map.is_null() && m_cow_map.get(page_index);
-}
-
-void AnonymousVMObject::set_should_cow(size_t page_index, bool cow)
-{
- ensure_cow_map().set(page_index, cow);
-}
-
-size_t AnonymousVMObject::cow_pages() const
-{
- if (m_cow_map.is_null())
- return 0;
- return m_cow_map.count_slow(true);
-}
-
-PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, VirtualAddress vaddr)
-{
- VERIFY_INTERRUPTS_DISABLED();
- ScopedSpinLock lock(m_lock);
-
- if (is_volatile()) {
- // A COW fault in a volatile region? Userspace is writing to volatile memory, this is a bug. Crash.
- dbgln("COW fault in volatile region, will crash.");
- return PageFaultResponse::ShouldCrash;
- }
-
- auto& page_slot = physical_pages()[page_index];
-
- // If we were sharing committed COW pages with another process, and the other process
- // has exhausted the supply, we can stop counting the shared pages.
- if (m_shared_committed_cow_pages && m_shared_committed_cow_pages->is_empty())
- m_shared_committed_cow_pages = nullptr;
-
- if (page_slot->ref_count() == 1) {
- dbgln_if(PAGE_FAULT_DEBUG, " >> It's a COW page but nobody is sharing it anymore. Remap r/w");
- set_should_cow(page_index, false);
-
- if (m_shared_committed_cow_pages) {
- m_shared_committed_cow_pages->uncommit_one();
- if (m_shared_committed_cow_pages->is_empty())
- m_shared_committed_cow_pages = nullptr;
- }
- return PageFaultResponse::Continue;
- }
-
- RefPtr<PhysicalPage> page;
- if (m_shared_committed_cow_pages) {
- dbgln_if(PAGE_FAULT_DEBUG, " >> It's a committed COW page and it's time to COW!");
- page = m_shared_committed_cow_pages->take_one();
- } else {
- dbgln_if(PAGE_FAULT_DEBUG, " >> It's a COW page and it's time to COW!");
- page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
- if (page.is_null()) {
- dmesgln("MM: handle_cow_fault was unable to allocate a physical page");
- return PageFaultResponse::OutOfMemory;
- }
- }
-
- u8* dest_ptr = MM.quickmap_page(*page);
- dbgln_if(PAGE_FAULT_DEBUG, " >> COW {} <- {}", page->paddr(), page_slot->paddr());
- {
- SmapDisabler disabler;
- void* fault_at;
- if (!safe_memcpy(dest_ptr, vaddr.as_ptr(), PAGE_SIZE, fault_at)) {
- if ((u8*)fault_at >= dest_ptr && (u8*)fault_at <= dest_ptr + PAGE_SIZE)
- dbgln(" >> COW: error copying page {}/{} to {}/{}: failed to write to page at {}",
- page_slot->paddr(), vaddr, page->paddr(), VirtualAddress(dest_ptr), VirtualAddress(fault_at));
- else if ((u8*)fault_at >= vaddr.as_ptr() && (u8*)fault_at <= vaddr.as_ptr() + PAGE_SIZE)
- dbgln(" >> COW: error copying page {}/{} to {}/{}: failed to read from page at {}",
- page_slot->paddr(), vaddr, page->paddr(), VirtualAddress(dest_ptr), VirtualAddress(fault_at));
- else
- VERIFY_NOT_REACHED();
- }
- }
- page_slot = move(page);
- MM.unquickmap_page();
- set_should_cow(page_index, false);
- return PageFaultResponse::Continue;
-}
-
-AnonymousVMObject::SharedCommittedCowPages::SharedCommittedCowPages(CommittedPhysicalPageSet&& committed_pages)
- : m_committed_pages(move(committed_pages))
-{
-}
-
-AnonymousVMObject::SharedCommittedCowPages::~SharedCommittedCowPages()
-{
-}
-
-NonnullRefPtr<PhysicalPage> AnonymousVMObject::SharedCommittedCowPages::take_one()
-{
- ScopedSpinLock locker(m_lock);
- return m_committed_pages.take_one();
-}
-
-void AnonymousVMObject::SharedCommittedCowPages::uncommit_one()
-{
- ScopedSpinLock locker(m_lock);
- m_committed_pages.uncommit_one();
-}
-
-}
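Putting the purgeable pieces above together, the intended lifecycle looks roughly like this (error handling trimmed; the caller shown here is hypothetical, only the AnonymousVMObject API is from this diff):

// Create a purgeable object; pages are committed but populated lazily.
auto vmobject = AnonymousVMObject::try_create_purgeable_with_size(size, AllocationStrategy::Reserve);

// Going volatile gives up the commitment; the kernel may purge() the pages
// at any time (see the purge loop in MemoryManager::allocate_user_physical_page() below).
bool was_purged = false;
(void)vmobject->set_volatile(true, was_purged);

// Going non-volatile re-commits whatever is missing; was_purged tells the
// caller whether the contents were discarded and must be regenerated.
auto result = vmobject->set_volatile(false, was_purged);
if (result.is_error())
    return result; // ENOMEM: could not re-commit the pages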
diff --git a/Kernel/VM/AnonymousVMObject.h b/Kernel/VM/AnonymousVMObject.h
deleted file mode 100644
index 0a6d38aadd..0000000000
--- a/Kernel/VM/AnonymousVMObject.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <Kernel/PhysicalAddress.h>
-#include <Kernel/VM/AllocationStrategy.h>
-#include <Kernel/VM/MemoryManager.h>
-#include <Kernel/VM/PageFaultResponse.h>
-#include <Kernel/VM/VMObject.h>
-
-namespace Kernel {
-
-class AnonymousVMObject final : public VMObject {
-public:
- virtual ~AnonymousVMObject() override;
-
- static RefPtr<AnonymousVMObject> try_create_with_size(size_t, AllocationStrategy);
- static RefPtr<AnonymousVMObject> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
- static RefPtr<AnonymousVMObject> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>);
- static RefPtr<AnonymousVMObject> try_create_purgeable_with_size(size_t, AllocationStrategy);
- static RefPtr<AnonymousVMObject> try_create_physically_contiguous_with_size(size_t);
- virtual RefPtr<VMObject> try_clone() override;
-
- [[nodiscard]] NonnullRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>);
- PageFaultResponse handle_cow_fault(size_t, VirtualAddress);
- size_t cow_pages() const;
- bool should_cow(size_t page_index, bool) const;
- void set_should_cow(size_t page_index, bool);
-
- bool is_purgeable() const { return m_purgeable; }
- bool is_volatile() const { return m_volatile; }
-
- KResult set_volatile(bool is_volatile, bool& was_purged);
-
- size_t purge();
-
-private:
- class SharedCommittedCowPages;
-
- explicit AnonymousVMObject(size_t, AllocationStrategy, Optional<CommittedPhysicalPageSet>);
- explicit AnonymousVMObject(PhysicalAddress, size_t);
- explicit AnonymousVMObject(Span<NonnullRefPtr<PhysicalPage>>);
- explicit AnonymousVMObject(AnonymousVMObject const&, NonnullRefPtr<SharedCommittedCowPages>);
-
- virtual StringView class_name() const override { return "AnonymousVMObject"sv; }
-
- AnonymousVMObject& operator=(AnonymousVMObject const&) = delete;
- AnonymousVMObject& operator=(AnonymousVMObject&&) = delete;
- AnonymousVMObject(AnonymousVMObject&&) = delete;
-
- virtual bool is_anonymous() const override { return true; }
-
- Bitmap& ensure_cow_map();
- void ensure_or_reset_cow_map();
-
- Optional<CommittedPhysicalPageSet> m_unused_committed_pages;
- Bitmap m_cow_map;
-
- // AnonymousVMObject shares committed COW pages with cloned children (happens on fork)
- class SharedCommittedCowPages : public RefCounted<SharedCommittedCowPages> {
- AK_MAKE_NONCOPYABLE(SharedCommittedCowPages);
-
- public:
- SharedCommittedCowPages() = delete;
-
- explicit SharedCommittedCowPages(CommittedPhysicalPageSet&&);
- ~SharedCommittedCowPages();
-
- [[nodiscard]] bool is_empty() const { return m_committed_pages.is_empty(); }
-
- [[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
- void uncommit_one();
-
- public:
- SpinLock<u8> m_lock;
- CommittedPhysicalPageSet m_committed_pages;
- };
-
- RefPtr<SharedCommittedCowPages> m_shared_committed_cow_pages;
-
- bool m_purgeable { false };
- bool m_volatile { false };
- bool m_was_purged { false };
-};
-
-}
diff --git a/Kernel/VM/InodeVMObject.cpp b/Kernel/VM/InodeVMObject.cpp
deleted file mode 100644
index 3ad2ac3d7d..0000000000
--- a/Kernel/VM/InodeVMObject.cpp
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <Kernel/FileSystem/Inode.h>
-#include <Kernel/VM/InodeVMObject.h>
-
-namespace Kernel {
-
-InodeVMObject::InodeVMObject(Inode& inode, size_t size)
- : VMObject(size)
- , m_inode(inode)
- , m_dirty_pages(page_count(), false)
-{
-}
-
-InodeVMObject::InodeVMObject(InodeVMObject const& other)
- : VMObject(other)
- , m_inode(other.m_inode)
- , m_dirty_pages(page_count(), false)
-{
- for (size_t i = 0; i < page_count(); ++i)
- m_dirty_pages.set(i, other.m_dirty_pages.get(i));
-}
-
-InodeVMObject::~InodeVMObject()
-{
-}
-
-size_t InodeVMObject::amount_clean() const
-{
- size_t count = 0;
- VERIFY(page_count() == m_dirty_pages.size());
- for (size_t i = 0; i < page_count(); ++i) {
- if (!m_dirty_pages.get(i) && m_physical_pages[i])
- ++count;
- }
- return count * PAGE_SIZE;
-}
-
-size_t InodeVMObject::amount_dirty() const
-{
- size_t count = 0;
- for (size_t i = 0; i < m_dirty_pages.size(); ++i) {
- if (m_dirty_pages.get(i))
- ++count;
- }
- return count * PAGE_SIZE;
-}
-
-int InodeVMObject::release_all_clean_pages()
-{
- ScopedSpinLock locker(m_lock);
-
- int count = 0;
- for (size_t i = 0; i < page_count(); ++i) {
- if (!m_dirty_pages.get(i) && m_physical_pages[i]) {
- m_physical_pages[i] = nullptr;
- ++count;
- }
- }
- if (count) {
- for_each_region([](auto& region) {
- region.remap();
- });
- }
- return count;
-}
-
-u32 InodeVMObject::writable_mappings() const
-{
- u32 count = 0;
- const_cast<InodeVMObject&>(*this).for_each_region([&](auto& region) {
- if (region.is_writable())
- ++count;
- });
- return count;
-}
-
-u32 InodeVMObject::executable_mappings() const
-{
- u32 count = 0;
- const_cast<InodeVMObject&>(*this).for_each_region([&](auto& region) {
- if (region.is_executable())
- ++count;
- });
- return count;
-}
-
-}
diff --git a/Kernel/VM/InodeVMObject.h b/Kernel/VM/InodeVMObject.h
deleted file mode 100644
index 85cde2ca1b..0000000000
--- a/Kernel/VM/InodeVMObject.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/Bitmap.h>
-#include <Kernel/UnixTypes.h>
-#include <Kernel/VM/VMObject.h>
-
-namespace Kernel {
-
-class InodeVMObject : public VMObject {
-public:
- virtual ~InodeVMObject() override;
-
- Inode& inode() { return *m_inode; }
- Inode const& inode() const { return *m_inode; }
-
- size_t amount_dirty() const;
- size_t amount_clean() const;
-
- int release_all_clean_pages();
-
- u32 writable_mappings() const;
- u32 executable_mappings() const;
-
-protected:
- explicit InodeVMObject(Inode&, size_t);
- explicit InodeVMObject(InodeVMObject const&);
-
- InodeVMObject& operator=(InodeVMObject const&) = delete;
- InodeVMObject& operator=(InodeVMObject&&) = delete;
- InodeVMObject(InodeVMObject&&) = delete;
-
- virtual bool is_inode() const final { return true; }
-
- NonnullRefPtr<Inode> m_inode;
- Bitmap m_dirty_pages;
-};
-
-}
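release_all_clean_pages() is the hook for shedding the inode page cache under memory pressure: clean pages can always be refilled from disk, so they are dropped and the affected regions remapped. A sketch of such a sweep, mirroring the VMObject iteration used in MemoryManager.cpp further down (the loop itself is illustrative; only the InodeVMObject API comes from this diff):

// Drop clean, inode-backed pages across all VMObjects (illustrative sweep).
for_each_vmobject([](auto& vmobject) {
    if (vmobject.is_inode())
        static_cast<InodeVMObject&>(vmobject).release_all_clean_pages();
    return IterationDecision::Continue;
});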
diff --git a/Kernel/VM/MappedROM.h b/Kernel/VM/MappedROM.h
deleted file mode 100644
index e00d47a0cf..0000000000
--- a/Kernel/VM/MappedROM.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/OwnPtr.h>
-#include <Kernel/PhysicalAddress.h>
-#include <Kernel/VM/Region.h>
-
-namespace Kernel {
-
-class MappedROM {
-public:
- const u8* base() const { return region->vaddr().offset(offset).as_ptr(); }
- const u8* end() const { return base() + size; }
- OwnPtr<Region> region;
- size_t size { 0 };
- size_t offset { 0 };
- PhysicalAddress paddr;
-
- Optional<PhysicalAddress> find_chunk_starting_with(StringView prefix, size_t chunk_size) const
- {
- for (auto* candidate = base(); candidate < end(); candidate += chunk_size) {
- if (!__builtin_memcmp(prefix.characters_without_null_termination(), candidate, prefix.length()))
- return paddr_of(candidate);
- }
- return {};
- }
-
- PhysicalAddress paddr_of(const u8* ptr) const { return paddr.offset(ptr - this->base()); }
-};
-
-}
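find_chunk_starting_with() lets firmware-probing code scan a legacy ROM window for a signature; the classic use is locating the ACPI RSDP, whose "RSD PTR " signature sits on a 16-byte boundary. A sketch of that pattern (map_bios() is assumed here as the helper that produces the MappedROM; it is not part of this file):

// Scan the mapped BIOS ROM for the ACPI RSDP signature on 16-byte boundaries.
auto bios = map_bios();
auto rsdp = bios.find_chunk_starting_with("RSD PTR "sv, 16);
if (rsdp.has_value())
    dmesgln("ACPI: RSDP candidate at {}", rsdp.value());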
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
deleted file mode 100644
index 239b95ad39..0000000000
--- a/Kernel/VM/MemoryManager.cpp
+++ /dev/null
@@ -1,1147 +0,0 @@
-/*
- * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <AK/Assertions.h>
-#include <AK/Memory.h>
-#include <AK/StringView.h>
-#include <Kernel/BootInfo.h>
-#include <Kernel/CMOS.h>
-#include <Kernel/FileSystem/Inode.h>
-#include <Kernel/Heap/kmalloc.h>
-#include <Kernel/Multiboot.h>
-#include <Kernel/Panic.h>
-#include <Kernel/Process.h>
-#include <Kernel/Sections.h>
-#include <Kernel/StdLib.h>
-#include <Kernel/VM/AnonymousVMObject.h>
-#include <Kernel/VM/MemoryManager.h>
-#include <Kernel/VM/PageDirectory.h>
-#include <Kernel/VM/PhysicalRegion.h>
-#include <Kernel/VM/SharedInodeVMObject.h>
-
-extern u8 start_of_kernel_image[];
-extern u8 end_of_kernel_image[];
-extern u8 start_of_kernel_text[];
-extern u8 start_of_kernel_data[];
-extern u8 end_of_kernel_bss[];
-extern u8 start_of_ro_after_init[];
-extern u8 end_of_ro_after_init[];
-extern u8 start_of_unmap_after_init[];
-extern u8 end_of_unmap_after_init[];
-extern u8 start_of_kernel_ksyms[];
-extern u8 end_of_kernel_ksyms[];
-
-extern multiboot_module_entry_t multiboot_copy_boot_modules_array[16];
-extern size_t multiboot_copy_boot_modules_count;
-
-// Treat the super pages as logically separate from .bss
-__attribute__((section(".super_pages"))) static u8 super_pages[1 * MiB];
-
-namespace Kernel {
-
-// NOTE: We can NOT use AK::Singleton for this class, because
-// MemoryManager::initialize is called *before* global constructors are
-// run. If we do, then AK::Singleton would get re-initialized, causing
-// the memory manager to be initialized twice!
-static MemoryManager* s_the;
-RecursiveSpinLock s_mm_lock;
-
-MemoryManager& MemoryManager::the()
-{
- return *s_the;
-}
-
-bool MemoryManager::is_initialized()
-{
- return s_the != nullptr;
-}
-
-UNMAP_AFTER_INIT MemoryManager::MemoryManager()
-{
- s_the = this;
-
- ScopedSpinLock lock(s_mm_lock);
- parse_memory_map();
- write_cr3(kernel_page_directory().cr3());
- protect_kernel_image();
-
- // We're temporarily "committing" to two pages that we need to allocate below
- auto committed_pages = commit_user_physical_pages(2);
-
- m_shared_zero_page = committed_pages->take_one();
-
- // We're wasting a page here, we just need a special tag (physical
- // address) so that we know when we need to lazily allocate a page
- // that we should be drawing this page from the committed pool rather
- // than potentially failing if no pages are available anymore.
- // By using a tag we don't have to query the VMObject for every page
- // whether it was committed or not
- m_lazy_committed_page = committed_pages->take_one();
-}
-
-UNMAP_AFTER_INIT MemoryManager::~MemoryManager()
-{
-}
-
-UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image()
-{
- ScopedSpinLock page_lock(kernel_page_directory().get_lock());
- // Disable writing to the kernel text and rodata segments.
- for (auto i = start_of_kernel_text; i < start_of_kernel_data; i += PAGE_SIZE) {
- auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
- pte.set_writable(false);
- }
- if (Processor::current().has_feature(CPUFeature::NX)) {
- // Disable execution of the kernel data, bss and heap segments.
- for (auto i = start_of_kernel_data; i < end_of_kernel_image; i += PAGE_SIZE) {
- auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
- pte.set_execute_disabled(true);
- }
- }
-}
-
-UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
-{
- ScopedSpinLock mm_lock(s_mm_lock);
- ScopedSpinLock page_lock(kernel_page_directory().get_lock());
- // Disable writing to the .ro_after_init section
- for (auto i = (FlatPtr)&start_of_ro_after_init; i < (FlatPtr)&end_of_ro_after_init; i += PAGE_SIZE) {
- auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
- pte.set_writable(false);
- flush_tlb(&kernel_page_directory(), VirtualAddress(i));
- }
-}
-
-void MemoryManager::unmap_text_after_init()
-{
- ScopedSpinLock mm_lock(s_mm_lock);
- ScopedSpinLock page_lock(kernel_page_directory().get_lock());
-
- auto start = page_round_down((FlatPtr)&start_of_unmap_after_init);
- auto end = page_round_up((FlatPtr)&end_of_unmap_after_init);
-
- // Unmap the entire .unmap_after_init section
- for (auto i = start; i < end; i += PAGE_SIZE) {
- auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
- pte.clear();
- flush_tlb(&kernel_page_directory(), VirtualAddress(i));
- }
-
- dmesgln("Unmapped {} KiB of kernel text after init! :^)", (end - start) / KiB);
-}
-
-void MemoryManager::unmap_ksyms_after_init()
-{
- ScopedSpinLock mm_lock(s_mm_lock);
- ScopedSpinLock page_lock(kernel_page_directory().get_lock());
-
- auto start = page_round_down((FlatPtr)start_of_kernel_ksyms);
- auto end = page_round_up((FlatPtr)end_of_kernel_ksyms);
-
- // Unmap the entire .ksyms section
- for (auto i = start; i < end; i += PAGE_SIZE) {
- auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
- pte.clear();
- flush_tlb(&kernel_page_directory(), VirtualAddress(i));
- }
-
- dmesgln("Unmapped {} KiB of kernel symbols after init! :^)", (end - start) / KiB);
-}
-
-UNMAP_AFTER_INIT void MemoryManager::register_reserved_ranges()
-{
- VERIFY(!m_physical_memory_ranges.is_empty());
- ContiguousReservedMemoryRange range;
- for (auto& current_range : m_physical_memory_ranges) {
- if (current_range.type != PhysicalMemoryRangeType::Reserved) {
- if (range.start.is_null())
- continue;
- m_reserved_memory_ranges.append(ContiguousReservedMemoryRange { range.start, current_range.start.get() - range.start.get() });
- range.start.set((FlatPtr) nullptr);
- continue;
- }
- if (!range.start.is_null()) {
- continue;
- }
- range.start = current_range.start;
- }
- if (m_physical_memory_ranges.last().type != PhysicalMemoryRangeType::Reserved)
- return;
- if (range.start.is_null())
- return;
- m_reserved_memory_ranges.append(ContiguousReservedMemoryRange { range.start, m_physical_memory_ranges.last().start.get() + m_physical_memory_ranges.last().length - range.start.get() });
-}
-
-bool MemoryManager::is_allowed_to_mmap_to_userspace(PhysicalAddress start_address, Range const& range) const
-{
- VERIFY(!m_reserved_memory_ranges.is_empty());
- for (auto& current_range : m_reserved_memory_ranges) {
- if (!(current_range.start <= start_address))
- continue;
- if (!(current_range.start.offset(current_range.length) > start_address))
- continue;
- if (current_range.length < range.size())
- return false;
- return true;
- }
- return false;
-}
-
-UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
-{
- // Register used memory regions that we know of.
- m_used_memory_ranges.ensure_capacity(4);
- m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
- m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Prekernel, start_of_prekernel_image, end_of_prekernel_image });
- m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image))) });
-
- if (multiboot_flags & 0x4) {
- auto* bootmods_start = multiboot_copy_boot_modules_array;
- auto* bootmods_end = bootmods_start + multiboot_copy_boot_modules_count;
-
- for (auto* bootmod = bootmods_start; bootmod < bootmods_end; bootmod++) {
- m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::BootModule, PhysicalAddress(bootmod->start), PhysicalAddress(bootmod->end) });
- }
- }
-
- auto* mmap_begin = multiboot_memory_map;
- auto* mmap_end = multiboot_memory_map + multiboot_memory_map_count;
-
- struct ContiguousPhysicalRange {
- PhysicalAddress lower;
- PhysicalAddress upper;
- };
-
- Vector<ContiguousPhysicalRange> contiguous_physical_ranges;
-
- for (auto* mmap = mmap_begin; mmap < mmap_end; mmap++) {
- dmesgln("MM: Multiboot mmap: address={:p}, length={}, type={}", mmap->addr, mmap->len, mmap->type);
-
- auto start_address = PhysicalAddress(mmap->addr);
- auto length = mmap->len;
- switch (mmap->type) {
- case (MULTIBOOT_MEMORY_AVAILABLE):
- m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Usable, start_address, length });
- break;
- case (MULTIBOOT_MEMORY_RESERVED):
- m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Reserved, start_address, length });
- break;
- case (MULTIBOOT_MEMORY_ACPI_RECLAIMABLE):
- m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_Reclaimable, start_address, length });
- break;
- case (MULTIBOOT_MEMORY_NVS):
- m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_NVS, start_address, length });
- break;
- case (MULTIBOOT_MEMORY_BADRAM):
- dmesgln("MM: Warning, detected bad memory range!");
- m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::BadMemory, start_address, length });
- break;
- default:
- dbgln("MM: Unknown range!");
- m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Unknown, start_address, length });
- break;
- }
-
- if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE)
- continue;
-
- // Fix up unaligned memory regions.
- auto diff = (FlatPtr)mmap->addr % PAGE_SIZE;
- if (diff != 0) {
- dmesgln("MM: Got an unaligned physical_region from the bootloader; correcting {:p} by {} bytes", mmap->addr, diff);
- diff = PAGE_SIZE - diff;
- mmap->addr += diff;
- mmap->len -= diff;
- }
- if ((mmap->len % PAGE_SIZE) != 0) {
- dmesgln("MM: Got an unaligned physical_region from the bootloader; correcting length {} by {} bytes", mmap->len, mmap->len % PAGE_SIZE);
- mmap->len -= mmap->len % PAGE_SIZE;
- }
- if (mmap->len < PAGE_SIZE) {
- dmesgln("MM: Memory physical_region from bootloader is too small; we want >= {} bytes, but got {} bytes", PAGE_SIZE, mmap->len);
- continue;
- }
-
- for (PhysicalSize page_base = mmap->addr; page_base <= (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
- auto addr = PhysicalAddress(page_base);
-
- // Skip used memory ranges.
- bool should_skip = false;
- for (auto& used_range : m_used_memory_ranges) {
- if (addr.get() >= used_range.start.get() && addr.get() <= used_range.end.get()) {
- should_skip = true;
- break;
- }
- }
- if (should_skip)
- continue;
-
- if (contiguous_physical_ranges.is_empty() || contiguous_physical_ranges.last().upper.offset(PAGE_SIZE) != addr) {
- contiguous_physical_ranges.append(ContiguousPhysicalRange {
- .lower = addr,
- .upper = addr,
- });
- } else {
- contiguous_physical_ranges.last().upper = addr;
- }
- }
- }
-
- for (auto& range : contiguous_physical_ranges) {
- m_user_physical_regions.append(PhysicalRegion::try_create(range.lower, range.upper).release_nonnull());
- }
-
- // Super pages are guaranteed to be in the first 16MB of physical memory
- VERIFY(virtual_to_low_physical((FlatPtr)super_pages) + sizeof(super_pages) < 0x1000000);
-
- // Append the statically-allocated super physical region.
- m_super_physical_region = PhysicalRegion::try_create(
- PhysicalAddress(virtual_to_low_physical(FlatPtr(super_pages))),
- PhysicalAddress(virtual_to_low_physical(FlatPtr(super_pages + sizeof(super_pages)))));
- VERIFY(m_super_physical_region);
-
- m_system_memory_info.super_physical_pages += m_super_physical_region->size();
-
- for (auto& region : m_user_physical_regions)
- m_system_memory_info.user_physical_pages += region.size();
-
- register_reserved_ranges();
- for (auto& range : m_reserved_memory_ranges) {
- dmesgln("MM: Contiguous reserved range from {}, length is {}", range.start, range.length);
- }
-
- initialize_physical_pages();
-
- VERIFY(m_system_memory_info.super_physical_pages > 0);
- VERIFY(m_system_memory_info.user_physical_pages > 0);
-
- // We start out with no committed pages
- m_system_memory_info.user_physical_pages_uncommitted = m_system_memory_info.user_physical_pages;
-
- for (auto& used_range : m_used_memory_ranges) {
- dmesgln("MM: {} range @ {} - {} (size {:#x})", UserMemoryRangeTypeNames[to_underlying(used_range.type)], used_range.start, used_range.end.offset(-1), used_range.end.as_ptr() - used_range.start.as_ptr());
- }
-
- dmesgln("MM: Super physical region: {} - {} (size {:#x})", m_super_physical_region->lower(), m_super_physical_region->upper().offset(-1), PAGE_SIZE * m_super_physical_region->size());
- m_super_physical_region->initialize_zones();
-
- for (auto& region : m_user_physical_regions) {
- dmesgln("MM: User physical region: {} - {} (size {:#x})", region.lower(), region.upper().offset(-1), PAGE_SIZE * region.size());
- region.initialize_zones();
- }
-}
-
-UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
-{
- // We assume that the physical page range is contiguous and doesn't contain huge gaps!
- PhysicalAddress highest_physical_address;
- for (auto& range : m_used_memory_ranges) {
- if (range.end.get() > highest_physical_address.get())
- highest_physical_address = range.end;
- }
- for (auto& region : m_physical_memory_ranges) {
- auto range_end = PhysicalAddress(region.start).offset(region.length);
- if (range_end.get() > highest_physical_address.get())
- highest_physical_address = range_end;
- }
-
- // Calculate how many total physical pages the array will have
- m_physical_page_entries_count = PhysicalAddress::physical_page_index(highest_physical_address.get()) + 1;
- VERIFY(m_physical_page_entries_count != 0);
- VERIFY(!Checked<decltype(m_physical_page_entries_count)>::multiplication_would_overflow(m_physical_page_entries_count, sizeof(PhysicalPageEntry)));
-
- // Calculate how many bytes the array will consume
- auto physical_page_array_size = m_physical_page_entries_count * sizeof(PhysicalPageEntry);
- auto physical_page_array_pages = page_round_up(physical_page_array_size) / PAGE_SIZE;
- VERIFY(physical_page_array_pages * PAGE_SIZE >= physical_page_array_size);
-
- // Calculate how many page tables we will need to be able to map them all
- auto needed_page_table_count = (physical_page_array_pages + 512 - 1) / 512;
-
- auto physical_page_array_pages_and_page_tables_count = physical_page_array_pages + needed_page_table_count;
-
- // Now that we know how much memory we need for a contiguous array of PhysicalPage instances, find a memory region that can fit it
- PhysicalRegion* found_region { nullptr };
- Optional<size_t> found_region_index;
- for (size_t i = 0; i < m_user_physical_regions.size(); ++i) {
- auto& region = m_user_physical_regions[i];
- if (region.size() >= physical_page_array_pages_and_page_tables_count) {
- found_region = &region;
- found_region_index = i;
- break;
- }
- }
-
- if (!found_region) {
- dmesgln("MM: Need {} bytes for physical page management, but no memory region is large enough!", physical_page_array_pages_and_page_tables_count);
- VERIFY_NOT_REACHED();
- }
-
- VERIFY(m_system_memory_info.user_physical_pages >= physical_page_array_pages_and_page_tables_count);
- m_system_memory_info.user_physical_pages -= physical_page_array_pages_and_page_tables_count;
-
- if (found_region->size() == physical_page_array_pages_and_page_tables_count) {
- // We're stealing the entire region
- m_physical_pages_region = m_user_physical_regions.take(*found_region_index);
- } else {
- m_physical_pages_region = found_region->try_take_pages_from_beginning(physical_page_array_pages_and_page_tables_count);
- }
- m_used_memory_ranges.append({ UsedMemoryRangeType::PhysicalPages, m_physical_pages_region->lower(), m_physical_pages_region->upper() });
-
- // Create the bare page directory. This is not a fully constructed page directory and merely contains the allocators!
- m_kernel_page_directory = PageDirectory::must_create_kernel_page_directory();
-
- // Allocate a virtual address range for our array
- auto range = m_kernel_page_directory->range_allocator().allocate_anywhere(physical_page_array_pages * PAGE_SIZE);
- if (!range.has_value()) {
- dmesgln("MM: Could not allocate {} bytes to map physical page array!", physical_page_array_pages * PAGE_SIZE);
- VERIFY_NOT_REACHED();
- }
-
- // Now that we have our special m_physical_pages_region region with enough pages to hold the entire array
- // try to map the entire region into kernel space so we always have it
- // We can't use ensure_pte here because it would try to allocate a PhysicalPage and we don't have the array
- // mapped yet so we can't create them
- ScopedSpinLock lock(s_mm_lock);
-
- // Create page tables at the beginning of m_physical_pages_region, followed by the PhysicalPageEntry array
- auto page_tables_base = m_physical_pages_region->lower();
- auto physical_page_array_base = page_tables_base.offset(needed_page_table_count * PAGE_SIZE);
- auto physical_page_array_current_page = physical_page_array_base.get();
- auto virtual_page_array_base = range.value().base().get();
- auto virtual_page_array_current_page = virtual_page_array_base;
- for (size_t pt_index = 0; pt_index < needed_page_table_count; pt_index++) {
- auto virtual_page_base_for_this_pt = virtual_page_array_current_page;
- auto pt_paddr = page_tables_base.offset(pt_index * PAGE_SIZE);
- auto* pt = reinterpret_cast<PageTableEntry*>(quickmap_page(pt_paddr));
- __builtin_memset(pt, 0, PAGE_SIZE);
- for (size_t pte_index = 0; pte_index < PAGE_SIZE / sizeof(PageTableEntry); pte_index++) {
- auto& pte = pt[pte_index];
- pte.set_physical_page_base(physical_page_array_current_page);
- pte.set_user_allowed(false);
- pte.set_writable(true);
- if (Processor::current().has_feature(CPUFeature::NX))
- pte.set_execute_disabled(true);
- pte.set_global(true);
- pte.set_present(true);
-
- physical_page_array_current_page += PAGE_SIZE;
- virtual_page_array_current_page += PAGE_SIZE;
- }
- unquickmap_page();
-
- // Hook the page table into the kernel page directory
- u32 page_directory_index = (virtual_page_base_for_this_pt >> 21) & 0x1ff;
- auto* pd = reinterpret_cast<PageDirectoryEntry*>(quickmap_page(boot_pd_kernel));
- PageDirectoryEntry& pde = pd[page_directory_index];
-
- VERIFY(!pde.is_present()); // Nothing should be using this PD yet
-
- // We can't use ensure_pte quite yet!
- pde.set_page_table_base(pt_paddr.get());
- pde.set_user_allowed(false);
- pde.set_present(true);
- pde.set_writable(true);
- pde.set_global(true);
-
- unquickmap_page();
-
- flush_tlb_local(VirtualAddress(virtual_page_base_for_this_pt));
- }
-
- // We now have the entire PhysicalPageEntry array mapped!
- m_physical_page_entries = (PhysicalPageEntry*)range.value().base().get();
- for (size_t i = 0; i < m_physical_page_entries_count; i++)
- new (&m_physical_page_entries[i]) PageTableEntry();
-
- // Now we should be able to allocate PhysicalPage instances,
- // so finish setting up the kernel page directory
- m_kernel_page_directory->allocate_kernel_directory();
-
- // Now create legit PhysicalPage objects for the page tables we created, so that
- // we can put them into kernel_page_directory().m_page_tables
- auto& kernel_page_tables = kernel_page_directory().m_page_tables;
- virtual_page_array_current_page = virtual_page_array_base;
- for (size_t pt_index = 0; pt_index < needed_page_table_count; pt_index++) {
- VERIFY(virtual_page_array_current_page <= range.value().end().get());
- auto pt_paddr = page_tables_base.offset(pt_index * PAGE_SIZE);
- auto physical_page_index = PhysicalAddress::physical_page_index(pt_paddr.get());
- auto& physical_page_entry = m_physical_page_entries[physical_page_index];
- auto physical_page = adopt_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(MayReturnToFreeList::No));
- auto result = kernel_page_tables.set(virtual_page_array_current_page & ~0x1fffff, move(physical_page));
- VERIFY(result == AK::HashSetResult::InsertedNewEntry);
-
- virtual_page_array_current_page += (PAGE_SIZE / sizeof(PageTableEntry)) * PAGE_SIZE;
- }
-
- dmesgln("MM: Physical page entries: {}", range.value());
-}
-
-PhysicalPageEntry& MemoryManager::get_physical_page_entry(PhysicalAddress physical_address)
-{
- VERIFY(m_physical_page_entries);
- auto physical_page_entry_index = PhysicalAddress::physical_page_index(physical_address.get());
- VERIFY(physical_page_entry_index < m_physical_page_entries_count);
- return m_physical_page_entries[physical_page_entry_index];
-}
-
-PhysicalAddress MemoryManager::get_physical_address(PhysicalPage const& physical_page)
-{
- PhysicalPageEntry const& physical_page_entry = *reinterpret_cast<PhysicalPageEntry const*>((u8 const*)&physical_page - __builtin_offsetof(PhysicalPageEntry, allocated.physical_page));
- VERIFY(m_physical_page_entries);
- size_t physical_page_entry_index = &physical_page_entry - m_physical_page_entries;
- VERIFY(physical_page_entry_index < m_physical_page_entries_count);
- return PhysicalAddress((PhysicalPtr)physical_page_entry_index * PAGE_SIZE);
-}
-
-PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress vaddr)
-{
- VERIFY_INTERRUPTS_DISABLED();
- VERIFY(s_mm_lock.own_lock());
- VERIFY(page_directory.get_lock().own_lock());
- u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
- u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
- u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
-
- auto* pd = quickmap_pd(const_cast<PageDirectory&>(page_directory), page_directory_table_index);
- PageDirectoryEntry const& pde = pd[page_directory_index];
- if (!pde.is_present())
- return nullptr;
-
- return &quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
-}
-
-PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
-{
- VERIFY_INTERRUPTS_DISABLED();
- VERIFY(s_mm_lock.own_lock());
- VERIFY(page_directory.get_lock().own_lock());
- u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
- u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
- u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
-
- auto* pd = quickmap_pd(page_directory, page_directory_table_index);
- PageDirectoryEntry& pde = pd[page_directory_index];
- if (!pde.is_present()) {
- bool did_purge = false;
- auto page_table = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge);
- if (!page_table) {
- dbgln("MM: Unable to allocate page table to map {}", vaddr);
- return nullptr;
- }
- if (did_purge) {
- // If any memory had to be purged, ensure_pte may have been called as part
- // of the purging process. So we need to re-map the pd in this case to ensure
- // we're writing to the correct underlying physical page
- pd = quickmap_pd(page_directory, page_directory_table_index);
- VERIFY(&pde == &pd[page_directory_index]); // Sanity check
-
- VERIFY(!pde.is_present()); // Should have not changed
- }
- pde.set_page_table_base(page_table->paddr().get());
- pde.set_user_allowed(true);
- pde.set_present(true);
- pde.set_writable(true);
- pde.set_global(&page_directory == m_kernel_page_directory.ptr());
- // Use page_directory_table_index and page_directory_index as key
- // This allows us to release the page table entry when no longer needed
- auto result = page_directory.m_page_tables.set(vaddr.get() & ~(FlatPtr)0x1fffff, move(page_table));
- // If you're hitting this VERIFY on x86_64 chances are a 64-bit pointer was truncated somewhere
- VERIFY(result == AK::HashSetResult::InsertedNewEntry);
- }
-
- return &quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
-}
-
-void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, bool is_last_release)
-{
- VERIFY_INTERRUPTS_DISABLED();
- VERIFY(s_mm_lock.own_lock());
- VERIFY(page_directory.get_lock().own_lock());
- u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
- u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
- u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
-
- auto* pd = quickmap_pd(page_directory, page_directory_table_index);
- PageDirectoryEntry& pde = pd[page_directory_index];
- if (pde.is_present()) {
- auto* page_table = quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()));
- auto& pte = page_table[page_table_index];
- pte.clear();
-
- if (is_last_release || page_table_index == 0x1ff) {
- // If this is the last PTE in a region or the last PTE in a page table then
- // check if we can also release the page table
- bool all_clear = true;
- for (u32 i = 0; i <= 0x1ff; i++) {
- if (!page_table[i].is_null()) {
- all_clear = false;
- break;
- }
- }
- if (all_clear) {
- pde.clear();
-
- auto result = page_directory.m_page_tables.remove(vaddr.get() & ~0x1fffff);
- VERIFY(result);
- }
- }
- }
-}
-
-UNMAP_AFTER_INIT void MemoryManager::initialize(u32 cpu)
-{
- ProcessorSpecific<MemoryManagerData>::initialize();
-
- if (cpu == 0) {
- new MemoryManager;
- kmalloc_enable_expand();
- }
-}
-
-Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
-{
- ScopedSpinLock lock(s_mm_lock);
- for (auto& region : MM.m_kernel_regions) {
- if (region.contains(vaddr))
- return &region;
- }
- return nullptr;
-}
-
-Region* MemoryManager::find_user_region_from_vaddr_no_lock(Space& space, VirtualAddress vaddr)
-{
- VERIFY(space.get_lock().own_lock());
- return space.find_region_containing({ vaddr, 1 });
-}
-
-Region* MemoryManager::find_user_region_from_vaddr(Space& space, VirtualAddress vaddr)
-{
- ScopedSpinLock lock(space.get_lock());
- return find_user_region_from_vaddr_no_lock(space, vaddr);
-}
-
-void MemoryManager::validate_syscall_preconditions(Space& space, RegisterState const& regs)
-{
- // We take the space lock once here and then use the no_lock variants
- // to avoid excessive spinlock recursion in this extremely common path.
- ScopedSpinLock lock(space.get_lock());
-
- auto unlock_and_handle_crash = [&lock, &regs](const char* description, int signal) {
- lock.unlock();
- handle_crash(regs, description, signal);
- };
-
- {
- VirtualAddress userspace_sp = VirtualAddress { regs.userspace_sp() };
- if (!MM.validate_user_stack_no_lock(space, userspace_sp)) {
- dbgln("Invalid stack pointer: {:p}", userspace_sp);
- unlock_and_handle_crash("Bad stack on syscall entry", SIGSTKFLT);
- }
- }
-
- {
- VirtualAddress ip = VirtualAddress { regs.ip() };
- auto* calling_region = MM.find_user_region_from_vaddr_no_lock(space, ip);
- if (!calling_region) {
- dbgln("Syscall from {:p} which has no associated region", ip);
- unlock_and_handle_crash("Syscall from unknown region", SIGSEGV);
- }
-
- if (calling_region->is_writable()) {
- dbgln("Syscall from writable memory at {:p}", ip);
- unlock_and_handle_crash("Syscall from writable memory", SIGSEGV);
- }
-
- if (space.enforces_syscall_regions() && !calling_region->is_syscall_region()) {
- dbgln("Syscall from non-syscall region");
- unlock_and_handle_crash("Syscall from non-syscall region", SIGSEGV);
- }
- }
-}
-
-Region* MemoryManager::find_region_from_vaddr(VirtualAddress vaddr)
-{
- ScopedSpinLock lock(s_mm_lock);
- if (auto* region = kernel_region_from_vaddr(vaddr))
- return region;
- auto page_directory = PageDirectory::find_by_cr3(read_cr3());
- if (!page_directory)
- return nullptr;
- VERIFY(page_directory->space());
- return find_user_region_from_vaddr(*page_directory->space(), vaddr);
-}
-
-PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
-{
- VERIFY_INTERRUPTS_DISABLED();
- if (Processor::current().in_irq()) {
- dbgln("CPU[{}] BUG! Page fault while handling IRQ! code={}, vaddr={}, irq level: {}",
- Processor::id(), fault.code(), fault.vaddr(), Processor::current().in_irq());
- dump_kernel_regions();
- return PageFaultResponse::ShouldCrash;
- }
- dbgln_if(PAGE_FAULT_DEBUG, "MM: CPU[{}] handle_page_fault({:#04x}) at {}", Processor::id(), fault.code(), fault.vaddr());
- auto* region = find_region_from_vaddr(fault.vaddr());
- if (!region) {
- return PageFaultResponse::ShouldCrash;
- }
- return region->handle_fault(fault);
-}
-
-OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
-{
- VERIFY(!(size % PAGE_SIZE));
- ScopedSpinLock lock(s_mm_lock);
- auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
- if (!range.has_value())
- return {};
- auto vmobject = AnonymousVMObject::try_create_physically_contiguous_with_size(size);
- if (!vmobject) {
- kernel_page_directory().range_allocator().deallocate(range.value());
- return {};
- }
- return allocate_kernel_region_with_vmobject(range.value(), *vmobject, name, access, cacheable);
-}
-
-OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, StringView name, Region::Access access, AllocationStrategy strategy, Region::Cacheable cacheable)
-{
- VERIFY(!(size % PAGE_SIZE));
- auto vm_object = AnonymousVMObject::try_create_with_size(size, strategy);
- if (!vm_object)
- return {};
- ScopedSpinLock lock(s_mm_lock);
- auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
- if (!range.has_value())
- return {};
- return allocate_kernel_region_with_vmobject(range.value(), vm_object.release_nonnull(), name, access, cacheable);
-}
-
-OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
-{
- auto vm_object = AnonymousVMObject::try_create_for_physical_range(paddr, size);
- if (!vm_object)
- return {};
- VERIFY(!(size % PAGE_SIZE));
- ScopedSpinLock lock(s_mm_lock);
- auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
- if (!range.has_value())
- return {};
- return allocate_kernel_region_with_vmobject(range.value(), *vm_object, name, access, cacheable);
-}
-
-OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
-{
- auto vm_object = AnonymousVMObject::try_create_for_physical_range(paddr, size);
- if (!vm_object)
- return {};
- VERIFY(!(size % PAGE_SIZE));
- ScopedSpinLock lock(s_mm_lock);
- auto range = kernel_page_directory().identity_range_allocator().allocate_specific(VirtualAddress(paddr.get()), size);
- if (!range.has_value())
- return {};
- return allocate_kernel_region_with_vmobject(range.value(), *vm_object, name, access, cacheable);
-}
-
-OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(Range const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
-{
- ScopedSpinLock lock(s_mm_lock);
- auto region = Region::try_create_kernel_only(range, vmobject, 0, KString::try_create(name), access, cacheable);
- if (region)
- region->map(kernel_page_directory());
- return region;
-}
-
-OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
-{
- VERIFY(!(size % PAGE_SIZE));
- ScopedSpinLock lock(s_mm_lock);
- auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
- if (!range.has_value())
- return {};
- return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, cacheable);
-}
-
-Optional<CommittedPhysicalPageSet> MemoryManager::commit_user_physical_pages(size_t page_count)
-{
- VERIFY(page_count > 0);
- ScopedSpinLock lock(s_mm_lock);
- if (m_system_memory_info.user_physical_pages_uncommitted < page_count)
- return {};
-
- m_system_memory_info.user_physical_pages_uncommitted -= page_count;
- m_system_memory_info.user_physical_pages_committed += page_count;
- return CommittedPhysicalPageSet { {}, page_count };
-}
-
-void MemoryManager::uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count)
-{
- VERIFY(page_count > 0);
-
- ScopedSpinLock lock(s_mm_lock);
- VERIFY(m_system_memory_info.user_physical_pages_committed >= page_count);
-
- m_system_memory_info.user_physical_pages_uncommitted += page_count;
- m_system_memory_info.user_physical_pages_committed -= page_count;
-}
-
-void MemoryManager::deallocate_physical_page(PhysicalAddress paddr)
-{
- ScopedSpinLock lock(s_mm_lock);
-
- // Are we returning a user page?
- for (auto& region : m_user_physical_regions) {
- if (!region.contains(paddr))
- continue;
-
- region.return_page(paddr);
- --m_system_memory_info.user_physical_pages_used;
-
- // Always return pages to the uncommitted pool. Pages that were
- // committed and allocated are only freed upon request. Once
- // returned there is no guarantee of being able to get them back.
- ++m_system_memory_info.user_physical_pages_uncommitted;
- return;
- }
-
- // If it's not a user page, it should be a supervisor page.
- if (!m_super_physical_region->contains(paddr))
- PANIC("MM: deallocate_user_physical_page couldn't figure out region for page @ {}", paddr);
-
- m_super_physical_region->return_page(paddr);
- --m_system_memory_info.super_physical_pages_used;
-}
-
-RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page(bool committed)
-{
- VERIFY(s_mm_lock.is_locked());
- RefPtr<PhysicalPage> page;
- if (committed) {
- // Draw from the committed pages pool. We should always have these pages available
- VERIFY(m_system_memory_info.user_physical_pages_committed > 0);
- m_system_memory_info.user_physical_pages_committed--;
- } else {
- // We need to make sure we don't touch pages that we have committed to
- if (m_system_memory_info.user_physical_pages_uncommitted == 0)
- return {};
- m_system_memory_info.user_physical_pages_uncommitted--;
- }
- for (auto& region : m_user_physical_regions) {
- page = region.take_free_page();
- if (!page.is_null()) {
- ++m_system_memory_info.user_physical_pages_used;
- break;
- }
- }
- VERIFY(!committed || !page.is_null());
- return page;
-}
-
-NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
-{
- ScopedSpinLock lock(s_mm_lock);
- auto page = find_free_user_physical_page(true);
- if (should_zero_fill == ShouldZeroFill::Yes) {
- auto* ptr = quickmap_page(*page);
- memset(ptr, 0, PAGE_SIZE);
- unquickmap_page();
- }
- return page.release_nonnull();
-}
-
-RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
-{
- ScopedSpinLock lock(s_mm_lock);
- auto page = find_free_user_physical_page(false);
- bool purged_pages = false;
-
- if (!page) {
- // We didn't have a single free physical page. Let's try to free something up!
- // First, we look for a purgeable VMObject in the volatile state.
- for_each_vmobject([&](auto& vmobject) {
- if (!vmobject.is_anonymous())
- return IterationDecision::Continue;
- auto& anonymous_vmobject = static_cast<AnonymousVMObject&>(vmobject);
- if (!anonymous_vmobject.is_purgeable() || !anonymous_vmobject.is_volatile())
- return IterationDecision::Continue;
- if (auto purged_page_count = anonymous_vmobject.purge()) {
- dbgln("MM: Purge saved the day! Purged {} pages from AnonymousVMObject", purged_page_count);
- page = find_free_user_physical_page(false);
- purged_pages = true;
- VERIFY(page);
- return IterationDecision::Break;
- }
- return IterationDecision::Continue;
- });
- if (!page) {
- dmesgln("MM: no user physical pages available");
- return {};
- }
- }
-
- if (should_zero_fill == ShouldZeroFill::Yes) {
- auto* ptr = quickmap_page(*page);
- memset(ptr, 0, PAGE_SIZE);
- unquickmap_page();
- }
-
- if (did_purge)
- *did_purge = purged_pages;
- return page;
-}
-
-NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size)
-{
- VERIFY(!(size % PAGE_SIZE));
- ScopedSpinLock lock(s_mm_lock);
- size_t count = ceil_div(size, static_cast<size_t>(PAGE_SIZE));
- auto physical_pages = m_super_physical_region->take_contiguous_free_pages(count);
-
- if (physical_pages.is_empty()) {
- dmesgln("MM: no super physical pages available");
- VERIFY_NOT_REACHED();
- return {};
- }
-
- auto cleanup_region = MM.allocate_kernel_region(physical_pages[0].paddr(), PAGE_SIZE * count, "MemoryManager Allocation Sanitization", Region::Access::Read | Region::Access::Write);
- fast_u32_fill((u32*)cleanup_region->vaddr().as_ptr(), 0, (PAGE_SIZE * count) / sizeof(u32));
- m_system_memory_info.super_physical_pages_used += count;
- return physical_pages;
-}
-
-RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
-{
- ScopedSpinLock lock(s_mm_lock);
- auto page = m_super_physical_region->take_free_page();
-
- if (!page) {
- dmesgln("MM: no super physical pages available");
- VERIFY_NOT_REACHED();
- return {};
- }
-
- fast_u32_fill((u32*)page->paddr().offset(physical_to_virtual_offset).as_ptr(), 0, PAGE_SIZE / sizeof(u32));
- ++m_system_memory_info.super_physical_pages_used;
- return page;
-}
-
-void MemoryManager::enter_process_paging_scope(Process& process)
-{
- enter_space(process.space());
-}
-
-void MemoryManager::enter_space(Space& space)
-{
- auto current_thread = Thread::current();
- VERIFY(current_thread != nullptr);
- ScopedSpinLock lock(s_mm_lock);
-
- current_thread->regs().cr3 = space.page_directory().cr3();
- write_cr3(space.page_directory().cr3());
-}
-
-void MemoryManager::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
-{
- Processor::flush_tlb_local(vaddr, page_count);
-}
-
-void MemoryManager::flush_tlb(PageDirectory const* page_directory, VirtualAddress vaddr, size_t page_count)
-{
- Processor::flush_tlb(page_directory, vaddr, page_count);
-}
-
-PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t pdpt_index)
-{
- VERIFY(s_mm_lock.own_lock());
- auto& mm_data = get_data();
- auto& pte = boot_pd_kernel_pt1023[(KERNEL_QUICKMAP_PD - KERNEL_PT1024_BASE) / PAGE_SIZE];
- auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
- if (pte.physical_page_base() != pd_paddr.get()) {
- pte.set_physical_page_base(pd_paddr.get());
- pte.set_present(true);
- pte.set_writable(true);
- pte.set_user_allowed(false);
- // Because we must continue to hold the MM lock while we use this
- // mapping, it is sufficient to only flush on the current CPU. Other
- // CPUs trying to use this API must wait on the MM lock anyway
- flush_tlb_local(VirtualAddress(KERNEL_QUICKMAP_PD));
- } else {
- // Even though we don't allow this to be called concurrently, it's
- // possible that this PD was mapped on a different CPU and we don't
- // broadcast the flush. If so, we still need to flush the TLB.
- if (mm_data.m_last_quickmap_pd != pd_paddr)
- flush_tlb_local(VirtualAddress(KERNEL_QUICKMAP_PD));
- }
- mm_data.m_last_quickmap_pd = pd_paddr;
- return (PageDirectoryEntry*)KERNEL_QUICKMAP_PD;
-}
-
-PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
-{
- VERIFY(s_mm_lock.own_lock());
- auto& mm_data = get_data();
- auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[(KERNEL_QUICKMAP_PT - KERNEL_PT1024_BASE) / PAGE_SIZE];
- if (pte.physical_page_base() != pt_paddr.get()) {
- pte.set_physical_page_base(pt_paddr.get());
- pte.set_present(true);
- pte.set_writable(true);
- pte.set_user_allowed(false);
- // Because we must continue to hold the MM lock while we use this
- // mapping, it is sufficient to only flush on the current CPU. Other
- // CPUs trying to use this API must wait on the MM lock anyway
- flush_tlb_local(VirtualAddress(KERNEL_QUICKMAP_PT));
- } else {
- // Even though we don't allow this to be called concurrently, it's
- // possible that this PT was mapped on a different CPU and we don't
- // broadcast the flush. If so, we still need to flush the TLB.
- if (mm_data.m_last_quickmap_pt != pt_paddr)
- flush_tlb_local(VirtualAddress(KERNEL_QUICKMAP_PT));
- }
- mm_data.m_last_quickmap_pt = pt_paddr;
- return (PageTableEntry*)KERNEL_QUICKMAP_PT;
-}
-
-u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
-{
- VERIFY_INTERRUPTS_DISABLED();
- auto& mm_data = get_data();
- mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
- ScopedSpinLock lock(s_mm_lock);
-
- VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE);
- u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
-
- auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[pte_idx];
- if (pte.physical_page_base() != physical_address.get()) {
- pte.set_physical_page_base(physical_address.get());
- pte.set_present(true);
- pte.set_writable(true);
- pte.set_user_allowed(false);
- flush_tlb_local(vaddr);
- }
- return vaddr.as_ptr();
-}
-
-void MemoryManager::unquickmap_page()
-{
- VERIFY_INTERRUPTS_DISABLED();
- ScopedSpinLock lock(s_mm_lock);
- auto& mm_data = get_data();
- VERIFY(mm_data.m_quickmap_in_use.is_locked());
- VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE);
- u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
- auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[pte_idx];
- pte.clear();
- flush_tlb_local(vaddr);
- mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_prev_flags);
-}
-
-bool MemoryManager::validate_user_stack_no_lock(Space& space, VirtualAddress vaddr) const
-{
- VERIFY(space.get_lock().own_lock());
-
- if (!is_user_address(vaddr))
- return false;
-
- auto* region = find_user_region_from_vaddr_no_lock(space, vaddr);
- return region && region->is_user() && region->is_stack();
-}
-
-bool MemoryManager::validate_user_stack(Space& space, VirtualAddress vaddr) const
-{
- ScopedSpinLock lock(space.get_lock());
- return validate_user_stack_no_lock(space, vaddr);
-}
-
-void MemoryManager::register_vmobject(VMObject& vmobject)
-{
- ScopedSpinLock lock(s_mm_lock);
- m_vmobjects.append(vmobject);
-}
-
-void MemoryManager::unregister_vmobject(VMObject& vmobject)
-{
- ScopedSpinLock lock(s_mm_lock);
- m_vmobjects.remove(vmobject);
-}
-
-void MemoryManager::register_region(Region& region)
-{
- ScopedSpinLock lock(s_mm_lock);
- if (region.is_kernel())
- m_kernel_regions.append(region);
- else
- m_user_regions.append(region);
-}
-
-void MemoryManager::unregister_region(Region& region)
-{
- ScopedSpinLock lock(s_mm_lock);
- if (region.is_kernel())
- m_kernel_regions.remove(region);
- else
- m_user_regions.remove(region);
-}
-
-void MemoryManager::dump_kernel_regions()
-{
- dbgln("Kernel regions:");
-#if ARCH(I386)
- auto addr_padding = "";
-#else
- auto addr_padding = " ";
-#endif
- dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
- addr_padding, addr_padding, addr_padding);
- ScopedSpinLock lock(s_mm_lock);
- for (auto& region : m_kernel_regions) {
- dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}",
- region.vaddr().get(),
- region.vaddr().offset(region.size() - 1).get(),
- region.size(),
- region.is_readable() ? 'R' : ' ',
- region.is_writable() ? 'W' : ' ',
- region.is_executable() ? 'X' : ' ',
- region.is_shared() ? 'S' : ' ',
- region.is_stack() ? 'T' : ' ',
- region.is_syscall_region() ? 'C' : ' ',
- region.name());
- }
-}
-
-void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable)
-{
- ScopedSpinLock lock(s_mm_lock);
- ScopedSpinLock page_lock(kernel_page_directory().get_lock());
- auto* pte = ensure_pte(kernel_page_directory(), vaddr);
- VERIFY(pte);
- if (pte->is_writable() == writable)
- return;
- pte->set_writable(writable);
- flush_tlb(&kernel_page_directory(), vaddr);
-}
-
-CommittedPhysicalPageSet::~CommittedPhysicalPageSet()
-{
- if (m_page_count)
- MM.uncommit_user_physical_pages({}, m_page_count);
-}
-
-NonnullRefPtr<PhysicalPage> CommittedPhysicalPageSet::take_one()
-{
- VERIFY(m_page_count > 0);
- --m_page_count;
- return MM.allocate_committed_user_physical_page({}, MemoryManager::ShouldZeroFill::Yes);
-}
-
-void CommittedPhysicalPageSet::uncommit_one()
-{
- VERIFY(m_page_count > 0);
- --m_page_count;
- MM.uncommit_user_physical_pages({}, 1);
-}
-
-}
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
deleted file mode 100644
index 20aafea3d4..0000000000
--- a/Kernel/VM/MemoryManager.h
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/Concepts.h>
-#include <AK/HashTable.h>
-#include <AK/NonnullOwnPtrVector.h>
-#include <AK/NonnullRefPtrVector.h>
-#include <AK/String.h>
-#include <Kernel/Arch/x86/PageFault.h>
-#include <Kernel/Arch/x86/TrapFrame.h>
-#include <Kernel/Forward.h>
-#include <Kernel/SpinLock.h>
-#include <Kernel/VM/AllocationStrategy.h>
-#include <Kernel/VM/PhysicalPage.h>
-#include <Kernel/VM/PhysicalRegion.h>
-#include <Kernel/VM/Region.h>
-#include <Kernel/VM/VMObject.h>
-
-namespace Kernel {
-
-constexpr bool page_round_up_would_wrap(FlatPtr x)
-{
- return x > (explode_byte(0xFF) & ~0xFFF);
-}
-
-constexpr FlatPtr page_round_up(FlatPtr x)
-{
- FlatPtr rounded = (((FlatPtr)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1));
- // Rounding up >0xfffff000 wraps back to 0. That's never what we want.
- VERIFY(x == 0 || rounded != 0);
- return rounded;
-}
-
-constexpr FlatPtr page_round_down(FlatPtr x)
-{
- return ((FlatPtr)(x)) & ~(PAGE_SIZE - 1);
-}
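A few illustrative values for the rounding helpers above, assuming the usual 4 KiB PAGE_SIZE:
    VERIFY(page_round_up(0x1234) == 0x2000);   // bumped to the next page boundary
    VERIFY(page_round_up(0x2000) == 0x2000);   // already page-aligned, unchanged
    VERIFY(page_round_down(0x1234) == 0x1000); // low 12 bits masked off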
-
-inline FlatPtr virtual_to_low_physical(FlatPtr virtual_)
-{
- return virtual_ - physical_to_virtual_offset;
-}
-
-enum class UsedMemoryRangeType {
- LowMemory = 0,
- Prekernel,
- Kernel,
- BootModule,
- PhysicalPages,
-};
-
-static constexpr StringView UserMemoryRangeTypeNames[] {
- "Low memory",
- "Prekernel",
- "Kernel",
- "Boot module",
- "Physical Pages"
-};
-
-struct UsedMemoryRange {
- UsedMemoryRangeType type {};
- PhysicalAddress start;
- PhysicalAddress end;
-};
-
-struct ContiguousReservedMemoryRange {
- PhysicalAddress start;
- PhysicalSize length {};
-};
-
-enum class PhysicalMemoryRangeType {
- Usable = 0,
- Reserved,
- ACPI_Reclaimable,
- ACPI_NVS,
- BadMemory,
- Unknown,
-};
-
-struct PhysicalMemoryRange {
- PhysicalMemoryRangeType type { PhysicalMemoryRangeType::Unknown };
- PhysicalAddress start;
- PhysicalSize length {};
-};
-
-#define MM Kernel::MemoryManager::the()
-
-struct MemoryManagerData {
- static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::MemoryManager; }
-
- SpinLock<u8> m_quickmap_in_use;
- u32 m_quickmap_prev_flags;
-
- PhysicalAddress m_last_quickmap_pd;
- PhysicalAddress m_last_quickmap_pt;
-};
-
-extern RecursiveSpinLock s_mm_lock;
-
-// This class represents a set of committed physical pages.
-// When you ask MemoryManager to commit pages for you, you get one of these in return.
-// You can allocate pages from it via `take_one()`.
-// It will uncommit any remaining (unallocated) pages when destroyed.
-class CommittedPhysicalPageSet {
- AK_MAKE_NONCOPYABLE(CommittedPhysicalPageSet);
-
-public:
- CommittedPhysicalPageSet(Badge<MemoryManager>, size_t page_count)
- : m_page_count(page_count)
- {
- }
-
- CommittedPhysicalPageSet(CommittedPhysicalPageSet&& other)
- : m_page_count(exchange(other.m_page_count, 0))
- {
- }
-
- ~CommittedPhysicalPageSet();
-
- bool is_empty() const { return m_page_count == 0; }
- size_t page_count() const { return m_page_count; }
-
- [[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
- void uncommit_one();
-
- void operator=(CommittedPhysicalPageSet&&) = delete;
-
-private:
- size_t m_page_count { 0 };
-};
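A minimal sketch of the commit/take/uncommit flow described above; the page count and the error handling are illustrative assumptions.
    // Sketch only: reserve three physical pages up front, then draw from the set.
    auto committed_pages = MM.commit_user_physical_pages(3);
    if (!committed_pages.has_value())
        return ENOMEM; // nothing was reserved; hypothetical caller error path
    auto first = committed_pages->take_one();  // zero-filled NonnullRefPtr<PhysicalPage>
    auto second = committed_pages->take_one();
    // Any page not taken is returned to the uncommitted pool by
    // ~CommittedPhysicalPageSet() when `committed_pages` goes out of scope.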
-
-class MemoryManager {
- AK_MAKE_ETERNAL
- friend class PageDirectory;
- friend class AnonymousVMObject;
- friend class Region;
- friend class VMObject;
-
-public:
- static MemoryManager& the();
- static bool is_initialized();
-
- static void initialize(u32 cpu);
-
- static inline MemoryManagerData& get_data()
- {
- return ProcessorSpecific<MemoryManagerData>::get();
- }
-
- PageFaultResponse handle_page_fault(PageFault const&);
-
- void set_page_writable_direct(VirtualAddress, bool);
-
- void protect_readonly_after_init_memory();
- void unmap_text_after_init();
- void unmap_ksyms_after_init();
-
- static void enter_process_paging_scope(Process&);
- static void enter_space(Space&);
-
- bool validate_user_stack_no_lock(Space&, VirtualAddress) const;
- bool validate_user_stack(Space&, VirtualAddress) const;
-
- enum class ShouldZeroFill {
- No,
- Yes
- };
-
- Optional<CommittedPhysicalPageSet> commit_user_physical_pages(size_t page_count);
- void uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);
-
- NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
- RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
- RefPtr<PhysicalPage> allocate_supervisor_physical_page();
- NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size);
- void deallocate_physical_page(PhysicalAddress);
-
- OwnPtr<Region> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
- OwnPtr<Region> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
- OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
- OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
- OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
- OwnPtr<Region> allocate_kernel_region_with_vmobject(Range const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-
- struct SystemMemoryInfo {
- PhysicalSize user_physical_pages { 0 };
- PhysicalSize user_physical_pages_used { 0 };
- PhysicalSize user_physical_pages_committed { 0 };
- PhysicalSize user_physical_pages_uncommitted { 0 };
- PhysicalSize super_physical_pages { 0 };
- PhysicalSize super_physical_pages_used { 0 };
- };
-
- SystemMemoryInfo get_system_memory_info()
- {
- ScopedSpinLock lock(s_mm_lock);
- return m_system_memory_info;
- }
-
- template<IteratorFunction<VMObject&> Callback>
- static void for_each_vmobject(Callback callback)
- {
- ScopedSpinLock locker(s_mm_lock);
- for (auto& vmobject : MM.m_vmobjects) {
- if (callback(vmobject) == IterationDecision::Break)
- break;
- }
- }
-
- template<VoidFunction<VMObject&> Callback>
- static void for_each_vmobject(Callback callback)
- {
- for (auto& vmobject : MM.m_vmobjects)
- callback(vmobject);
- }
-
- static Region* find_user_region_from_vaddr(Space&, VirtualAddress);
- static Region* find_user_region_from_vaddr_no_lock(Space&, VirtualAddress);
- static void validate_syscall_preconditions(Space&, RegisterState const&);
-
- void dump_kernel_regions();
-
- PhysicalPage& shared_zero_page() { return *m_shared_zero_page; }
- PhysicalPage& lazy_committed_page() { return *m_lazy_committed_page; }
-
- PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
-
- Vector<UsedMemoryRange> const& used_memory_ranges() { return m_used_memory_ranges; }
- bool is_allowed_to_mmap_to_userspace(PhysicalAddress, Range const&) const;
-
- PhysicalPageEntry& get_physical_page_entry(PhysicalAddress);
- PhysicalAddress get_physical_address(PhysicalPage const&);
-
-private:
- MemoryManager();
- ~MemoryManager();
-
- void initialize_physical_pages();
- void register_reserved_ranges();
-
- void register_vmobject(VMObject&);
- void unregister_vmobject(VMObject&);
- void register_region(Region&);
- void unregister_region(Region&);
-
- void protect_kernel_image();
- void parse_memory_map();
- static void flush_tlb_local(VirtualAddress, size_t page_count = 1);
- static void flush_tlb(PageDirectory const*, VirtualAddress, size_t page_count = 1);
-
- static Region* kernel_region_from_vaddr(VirtualAddress);
-
- static Region* find_region_from_vaddr(VirtualAddress);
-
- RefPtr<PhysicalPage> find_free_user_physical_page(bool);
-
- ALWAYS_INLINE u8* quickmap_page(PhysicalPage& page)
- {
- return quickmap_page(page.paddr());
- }
- u8* quickmap_page(PhysicalAddress const&);
- void unquickmap_page();
-
- PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index);
- PageTableEntry* quickmap_pt(PhysicalAddress);
-
- PageTableEntry* pte(PageDirectory&, VirtualAddress);
- PageTableEntry* ensure_pte(PageDirectory&, VirtualAddress);
- void release_pte(PageDirectory&, VirtualAddress, bool);
-
- RefPtr<PageDirectory> m_kernel_page_directory;
-
- RefPtr<PhysicalPage> m_shared_zero_page;
- RefPtr<PhysicalPage> m_lazy_committed_page;
-
- SystemMemoryInfo m_system_memory_info;
-
- NonnullOwnPtrVector<PhysicalRegion> m_user_physical_regions;
- OwnPtr<PhysicalRegion> m_super_physical_region;
- OwnPtr<PhysicalRegion> m_physical_pages_region;
- PhysicalPageEntry* m_physical_page_entries { nullptr };
- size_t m_physical_page_entries_count { 0 };
-
- Region::ListInMemoryManager m_user_regions;
- Region::ListInMemoryManager m_kernel_regions;
- Vector<UsedMemoryRange> m_used_memory_ranges;
- Vector<PhysicalMemoryRange> m_physical_memory_ranges;
- Vector<ContiguousReservedMemoryRange> m_reserved_memory_ranges;
-
- VMObject::List m_vmobjects;
-};
-
-inline bool is_user_address(VirtualAddress vaddr)
-{
- return vaddr.get() < USER_RANGE_CEILING;
-}
-
-inline bool is_user_range(VirtualAddress vaddr, size_t size)
-{
- if (vaddr.offset(size) < vaddr)
- return false;
- return is_user_address(vaddr) && is_user_address(vaddr.offset(size));
-}
-
-inline bool is_user_range(Range const& range)
-{
- return is_user_range(range.base(), range.size());
-}
-
-inline bool PhysicalPage::is_shared_zero_page() const
-{
- return this == &MM.shared_zero_page();
-}
-
-inline bool PhysicalPage::is_lazy_committed_page() const
-{
- return this == &MM.lazy_committed_page();
-}
-
-}
diff --git a/Kernel/VM/PageDirectory.cpp b/Kernel/VM/PageDirectory.cpp
deleted file mode 100644
index b49c6f3987..0000000000
--- a/Kernel/VM/PageDirectory.cpp
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <AK/Memory.h>
-#include <AK/Singleton.h>
-#include <Kernel/Prekernel/Prekernel.h>
-#include <Kernel/Process.h>
-#include <Kernel/Random.h>
-#include <Kernel/Sections.h>
-#include <Kernel/VM/MemoryManager.h>
-#include <Kernel/VM/PageDirectory.h>
-
-extern u8 end_of_kernel_image[];
-
-namespace Kernel {
-
-static AK::Singleton<HashMap<FlatPtr, PageDirectory*>> s_cr3_map;
-
-static HashMap<FlatPtr, PageDirectory*>& cr3_map()
-{
- VERIFY_INTERRUPTS_DISABLED();
- return *s_cr3_map;
-}
-
-RefPtr<PageDirectory> PageDirectory::find_by_cr3(FlatPtr cr3)
-{
- ScopedSpinLock lock(s_mm_lock);
- return cr3_map().get(cr3).value_or({});
-}
-
-UNMAP_AFTER_INIT NonnullRefPtr<PageDirectory> PageDirectory::must_create_kernel_page_directory()
-{
- auto directory = adopt_ref_if_nonnull(new (nothrow) PageDirectory).release_nonnull();
-
- // make sure this starts in a new page directory to make MemoryManager::initialize_physical_pages() happy
- FlatPtr start_of_range = ((FlatPtr)end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
- directory->m_range_allocator.initialize_with_range(VirtualAddress(start_of_range), KERNEL_PD_END - start_of_range);
- directory->m_identity_range_allocator.initialize_with_range(VirtualAddress(FlatPtr(0x00000000)), 0x00200000);
-
- return directory;
-}
-
-RefPtr<PageDirectory> PageDirectory::try_create_for_userspace(RangeAllocator const* parent_range_allocator)
-{
- constexpr FlatPtr userspace_range_base = 0x00800000;
- FlatPtr const userspace_range_ceiling = USER_RANGE_CEILING;
-
- auto directory = adopt_ref_if_nonnull(new (nothrow) PageDirectory);
- if (!directory)
- return {};
-
- if (parent_range_allocator) {
- directory->m_range_allocator.initialize_from_parent(*parent_range_allocator);
- } else {
- size_t random_offset = (get_fast_random<u8>() % 32 * MiB) & PAGE_MASK;
- u32 base = userspace_range_base + random_offset;
- directory->m_range_allocator.initialize_with_range(VirtualAddress(base), userspace_range_ceiling - base);
- }
-
- // NOTE: Take the MM lock since we need it for quickmap.
- ScopedSpinLock lock(s_mm_lock);
-
-#if ARCH(X86_64)
- directory->m_pml4t = MM.allocate_user_physical_page();
- if (!directory->m_pml4t)
- return {};
-#endif
-
- directory->m_directory_table = MM.allocate_user_physical_page();
- if (!directory->m_directory_table)
- return {};
- auto kernel_pd_index = (kernel_mapping_base >> 30) & 0x1ffu;
- for (size_t i = 0; i < kernel_pd_index; i++) {
- directory->m_directory_pages[i] = MM.allocate_user_physical_page();
- if (!directory->m_directory_pages[i])
- return {};
- }
-
- // Share the top 1 GiB of kernel-only mappings (>=kernel_mapping_base)
- directory->m_directory_pages[kernel_pd_index] = MM.kernel_page_directory().m_directory_pages[kernel_pd_index];
-
-#if ARCH(X86_64)
- {
- auto& table = *(PageDirectoryPointerTable*)MM.quickmap_page(*directory->m_pml4t);
- table.raw[0] = (FlatPtr)directory->m_directory_table->paddr().as_ptr() | 7;
- MM.unquickmap_page();
- }
-#endif
-
- {
- auto& table = *(PageDirectoryPointerTable*)MM.quickmap_page(*directory->m_directory_table);
- for (size_t i = 0; i < sizeof(m_directory_pages) / sizeof(m_directory_pages[0]); i++) {
- if (directory->m_directory_pages[i]) {
-#if ARCH(I386)
- table.raw[i] = (FlatPtr)directory->m_directory_pages[i]->paddr().as_ptr() | 1;
-#else
- table.raw[i] = (FlatPtr)directory->m_directory_pages[i]->paddr().as_ptr() | 7;
-#endif
- }
- }
-
- // 2 ** MAXPHYADDR - 1
- // Where MAXPHYADDR = physical_address_bit_width
- u64 max_physical_address = (1ULL << Processor::current().physical_address_bit_width()) - 1;
-
- // bit 63 = no execute
- // bit 7 = page size
- // bit 5 = accessed
- // bit 4 = cache disable
- // bit 3 = write through
- // bit 2 = user/supervisor
- // bit 1 = read/write
- // bit 0 = present
- constexpr u64 pdpte_bit_flags = 0x80000000000000BF;
-
- // This is to notify us of bugs where we're:
- // 1. Going over what the processor is capable of.
- // 2. Writing into the reserved bits (51:MAXPHYADDR), where doing so throws a GPF
- // when writing out the PDPT pointer to CR3.
- // The reason we're not checking the page directory's physical address directly is because
- // we're checking for sign extension when putting it into a PDPTE. See issue #4584.
- for (auto table_entry : table.raw)
- VERIFY((table_entry & ~pdpte_bit_flags) <= max_physical_address);
-
- MM.unquickmap_page();
- }
-
- // Clone bottom 2 MiB of mappings from kernel_page_directory
- PageDirectoryEntry buffer;
- auto* kernel_pd = MM.quickmap_pd(MM.kernel_page_directory(), 0);
- memcpy(&buffer, kernel_pd, sizeof(PageDirectoryEntry));
- auto* new_pd = MM.quickmap_pd(*directory, 0);
- memcpy(new_pd, &buffer, sizeof(PageDirectoryEntry));
-
- cr3_map().set(directory->cr3(), directory.ptr());
- return directory;
-}
-
-PageDirectory::PageDirectory()
-{
-}
-
-UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
-{
- // Adopt the page tables already set up by boot.S
-#if ARCH(X86_64)
- dmesgln("MM: boot_pml4t @ {}", boot_pml4t);
- m_pml4t = PhysicalPage::create(boot_pml4t, MayReturnToFreeList::No);
-#endif
- dmesgln("MM: boot_pdpt @ {}", boot_pdpt);
- dmesgln("MM: boot_pd0 @ {}", boot_pd0);
- dmesgln("MM: boot_pd_kernel @ {}", boot_pd_kernel);
- m_directory_table = PhysicalPage::create(boot_pdpt, MayReturnToFreeList::No);
- m_directory_pages[0] = PhysicalPage::create(boot_pd0, MayReturnToFreeList::No);
- m_directory_pages[(kernel_mapping_base >> 30) & 0x1ff] = PhysicalPage::create(boot_pd_kernel, MayReturnToFreeList::No);
-}
-
-PageDirectory::~PageDirectory()
-{
- ScopedSpinLock lock(s_mm_lock);
- if (m_space)
- cr3_map().remove(cr3());
-}
-
-}
diff --git a/Kernel/VM/PageDirectory.h b/Kernel/VM/PageDirectory.h
deleted file mode 100644
index 28832056ea..0000000000
--- a/Kernel/VM/PageDirectory.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/HashMap.h>
-#include <AK/RefCounted.h>
-#include <AK/RefPtr.h>
-#include <Kernel/Forward.h>
-#include <Kernel/VM/PhysicalPage.h>
-#include <Kernel/VM/RangeAllocator.h>
-
-namespace Kernel {
-
-class PageDirectory : public RefCounted<PageDirectory> {
- friend class MemoryManager;
-
-public:
- static RefPtr<PageDirectory> try_create_for_userspace(RangeAllocator const* parent_range_allocator = nullptr);
- static NonnullRefPtr<PageDirectory> must_create_kernel_page_directory();
- static RefPtr<PageDirectory> find_by_cr3(FlatPtr);
-
- ~PageDirectory();
-
- void allocate_kernel_directory();
-
- FlatPtr cr3() const
- {
-#if ARCH(X86_64)
- return m_pml4t->paddr().get();
-#else
- return m_directory_table->paddr().get();
-#endif
- }
-
- RangeAllocator& range_allocator() { return m_range_allocator; }
- const RangeAllocator& range_allocator() const { return m_range_allocator; }
-
- RangeAllocator& identity_range_allocator() { return m_identity_range_allocator; }
-
- Space* space() { return m_space; }
- const Space* space() const { return m_space; }
-
- void set_space(Badge<Space>, Space& space) { m_space = &space; }
-
- RecursiveSpinLock& get_lock() { return m_lock; }
-
-private:
- PageDirectory();
-
- Space* m_space { nullptr };
- RangeAllocator m_range_allocator;
- RangeAllocator m_identity_range_allocator;
-#if ARCH(X86_64)
- RefPtr<PhysicalPage> m_pml4t;
-#endif
- RefPtr<PhysicalPage> m_directory_table;
-#if ARCH(X86_64)
- RefPtr<PhysicalPage> m_directory_pages[512];
-#else
- RefPtr<PhysicalPage> m_directory_pages[4];
-#endif
- HashMap<FlatPtr, RefPtr<PhysicalPage>> m_page_tables;
- RecursiveSpinLock m_lock;
-};
-
-}
diff --git a/Kernel/VM/PageFaultResponse.h b/Kernel/VM/PageFaultResponse.h
deleted file mode 100644
index 56297f6268..0000000000
--- a/Kernel/VM/PageFaultResponse.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (c) 2020, the SerenityOS developers.
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-namespace Kernel {
-
-enum class PageFaultResponse {
- ShouldCrash,
- OutOfMemory,
- Continue,
-};
-
-}
diff --git a/Kernel/VM/PhysicalPage.cpp b/Kernel/VM/PhysicalPage.cpp
deleted file mode 100644
index bc215c6b5d..0000000000
--- a/Kernel/VM/PhysicalPage.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <Kernel/Heap/kmalloc.h>
-#include <Kernel/VM/MemoryManager.h>
-#include <Kernel/VM/PhysicalPage.h>
-
-namespace Kernel {
-
-NonnullRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, MayReturnToFreeList may_return_to_freelist)
-{
- auto& physical_page_entry = MM.get_physical_page_entry(paddr);
- return adopt_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalPage(may_return_to_freelist));
-}
-
-PhysicalPage::PhysicalPage(MayReturnToFreeList may_return_to_freelist)
- : m_may_return_to_freelist(may_return_to_freelist)
-{
-}
-
-PhysicalAddress PhysicalPage::paddr() const
-{
- return MM.get_physical_address(*this);
-}
-
-void PhysicalPage::free_this()
-{
- auto paddr = MM.get_physical_address(*this);
- if (m_may_return_to_freelist == MayReturnToFreeList::Yes) {
- auto& this_as_freelist_entry = MM.get_physical_page_entry(paddr).freelist;
- this->~PhysicalPage(); // delete in place
- this_as_freelist_entry.next_index = -1;
- this_as_freelist_entry.prev_index = -1;
- MM.deallocate_physical_page(paddr);
- } else {
- this->~PhysicalPage(); // delete in place
- }
-}
-
-}
diff --git a/Kernel/VM/PhysicalPage.h b/Kernel/VM/PhysicalPage.h
deleted file mode 100644
index ab22be3534..0000000000
--- a/Kernel/VM/PhysicalPage.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/NonnullRefPtr.h>
-#include <Kernel/PhysicalAddress.h>
-
-namespace Kernel {
-
-enum class MayReturnToFreeList : bool {
- No,
- Yes
-};
-
-class PhysicalPage {
- AK_MAKE_NONCOPYABLE(PhysicalPage);
- AK_MAKE_NONMOVABLE(PhysicalPage);
-
- friend class MemoryManager;
-
-public:
- PhysicalAddress paddr() const;
-
- void ref()
- {
- m_ref_count.fetch_add(1, AK::memory_order_acq_rel);
- }
-
- void unref()
- {
- if (m_ref_count.fetch_sub(1, AK::memory_order_acq_rel) == 1)
- free_this();
- }
-
- static NonnullRefPtr<PhysicalPage> create(PhysicalAddress, MayReturnToFreeList may_return_to_freelist = MayReturnToFreeList::Yes);
-
- u32 ref_count() const { return m_ref_count.load(AK::memory_order_consume); }
-
- bool is_shared_zero_page() const;
- bool is_lazy_committed_page() const;
-
-private:
- explicit PhysicalPage(MayReturnToFreeList may_return_to_freelist);
- ~PhysicalPage() = default;
-
- void free_this();
-
- Atomic<u32> m_ref_count { 1 };
- MayReturnToFreeList m_may_return_to_freelist { MayReturnToFreeList::Yes };
-};
-
-struct PhysicalPageEntry {
- union {
- // If it's a live PhysicalPage object:
- struct {
- PhysicalPage physical_page;
- } allocated;
-
- // If it's an entry in a PhysicalZone::Bucket's freelist.
- struct {
- i16 next_index;
- i16 prev_index;
- } freelist;
- };
-};
-
-}
diff --git a/Kernel/VM/PhysicalRegion.cpp b/Kernel/VM/PhysicalRegion.cpp
deleted file mode 100644
index 42b5c7022b..0000000000
--- a/Kernel/VM/PhysicalRegion.cpp
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <AK/NonnullRefPtr.h>
-#include <AK/RefPtr.h>
-#include <Kernel/Assertions.h>
-#include <Kernel/Random.h>
-#include <Kernel/VM/MemoryManager.h>
-#include <Kernel/VM/PhysicalRegion.h>
-#include <Kernel/VM/PhysicalZone.h>
-
-namespace Kernel {
-
-static constexpr u32 next_power_of_two(u32 value)
-{
- value--;
- value |= value >> 1;
- value |= value >> 2;
- value |= value >> 4;
- value |= value >> 8;
- value |= value >> 16;
- value++;
- return value;
-}
-
-PhysicalRegion::~PhysicalRegion()
-{
-}
-
-PhysicalRegion::PhysicalRegion(PhysicalAddress lower, PhysicalAddress upper)
- : m_lower(lower)
- , m_upper(upper)
-{
- m_pages = (m_upper.get() - m_lower.get()) / PAGE_SIZE;
-}
-
-void PhysicalRegion::initialize_zones()
-{
- size_t remaining_pages = m_pages;
- auto base_address = m_lower;
-
- auto make_zones = [&](size_t pages_per_zone) {
- size_t zone_count = 0;
- auto first_address = base_address;
- while (remaining_pages >= pages_per_zone) {
- m_zones.append(make<PhysicalZone>(base_address, pages_per_zone));
- base_address = base_address.offset(pages_per_zone * PAGE_SIZE);
- m_usable_zones.append(m_zones.last());
- remaining_pages -= pages_per_zone;
- ++zone_count;
- }
- if (zone_count)
- dmesgln(" * {}x PhysicalZone ({} MiB) @ {:016x}-{:016x}", zone_count, pages_per_zone / 256, first_address.get(), base_address.get() - pages_per_zone * PAGE_SIZE - 1);
- };
-
- // First make 16 MiB zones (with 4096 pages each)
- make_zones(4096);
-
- // Then divide any remaining space into 1 MiB zones (with 256 pages each)
- make_zones(256);
-}
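As a worked example of the two-pass zone carving above (the figures are illustrative): a usable region of 35 MiB spans 8960 pages; the first pass carves two 4096-page (16 MiB) zones, leaving 768 pages, and the second pass carves three 256-page (1 MiB) zones, leaving no remainder.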
-
-OwnPtr<PhysicalRegion> PhysicalRegion::try_take_pages_from_beginning(unsigned page_count)
-{
- VERIFY(page_count > 0);
- VERIFY(page_count < m_pages);
- auto taken_lower = m_lower;
- auto taken_upper = taken_lower.offset((PhysicalPtr)page_count * PAGE_SIZE);
- m_lower = m_lower.offset((PhysicalPtr)page_count * PAGE_SIZE);
- m_pages = (m_upper.get() - m_lower.get()) / PAGE_SIZE;
-
- return try_create(taken_lower, taken_upper);
-}
-
-NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count)
-{
- auto rounded_page_count = next_power_of_two(count);
- auto order = __builtin_ctz(rounded_page_count);
-
- Optional<PhysicalAddress> page_base;
- for (auto& zone : m_usable_zones) {
- page_base = zone.allocate_block(order);
- if (page_base.has_value()) {
- if (zone.is_empty()) {
- // We've exhausted this zone, so move it to the full zones list.
- m_full_zones.append(zone);
- }
- break;
- }
- }
-
- if (!page_base.has_value())
- return {};
-
- NonnullRefPtrVector<PhysicalPage> physical_pages;
- physical_pages.ensure_capacity(count);
-
- for (size_t i = 0; i < count; ++i)
- physical_pages.append(PhysicalPage::create(page_base.value().offset(i * PAGE_SIZE)));
- return physical_pages;
-}
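For instance (illustrative numbers), a request for 5 contiguous pages is rounded up to the next power of two, 8, giving order = ctz(8) = 3; an 8-page (32 KiB) buddy block is then carved out of a zone and PhysicalPage handles are created for the 5 requested pages.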
-
-RefPtr<PhysicalPage> PhysicalRegion::take_free_page()
-{
- if (m_usable_zones.is_empty())
- return nullptr;
-
- auto& zone = *m_usable_zones.first();
- auto page = zone.allocate_block(0);
- VERIFY(page.has_value());
-
- if (zone.is_empty()) {
- // We've exhausted this zone, so move it to the full zones list.
- m_full_zones.append(zone);
- }
-
- return PhysicalPage::create(page.value());
-}
-
-void PhysicalRegion::return_page(PhysicalAddress paddr)
-{
- // FIXME: Find a way to avoid looping over the zones here.
- // (Do some math on the address to find the right zone index.)
- // The main thing that gets in the way of this is non-uniform zone sizes.
- // Perhaps it would be better if all zones had the same size.
- for (auto& zone : m_zones) {
- if (zone.contains(paddr)) {
- zone.deallocate_block(paddr, 0);
- if (m_full_zones.contains(zone))
- m_usable_zones.append(zone);
- return;
- }
- }
-
- VERIFY_NOT_REACHED();
-}
-
-}
diff --git a/Kernel/VM/PhysicalRegion.h b/Kernel/VM/PhysicalRegion.h
deleted file mode 100644
index c6962768c8..0000000000
--- a/Kernel/VM/PhysicalRegion.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/OwnPtr.h>
-#include <Kernel/VM/PhysicalPage.h>
-#include <Kernel/VM/PhysicalZone.h>
-
-namespace Kernel {
-
-class PhysicalRegion {
- AK_MAKE_ETERNAL;
- AK_MAKE_NONCOPYABLE(PhysicalRegion);
- AK_MAKE_NONMOVABLE(PhysicalRegion);
-
-public:
- static OwnPtr<PhysicalRegion> try_create(PhysicalAddress lower, PhysicalAddress upper)
- {
- return adopt_own_if_nonnull(new PhysicalRegion { lower, upper });
- }
-
- ~PhysicalRegion();
-
- void initialize_zones();
-
- PhysicalAddress lower() const { return m_lower; }
- PhysicalAddress upper() const { return m_upper; }
- unsigned size() const { return m_pages; }
- bool contains(PhysicalAddress paddr) const { return paddr >= m_lower && paddr < m_upper; }
-
- OwnPtr<PhysicalRegion> try_take_pages_from_beginning(unsigned);
-
- RefPtr<PhysicalPage> take_free_page();
- NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count);
- void return_page(PhysicalAddress);
-
-private:
- PhysicalRegion(PhysicalAddress lower, PhysicalAddress upper);
-
- NonnullOwnPtrVector<PhysicalZone> m_zones;
-
- PhysicalZone::List m_usable_zones;
- PhysicalZone::List m_full_zones;
-
- PhysicalAddress m_lower;
- PhysicalAddress m_upper;
- unsigned m_pages { 0 };
-};
-
-}
diff --git a/Kernel/VM/PhysicalZone.cpp b/Kernel/VM/PhysicalZone.cpp
deleted file mode 100644
index 180f14fef8..0000000000
--- a/Kernel/VM/PhysicalZone.cpp
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <AK/Format.h>
-#include <Kernel/VM/MemoryManager.h>
-#include <Kernel/VM/PhysicalPage.h>
-#include <Kernel/VM/PhysicalZone.h>
-
-namespace Kernel {
-
-PhysicalPageEntry& PhysicalZone::get_freelist_entry(ChunkIndex index) const
-{
- return MM.get_physical_page_entry(m_base_address.offset(index * ZONE_CHUNK_SIZE));
-}
-
-PhysicalZone::PhysicalZone(PhysicalAddress base_address, size_t page_count)
- : m_base_address(base_address)
- , m_page_count(page_count)
- , m_used_chunks(0)
-{
- size_t const chunk_count = page_count * 2;
- for (int order = max_order; order >= 0; --order) {
- auto& bucket = m_buckets[order];
- size_t block_size = 2u << order;
- size_t bitmap_size_for_order = ceil_div((size_t)(chunk_count / block_size), (size_t)2);
- bucket.order = order;
- if (bitmap_size_for_order)
- bucket.bitmap.grow(bitmap_size_for_order, false);
- }
-
- auto first_order = __builtin_ctz(page_count);
- size_t block_size = 2u << first_order;
- auto& bucket = m_buckets[first_order];
- size_t remaining_chunk_count = chunk_count;
- size_t initial_bundle_count = remaining_chunk_count / block_size;
-
- size_t offset = 0;
- for (size_t i = 0; i < initial_bundle_count; ++i) {
- ChunkIndex index = offset + i;
- bucket.set_buddy_bit(index, true);
-
- auto& freelist_entry = get_freelist_entry(index).freelist;
- freelist_entry.next_index = bucket.freelist;
- freelist_entry.prev_index = -1;
- bucket.freelist = index;
-
- remaining_chunk_count -= block_size;
- offset += block_size;
- }
-}
-
-Optional<PhysicalAddress> PhysicalZone::allocate_block(size_t order)
-{
- size_t block_size = 2u << order;
- auto result = allocate_block_impl(order);
- if (!result.has_value())
- return {};
- m_used_chunks += block_size;
- VERIFY(!(result.value() & 1));
- return m_base_address.offset(result.value() * ZONE_CHUNK_SIZE);
-}
-
-Optional<PhysicalZone::ChunkIndex> PhysicalZone::allocate_block_impl(size_t order)
-{
- if (order > max_order)
- return {};
- size_t block_size = 2u << order;
- auto& bucket = m_buckets[order];
- if (bucket.freelist == -1) {
- // The freelist for this order is empty, try to allocate a block from one order higher, and split it.
- auto buddies = allocate_block_impl(order + 1);
-
- if (!buddies.has_value()) {
- // Looks like we're unable to satisfy this allocation request.
- return {};
- }
-
- // Split the block from order+1 into two parts.
- // We keep one (in the freelist for this order) and return the other.
-
- ChunkIndex index = buddies.value();
-
- // First half goes in the freelist
- auto& freelist_entry = get_freelist_entry(index).freelist;
- freelist_entry.next_index = -1;
- freelist_entry.prev_index = -1;
- bucket.freelist = index;
-
- VERIFY(bucket.get_buddy_bit(index) == false);
-
- // Set buddy bit to 1 (one used, one unused).
- bucket.set_buddy_bit(index, true);
-
- // Second half is returned.
- return index + block_size;
- }
-
- // Freelist has at least one entry, return that.
- ChunkIndex index = bucket.freelist;
-
- bucket.freelist = get_freelist_entry(bucket.freelist).freelist.next_index;
- if (bucket.freelist != -1) {
- get_freelist_entry(bucket.freelist).freelist.prev_index = -1;
- }
-
- VERIFY(bucket.get_buddy_bit(index) == true);
- bucket.set_buddy_bit(index, false);
-
- return index;
-}
-
-void PhysicalZone::deallocate_block(PhysicalAddress address, size_t order)
-{
- size_t block_size = 2u << order;
- ChunkIndex index = (address.get() - m_base_address.get()) / ZONE_CHUNK_SIZE;
- deallocate_block_impl(index, order);
- m_used_chunks -= block_size;
-}
-
-void PhysicalZone::deallocate_block_impl(ChunkIndex index, size_t order)
-{
- size_t block_size = 2u << order;
-
- // Basic algorithm:
- // If the buddy block is free (buddy bit is 1 -- because this block was the only used one):
- // Then,
- // 1. Merge with buddy.
- // 2. Return the merged block to order+1.
- // Else (buddy bit is 0 -- because both blocks are used)
- // 1. Add the block to the freelist.
- // 2. Set buddy bit to 1.
- auto& bucket = m_buckets[order];
-
- if (bucket.get_buddy_bit(index)) {
- // Buddy is free! Merge with buddy and coalesce upwards to the next order.
- auto buddy_bit_index = bucket.buddy_bit_index(index);
- ChunkIndex buddy_base_index = (buddy_bit_index << 1) << (1 + order);
-
- if (index == buddy_base_index)
- remove_from_freelist(bucket, buddy_base_index + block_size);
- else
- remove_from_freelist(bucket, buddy_base_index);
-
- bucket.set_buddy_bit(index, false);
- deallocate_block_impl(buddy_base_index, order + 1);
- } else {
- // Buddy is in use. Add freed block to freelist and set buddy bit to 1.
-
- if (bucket.freelist != -1) {
- get_freelist_entry(bucket.freelist).freelist.prev_index = index;
- }
-
- auto& freelist_entry = get_freelist_entry(index).freelist;
- freelist_entry.next_index = bucket.freelist;
- freelist_entry.prev_index = -1;
- bucket.freelist = index;
-
- bucket.set_buddy_bit(index, true);
- }
-}
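A short worked trace of the merge path above (chunk indices are illustrative): freeing the order-0 block at chunk index 6 places it in the buddy pair covering chunks 4-7, so buddy_base_index is 4; if the buddy bit is set, the already-free block at index 4 is unlinked from the order-0 freelist, the bit is cleared, and the merged block at index 4 is freed recursively into order 1.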
-
-void PhysicalZone::remove_from_freelist(BuddyBucket& bucket, ChunkIndex index)
-{
- auto& freelist_entry = get_freelist_entry(index).freelist;
- VERIFY(freelist_entry.prev_index >= -1);
- VERIFY(freelist_entry.next_index >= -1);
- if (freelist_entry.prev_index != -1) {
- auto& prev_entry = get_freelist_entry(freelist_entry.prev_index).freelist;
- prev_entry.next_index = freelist_entry.next_index;
- }
- if (freelist_entry.next_index != -1) {
- auto& next_entry = get_freelist_entry(freelist_entry.next_index).freelist;
- next_entry.prev_index = freelist_entry.prev_index;
- }
- if (bucket.freelist == index)
- bucket.freelist = freelist_entry.next_index;
- freelist_entry.next_index = -1;
- freelist_entry.prev_index = -1;
-}
-
-void PhysicalZone::dump() const
-{
- dbgln("(( {} used, {} available, page_count: {} ))", m_used_chunks, available(), m_page_count);
- for (size_t i = 0; i <= max_order; ++i) {
- auto& bucket = m_buckets[i];
- dbgln("[{:2} / {:4}] ", i, (size_t)(2u << i));
- auto entry = bucket.freelist;
- while (entry != -1) {
- dbgln(" {}", entry);
- entry = get_freelist_entry(entry).freelist.next_index;
- }
- }
-}
-
-}
diff --git a/Kernel/VM/PhysicalZone.h b/Kernel/VM/PhysicalZone.h
deleted file mode 100644
index bd3bba924e..0000000000
--- a/Kernel/VM/PhysicalZone.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/Bitmap.h>
-#include <AK/IntrusiveList.h>
-
-namespace Kernel {
-
-// A PhysicalZone is an allocator that manages a sub-area of a PhysicalRegion.
-// Its total size is always a power of two.
-// Allocations are made a whole number of chunks at a time. One chunk is PAGE_SIZE/2, and the minimum allocation size is 2 chunks.
-// The allocator uses a buddy block scheme internally.
-
-class PhysicalZone {
- AK_MAKE_ETERNAL;
- AK_MAKE_NONCOPYABLE(PhysicalZone);
- AK_MAKE_NONMOVABLE(PhysicalZone);
-
-public:
- static constexpr size_t ZONE_CHUNK_SIZE = PAGE_SIZE / 2;
- using ChunkIndex = i16;
-
- PhysicalZone(PhysicalAddress base, size_t page_count);
-
- Optional<PhysicalAddress> allocate_block(size_t order);
- void deallocate_block(PhysicalAddress, size_t order);
-
- void dump() const;
- size_t available() const { return m_page_count - (m_used_chunks / 2); }
-
- bool is_empty() const { return !available(); }
-
- PhysicalAddress base() const { return m_base_address; }
- bool contains(PhysicalAddress paddr) const
- {
- return paddr >= m_base_address && paddr < m_base_address.offset(m_page_count * PAGE_SIZE);
- }
-
-private:
- Optional<ChunkIndex> allocate_block_impl(size_t order);
- void deallocate_block_impl(ChunkIndex, size_t order);
-
- struct BuddyBucket {
- bool get_buddy_bit(ChunkIndex index) const
- {
- return bitmap.get(buddy_bit_index(index));
- }
-
- void set_buddy_bit(ChunkIndex index, bool value)
- {
- bitmap.set(buddy_bit_index(index), value);
- }
-
- size_t buddy_bit_index(ChunkIndex index) const
- {
- // NOTE: We cut the index in half since one chunk is half a page.
- return (index >> 1) >> (1 + order);
- }
-
- // This bucket's index in the m_buckets array. (Redundant data kept here for convenience.)
- size_t order { 0 };
-
- // This is the start of the freelist for this buddy size.
- // It's an index into the global PhysicalPageEntry array (offset by this PhysicalRegion's base.)
- // A value of -1 indicates an empty freelist.
- ChunkIndex freelist { -1 };
-
- // Bitmap with 1 bit per buddy pair.
- // 0 == Both blocks either free or used.
- // 1 == One block free, one block used.
- Bitmap bitmap;
- };
-
- static constexpr size_t max_order = 12;
- BuddyBucket m_buckets[max_order + 1];
-
- PhysicalPageEntry& get_freelist_entry(ChunkIndex) const;
- void remove_from_freelist(BuddyBucket&, ChunkIndex);
-
- PhysicalAddress m_base_address { 0 };
- size_t m_page_count { 0 };
- size_t m_used_chunks { 0 };
-
- IntrusiveListNode<PhysicalZone> m_list_node;
-
-public:
- using List = IntrusiveList<PhysicalZone, RawPtr<PhysicalZone>, &PhysicalZone::m_list_node>;
-};
-
-}
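To make the order arithmetic concrete: one chunk is 2 KiB (PAGE_SIZE / 2) and a block of order N spans 2 << N chunks, so order 0 is 2 chunks (one 4 KiB page, the minimum allocation) and max_order 12 is 8192 chunks (4096 pages, i.e. the 16 MiB zones that PhysicalRegion carves out).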
diff --git a/Kernel/VM/PrivateInodeVMObject.cpp b/Kernel/VM/PrivateInodeVMObject.cpp
deleted file mode 100644
index c000fb4e14..0000000000
--- a/Kernel/VM/PrivateInodeVMObject.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <Kernel/FileSystem/Inode.h>
-#include <Kernel/VM/PrivateInodeVMObject.h>
-
-namespace Kernel {
-
-RefPtr<PrivateInodeVMObject> PrivateInodeVMObject::try_create_with_inode(Inode& inode)
-{
- return adopt_ref_if_nonnull(new (nothrow) PrivateInodeVMObject(inode, inode.size()));
-}
-
-RefPtr<VMObject> PrivateInodeVMObject::try_clone()
-{
- return adopt_ref_if_nonnull(new (nothrow) PrivateInodeVMObject(*this));
-}
-
-PrivateInodeVMObject::PrivateInodeVMObject(Inode& inode, size_t size)
- : InodeVMObject(inode, size)
-{
-}
-
-PrivateInodeVMObject::PrivateInodeVMObject(PrivateInodeVMObject const& other)
- : InodeVMObject(other)
-{
-}
-
-PrivateInodeVMObject::~PrivateInodeVMObject()
-{
-}
-
-}
diff --git a/Kernel/VM/PrivateInodeVMObject.h b/Kernel/VM/PrivateInodeVMObject.h
deleted file mode 100644
index 55b80680f8..0000000000
--- a/Kernel/VM/PrivateInodeVMObject.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/Bitmap.h>
-#include <Kernel/UnixTypes.h>
-#include <Kernel/VM/InodeVMObject.h>
-
-namespace Kernel {
-
-class PrivateInodeVMObject final : public InodeVMObject {
- AK_MAKE_NONMOVABLE(PrivateInodeVMObject);
-
-public:
- virtual ~PrivateInodeVMObject() override;
-
- static RefPtr<PrivateInodeVMObject> try_create_with_inode(Inode&);
- virtual RefPtr<VMObject> try_clone() override;
-
-private:
- virtual bool is_private_inode() const override { return true; }
-
- explicit PrivateInodeVMObject(Inode&, size_t);
- explicit PrivateInodeVMObject(PrivateInodeVMObject const&);
-
- virtual StringView class_name() const override { return "PrivateInodeVMObject"sv; }
-
- PrivateInodeVMObject& operator=(PrivateInodeVMObject const&) = delete;
-};
-
-}
diff --git a/Kernel/VM/ProcessPagingScope.cpp b/Kernel/VM/ProcessPagingScope.cpp
deleted file mode 100644
index 9d018e35d8..0000000000
--- a/Kernel/VM/ProcessPagingScope.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <Kernel/Arch/x86/InterruptDisabler.h>
-#include <Kernel/VM/MemoryManager.h>
-#include <Kernel/VM/ProcessPagingScope.h>
-
-namespace Kernel {
-
-ProcessPagingScope::ProcessPagingScope(Process& process)
-{
- VERIFY(Thread::current() != nullptr);
- m_previous_cr3 = read_cr3();
- MM.enter_process_paging_scope(process);
-}
-
-ProcessPagingScope::~ProcessPagingScope()
-{
- InterruptDisabler disabler;
- Thread::current()->regs().cr3 = m_previous_cr3;
- write_cr3(m_previous_cr3);
-}
-
-}
diff --git a/Kernel/VM/ProcessPagingScope.h b/Kernel/VM/ProcessPagingScope.h
deleted file mode 100644
index c2b88a896b..0000000000
--- a/Kernel/VM/ProcessPagingScope.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/Types.h>
-#include <Kernel/Forward.h>
-
-namespace Kernel {
-
-class ProcessPagingScope {
-public:
- explicit ProcessPagingScope(Process&);
- ~ProcessPagingScope();
-
-private:
- u32 m_previous_cr3 { 0 };
-};
-
-}
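A minimal sketch of the RAII pattern above; the function name and body are hypothetical.
    // Sketch only: switch to another process's page tables for the duration
    // of a scope, then restore the previous CR3 automatically.
    void inspect_address_space(Process& process)
    {
        ProcessPagingScope paging_scope(process);
        // ... reads through `process`'s user-space mappings resolve here ...
    } // ~ProcessPagingScope() reinstates the previous CR3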
diff --git a/Kernel/VM/Range.cpp b/Kernel/VM/Range.cpp
deleted file mode 100644
index 4dc3c01bbc..0000000000
--- a/Kernel/VM/Range.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
- * Copyright (c) 2021, Leon Albrecht <leon2002.la@gmail.com>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <AK/Vector.h>
-#include <Kernel/VM/MemoryManager.h>
-#include <Kernel/VM/Range.h>
-#include <LibC/limits.h>
-
-namespace Kernel {
-
-Vector<Range, 2> Range::carve(const Range& taken) const
-{
- VERIFY((taken.size() % PAGE_SIZE) == 0);
-
- Vector<Range, 2> parts;
- if (taken == *this)
- return {};
- if (taken.base() > base())
- parts.append({ base(), taken.base().get() - base().get() });
- if (taken.end() < end())
- parts.append({ taken.end(), end().get() - taken.end().get() });
- return parts;
-}
-Range Range::intersect(const Range& other) const
-{
- if (*this == other) {
- return *this;
- }
- auto new_base = max(base(), other.base());
- auto new_end = min(end(), other.end());
- VERIFY(new_base < new_end);
- return Range(new_base, (new_end - new_base).get());
-}
-
-KResultOr<Range> Range::expand_to_page_boundaries(FlatPtr address, size_t size)
-{
- if (page_round_up_would_wrap(size))
- return EINVAL;
-
- if ((address + size) < address)
- return EINVAL;
-
- if (page_round_up_would_wrap(address + size))
- return EINVAL;
-
- auto base = VirtualAddress { address }.page_base();
- auto end = page_round_up(address + size);
-
- return Range { base, end - base.get() };
-}
-
-}
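As a worked example of expand_to_page_boundaries (assuming 4 KiB pages): address 0x1234 with size 0x100 yields base 0x1000 (the page base of 0x1234) and end page_round_up(0x1334) == 0x2000, i.e. a Range of { 0x1000, 0x1000 }.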
diff --git a/Kernel/VM/Range.h b/Kernel/VM/Range.h
deleted file mode 100644
index e9693a44dd..0000000000
--- a/Kernel/VM/Range.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
- * Copyright (c) 2021, Leon Albrecht <leon2002.la@gmail.com>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <Kernel/VirtualAddress.h>
-
-namespace Kernel {
-
-class Range {
- friend class RangeAllocator;
-
-public:
- Range() = delete;
- Range(VirtualAddress base, size_t size)
- : m_base(base)
- , m_size(size)
- {
- }
-
- VirtualAddress base() const { return m_base; }
- size_t size() const { return m_size; }
- bool is_valid() const { return !m_base.is_null(); }
-
- bool contains(VirtualAddress vaddr) const { return vaddr >= base() && vaddr < end(); }
-
- VirtualAddress end() const { return m_base.offset(m_size); }
-
- bool operator==(const Range& other) const
- {
- return m_base == other.m_base && m_size == other.m_size;
- }
-
- bool contains(VirtualAddress base, size_t size) const
- {
- if (base.offset(size) < base)
- return false;
- return base >= m_base && base.offset(size) <= end();
- }
-
- bool contains(const Range& other) const
- {
- return contains(other.base(), other.size());
- }
-
- Vector<Range, 2> carve(const Range&) const;
- Range intersect(const Range&) const;
-
- static KResultOr<Range> expand_to_page_boundaries(FlatPtr address, size_t size);
-
-private:
- VirtualAddress m_base;
- size_t m_size { 0 };
-};
-
-}
-
-template<>
-struct AK::Formatter<Kernel::Range> : Formatter<FormatString> {
- void format(FormatBuilder& builder, Kernel::Range value)
- {
- return Formatter<FormatString>::format(builder, "{} - {} (size {:p})", value.base().as_ptr(), value.base().offset(value.size() - 1).as_ptr(), value.size());
- }
-};
diff --git a/Kernel/VM/RangeAllocator.cpp b/Kernel/VM/RangeAllocator.cpp
deleted file mode 100644
index 3ab88abdea..0000000000
--- a/Kernel/VM/RangeAllocator.cpp
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <AK/Checked.h>
-#include <Kernel/Random.h>
-#include <Kernel/VM/RangeAllocator.h>
-
-#define VM_GUARD_PAGES
-
-namespace Kernel {
-
-RangeAllocator::RangeAllocator()
- : m_total_range({}, 0)
-{
-}
-
-void RangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
-{
- m_total_range = { base, size };
- m_available_ranges.insert(base.get(), Range { base, size });
-}
-
-void RangeAllocator::initialize_from_parent(RangeAllocator const& parent_allocator)
-{
- ScopedSpinLock lock(parent_allocator.m_lock);
- m_total_range = parent_allocator.m_total_range;
- m_available_ranges.clear();
- for (auto it = parent_allocator.m_available_ranges.begin(); !it.is_end(); ++it) {
- m_available_ranges.insert(it.key(), *it);
- }
-}
-
-void RangeAllocator::dump() const
-{
- VERIFY(m_lock.is_locked());
- dbgln("RangeAllocator({})", this);
- for (auto& range : m_available_ranges) {
- dbgln(" {:x} -> {:x}", range.base().get(), range.end().get() - 1);
- }
-}
-
-void RangeAllocator::carve_at_iterator(auto& it, Range const& range)
-{
- VERIFY(m_lock.is_locked());
- auto remaining_parts = (*it).carve(range);
- VERIFY(remaining_parts.size() >= 1);
- VERIFY(m_total_range.contains(remaining_parts[0]));
- m_available_ranges.remove(it.key());
- m_available_ranges.insert(remaining_parts[0].base().get(), remaining_parts[0]);
- if (remaining_parts.size() == 2) {
- VERIFY(m_total_range.contains(remaining_parts[1]));
- m_available_ranges.insert(remaining_parts[1].base().get(), remaining_parts[1]);
- }
-}
-
-Optional<Range> RangeAllocator::allocate_randomized(size_t size, size_t alignment)
-{
- if (!size)
- return {};
-
- VERIFY((size % PAGE_SIZE) == 0);
- VERIFY((alignment % PAGE_SIZE) == 0);
-
- // FIXME: I'm sure there's a smarter way to do this.
- static constexpr size_t maximum_randomization_attempts = 1000;
- for (size_t i = 0; i < maximum_randomization_attempts; ++i) {
- VirtualAddress random_address { round_up_to_power_of_two(get_fast_random<FlatPtr>() % m_total_range.end().get(), alignment) };
-
- if (!m_total_range.contains(random_address, size))
- continue;
-
- auto range = allocate_specific(random_address, size);
- if (range.has_value())
- return range;
- }
-
- return allocate_anywhere(size, alignment);
-}
-
-Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
-{
- if (!size)
- return {};
-
- VERIFY((size % PAGE_SIZE) == 0);
- VERIFY((alignment % PAGE_SIZE) == 0);
-
-#ifdef VM_GUARD_PAGES
- // NOTE: We pad VM allocations with a guard page on each side.
- if (Checked<size_t>::addition_would_overflow(size, PAGE_SIZE * 2))
- return {};
-
- size_t effective_size = size + PAGE_SIZE * 2;
- size_t offset_from_effective_base = PAGE_SIZE;
-#else
- size_t effective_size = size;
- size_t offset_from_effective_base = 0;
-#endif
-
- if (Checked<size_t>::addition_would_overflow(effective_size, alignment))
- return {};
-
- ScopedSpinLock lock(m_lock);
-
- for (auto it = m_available_ranges.begin(); !it.is_end(); ++it) {
- auto& available_range = *it;
- // FIXME: This check is probably excluding some valid candidates when using a large alignment.
- if (available_range.size() < (effective_size + alignment))
- continue;
-
- FlatPtr initial_base = available_range.base().offset(offset_from_effective_base).get();
- FlatPtr aligned_base = round_up_to_power_of_two(initial_base, alignment);
-
- Range const allocated_range(VirtualAddress(aligned_base), size);
-
- VERIFY(m_total_range.contains(allocated_range));
-
- if (available_range == allocated_range) {
- m_available_ranges.remove(it.key());
- return allocated_range;
- }
- carve_at_iterator(it, allocated_range);
- return allocated_range;
- }
- dmesgln("RangeAllocator: Failed to allocate anywhere: size={}, alignment={}", size, alignment);
- return {};
-}
-
-Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
-{
- if (!size)
- return {};
-
- VERIFY(base.is_page_aligned());
- VERIFY((size % PAGE_SIZE) == 0);
-
- Range const allocated_range(base, size);
- if (!m_total_range.contains(allocated_range)) {
- return {};
- }
-
- ScopedSpinLock lock(m_lock);
- for (auto it = m_available_ranges.begin(); !it.is_end(); ++it) {
- auto& available_range = *it;
- if (!available_range.contains(base, size))
- continue;
- if (available_range == allocated_range) {
- m_available_ranges.remove(it.key());
- return allocated_range;
- }
- carve_at_iterator(it, allocated_range);
- return allocated_range;
- }
- return {};
-}
-
-void RangeAllocator::deallocate(Range const& range)
-{
- ScopedSpinLock lock(m_lock);
- VERIFY(m_total_range.contains(range));
- VERIFY(range.size());
- VERIFY((range.size() % PAGE_SIZE) == 0);
- VERIFY(range.base() < range.end());
- VERIFY(!m_available_ranges.is_empty());
-
- Range merged_range = range;
-
- {
- // Try merging with preceding range.
- auto* preceding_range = m_available_ranges.find_largest_not_above(range.base().get());
- if (preceding_range && preceding_range->end() == range.base()) {
- preceding_range->m_size += range.size();
- merged_range = *preceding_range;
- } else {
- m_available_ranges.insert(range.base().get(), range);
- }
- }
-
- {
- // Try merging with following range.
- auto* following_range = m_available_ranges.find_largest_not_above(range.end().get());
- if (following_range && merged_range.end() == following_range->base()) {
- auto* existing_range = m_available_ranges.find_largest_not_above(range.base().get());
- VERIFY(existing_range->base() == merged_range.base());
- existing_range->m_size += following_range->size();
- m_available_ranges.remove(following_range->base().get());
- }
- }
-}
-
-}
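
For orientation, a minimal illustrative sketch of how kernel code drives this RangeAllocator (the base address and sizes below are made up, and the surrounding kernel context is assumed; this is not code from the tree):

    RangeAllocator allocator;
    allocator.initialize_with_range(VirtualAddress(0x2000'0000), 64 * 1024 * 1024); // hypothetical 64 MiB window

    // First-fit allocation; allocate_anywhere() pads the request with a guard page on each side.
    if (auto range = allocator.allocate_anywhere(16 * PAGE_SIZE); range.has_value()) {
        // ... map something into range.value() ...
        allocator.deallocate(range.value()); // deallocate() merges the range back with adjacent free ranges
    }
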
diff --git a/Kernel/VM/RangeAllocator.h b/Kernel/VM/RangeAllocator.h
deleted file mode 100644
index c7e2962f2b..0000000000
--- a/Kernel/VM/RangeAllocator.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/RedBlackTree.h>
-#include <AK/Traits.h>
-#include <Kernel/SpinLock.h>
-#include <Kernel/VM/Range.h>
-
-namespace Kernel {
-
-class RangeAllocator {
-public:
- RangeAllocator();
- ~RangeAllocator() = default;
-
- void initialize_with_range(VirtualAddress, size_t);
- void initialize_from_parent(RangeAllocator const&);
-
- Optional<Range> allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
- Optional<Range> allocate_specific(VirtualAddress, size_t);
- Optional<Range> allocate_randomized(size_t, size_t alignment);
- void deallocate(Range const&);
-
- void dump() const;
-
- bool contains(Range const& range) const { return m_total_range.contains(range); }
-
-private:
- void carve_at_iterator(auto&, Range const&);
-
- RedBlackTree<FlatPtr, Range> m_available_ranges;
- Range m_total_range;
- mutable SpinLock<u8> m_lock;
-};
-
-}
-
-namespace AK {
-template<>
-struct Traits<Kernel::Range> : public GenericTraits<Kernel::Range> {
- static constexpr bool is_trivial() { return true; }
-};
-}
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
deleted file mode 100644
index cbe625aa64..0000000000
--- a/Kernel/VM/Region.cpp
+++ /dev/null
@@ -1,458 +0,0 @@
-/*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <AK/Memory.h>
-#include <AK/StringView.h>
-#include <Kernel/Debug.h>
-#include <Kernel/FileSystem/Inode.h>
-#include <Kernel/Panic.h>
-#include <Kernel/Process.h>
-#include <Kernel/Thread.h>
-#include <Kernel/VM/AnonymousVMObject.h>
-#include <Kernel/VM/MemoryManager.h>
-#include <Kernel/VM/PageDirectory.h>
-#include <Kernel/VM/Region.h>
-#include <Kernel/VM/SharedInodeVMObject.h>
-
-namespace Kernel {
-
-Region::Region(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
- : m_range(range)
- , m_offset_in_vmobject(offset_in_vmobject)
- , m_vmobject(move(vmobject))
- , m_name(move(name))
- , m_access(access | ((access & 0x7) << 4))
- , m_shared(shared)
- , m_cacheable(cacheable == Cacheable::Yes)
-{
- VERIFY(m_range.base().is_page_aligned());
- VERIFY(m_range.size());
- VERIFY((m_range.size() % PAGE_SIZE) == 0);
-
- m_vmobject->add_region(*this);
- MM.register_region(*this);
-}
-
-Region::~Region()
-{
- m_vmobject->remove_region(*this);
-
- // Make sure we disable interrupts so we don't get interrupted between unmapping and unregistering.
- // Unmapping the region gives the VM back to the RangeAllocator, so an interrupt handler could
- // otherwise observe the address<->region mappings in an inconsistent state.
- ScopedSpinLock lock(s_mm_lock);
- if (m_page_directory) {
- unmap(ShouldDeallocateVirtualMemoryRange::Yes);
- VERIFY(!m_page_directory);
- }
-
- MM.unregister_region(*this);
-}
-
-OwnPtr<Region> Region::clone()
-{
- VERIFY(Process::current());
-
- ScopedSpinLock lock(s_mm_lock);
-
- if (m_shared) {
- VERIFY(!m_stack);
- if (vmobject().is_inode())
- VERIFY(vmobject().is_shared_inode());
-
- // Create a new region backed by the same VMObject.
- auto region = Region::try_create_user_accessible(
- m_range, m_vmobject, m_offset_in_vmobject, m_name ? m_name->try_clone() : OwnPtr<KString> {}, access(), m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared);
- if (!region) {
- dbgln("Region::clone: Unable to allocate new Region");
- return nullptr;
- }
- region->set_mmap(m_mmap);
- region->set_shared(m_shared);
- region->set_syscall_region(is_syscall_region());
- return region;
- }
-
- if (vmobject().is_inode())
- VERIFY(vmobject().is_private_inode());
-
- auto vmobject_clone = vmobject().try_clone();
- if (!vmobject_clone)
- return {};
-
- // Set up a COW region. The parent (this) region becomes COW as well!
- remap();
- auto clone_region = Region::try_create_user_accessible(
- m_range, vmobject_clone.release_nonnull(), m_offset_in_vmobject, m_name ? m_name->try_clone() : OwnPtr<KString> {}, access(), m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared);
- if (!clone_region) {
- dbgln("Region::clone: Unable to allocate new Region for COW");
- return nullptr;
- }
- if (m_stack) {
- VERIFY(is_readable());
- VERIFY(is_writable());
- VERIFY(vmobject().is_anonymous());
- clone_region->set_stack(true);
- }
- clone_region->set_syscall_region(is_syscall_region());
- clone_region->set_mmap(m_mmap);
- return clone_region;
-}
-
-void Region::set_vmobject(NonnullRefPtr<VMObject>&& obj)
-{
- if (m_vmobject.ptr() == obj.ptr())
- return;
- m_vmobject->remove_region(*this);
- m_vmobject = move(obj);
- m_vmobject->add_region(*this);
-}
-
-size_t Region::cow_pages() const
-{
- if (!vmobject().is_anonymous())
- return 0;
- return static_cast<AnonymousVMObject const&>(vmobject()).cow_pages();
-}
-
-size_t Region::amount_dirty() const
-{
- if (!vmobject().is_inode())
- return amount_resident();
- return static_cast<InodeVMObject const&>(vmobject()).amount_dirty();
-}
-
-size_t Region::amount_resident() const
-{
- size_t bytes = 0;
- for (size_t i = 0; i < page_count(); ++i) {
- auto* page = physical_page(i);
- if (page && !page->is_shared_zero_page() && !page->is_lazy_committed_page())
- bytes += PAGE_SIZE;
- }
- return bytes;
-}
-
-size_t Region::amount_shared() const
-{
- size_t bytes = 0;
- for (size_t i = 0; i < page_count(); ++i) {
- auto* page = physical_page(i);
- if (page && page->ref_count() > 1 && !page->is_shared_zero_page() && !page->is_lazy_committed_page())
- bytes += PAGE_SIZE;
- }
- return bytes;
-}
-
-OwnPtr<Region> Region::try_create_user_accessible(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
-{
- auto region = adopt_own_if_nonnull(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared));
- if (!region)
- return nullptr;
- return region;
-}
-
-OwnPtr<Region> Region::try_create_kernel_only(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable)
-{
- return adopt_own_if_nonnull(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, false));
-}
-
-bool Region::should_cow(size_t page_index) const
-{
- if (!vmobject().is_anonymous())
- return false;
- return static_cast<AnonymousVMObject const&>(vmobject()).should_cow(first_page_index() + page_index, m_shared);
-}
-
-void Region::set_should_cow(size_t page_index, bool cow)
-{
- VERIFY(!m_shared);
- if (vmobject().is_anonymous())
- static_cast<AnonymousVMObject&>(vmobject()).set_should_cow(first_page_index() + page_index, cow);
-}
-
-bool Region::map_individual_page_impl(size_t page_index)
-{
- VERIFY(m_page_directory->get_lock().own_lock());
- auto page_vaddr = vaddr_from_page_index(page_index);
-
- bool user_allowed = page_vaddr.get() >= 0x00800000 && is_user_address(page_vaddr);
- if (is_mmap() && !user_allowed) {
- PANIC("About to map mmap'ed page at a kernel address");
- }
-
- // NOTE: We have to take the MM lock for PTEs to stay valid while we use them.
- ScopedSpinLock mm_locker(s_mm_lock);
-
- auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
- if (!pte)
- return false;
- auto* page = physical_page(page_index);
- if (!page || (!is_readable() && !is_writable())) {
- pte->clear();
- } else {
- pte->set_cache_disabled(!m_cacheable);
- pte->set_physical_page_base(page->paddr().get());
- pte->set_present(true);
- if (page->is_shared_zero_page() || page->is_lazy_committed_page() || should_cow(page_index))
- pte->set_writable(false);
- else
- pte->set_writable(is_writable());
- if (Processor::current().has_feature(CPUFeature::NX))
- pte->set_execute_disabled(!is_executable());
- pte->set_user_allowed(user_allowed);
- }
- return true;
-}
-
-bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
-{
- ScopedSpinLock lock(vmobject().m_lock);
- if (!m_page_directory)
- return true; // not an error, the region may not have mapped it yet
- if (!translate_vmobject_page(page_index))
- return true; // not an error, region doesn't map this page
- ScopedSpinLock page_lock(m_page_directory->get_lock());
- VERIFY(physical_page(page_index));
- bool success = map_individual_page_impl(page_index);
- if (with_flush)
- MM.flush_tlb(m_page_directory, vaddr_from_page_index(page_index));
- return success;
-}
-
-bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
-{
- auto& vmobject = this->vmobject();
- bool success = true;
- vmobject.for_each_region([&](auto& region) {
- if (!region.do_remap_vmobject_page(page_index, with_flush))
- success = false;
- });
- return success;
-}
-
-void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
-{
- ScopedSpinLock lock(s_mm_lock);
- if (!m_page_directory)
- return;
- ScopedSpinLock page_lock(m_page_directory->get_lock());
- size_t count = page_count();
- for (size_t i = 0; i < count; ++i) {
- auto vaddr = vaddr_from_page_index(i);
- MM.release_pte(*m_page_directory, vaddr, i == count - 1);
- }
- MM.flush_tlb(m_page_directory, vaddr(), page_count());
- if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes) {
- if (m_page_directory->range_allocator().contains(range()))
- m_page_directory->range_allocator().deallocate(range());
- else
- m_page_directory->identity_range_allocator().deallocate(range());
- }
- m_page_directory = nullptr;
-}
-
-void Region::set_page_directory(PageDirectory& page_directory)
-{
- VERIFY(!m_page_directory || m_page_directory == &page_directory);
- VERIFY(s_mm_lock.own_lock());
- m_page_directory = page_directory;
-}
-
-bool Region::map(PageDirectory& page_directory, ShouldFlushTLB should_flush_tlb)
-{
- ScopedSpinLock lock(s_mm_lock);
- ScopedSpinLock page_lock(page_directory.get_lock());
-
- // FIXME: Find a better place for this sanity check(?)
- if (is_user() && !is_shared()) {
- VERIFY(!vmobject().is_shared_inode());
- }
-
- set_page_directory(page_directory);
- size_t page_index = 0;
- while (page_index < page_count()) {
- if (!map_individual_page_impl(page_index))
- break;
- ++page_index;
- }
- if (page_index > 0) {
- if (should_flush_tlb == ShouldFlushTLB::Yes)
- MM.flush_tlb(m_page_directory, vaddr(), page_index);
- return page_index == page_count();
- }
- return false;
-}
-
-void Region::remap()
-{
- VERIFY(m_page_directory);
- map(*m_page_directory);
-}
-
-PageFaultResponse Region::handle_fault(PageFault const& fault)
-{
- auto page_index_in_region = page_index_from_address(fault.vaddr());
- if (fault.type() == PageFault::Type::PageNotPresent) {
- if (fault.is_read() && !is_readable()) {
- dbgln("NP(non-readable) fault in Region({})[{}]", this, page_index_in_region);
- return PageFaultResponse::ShouldCrash;
- }
- if (fault.is_write() && !is_writable()) {
- dbgln("NP(non-writable) write fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr());
- return PageFaultResponse::ShouldCrash;
- }
- if (vmobject().is_inode()) {
- dbgln_if(PAGE_FAULT_DEBUG, "NP(inode) fault in Region({})[{}]", this, page_index_in_region);
- return handle_inode_fault(page_index_in_region);
- }
-
- auto& page_slot = physical_page_slot(page_index_in_region);
- if (page_slot->is_lazy_committed_page()) {
- auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
- VERIFY(m_vmobject->is_anonymous());
- page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page({});
- remap_vmobject_page(page_index_in_vmobject);
- return PageFaultResponse::Continue;
- }
- dbgln("BUG! Unexpected NP fault at {}", fault.vaddr());
- return PageFaultResponse::ShouldCrash;
- }
- VERIFY(fault.type() == PageFault::Type::ProtectionViolation);
- if (fault.access() == PageFault::Access::Write && is_writable() && should_cow(page_index_in_region)) {
- dbgln_if(PAGE_FAULT_DEBUG, "PV(cow) fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr());
- auto* phys_page = physical_page(page_index_in_region);
- if (phys_page->is_shared_zero_page() || phys_page->is_lazy_committed_page()) {
- dbgln_if(PAGE_FAULT_DEBUG, "NP(zero) fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr());
- return handle_zero_fault(page_index_in_region);
- }
- return handle_cow_fault(page_index_in_region);
- }
- dbgln("PV(error) fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr());
- return PageFaultResponse::ShouldCrash;
-}
-
-PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
-{
- VERIFY_INTERRUPTS_DISABLED();
- VERIFY(vmobject().is_anonymous());
-
- auto& page_slot = physical_page_slot(page_index_in_region);
- auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
-
- ScopedSpinLock locker(vmobject().m_lock);
-
- if (!page_slot.is_null() && !page_slot->is_shared_zero_page() && !page_slot->is_lazy_committed_page()) {
- dbgln_if(PAGE_FAULT_DEBUG, "MM: zero_page() but page already present. Fine with me!");
- if (!remap_vmobject_page(page_index_in_vmobject))
- return PageFaultResponse::OutOfMemory;
- return PageFaultResponse::Continue;
- }
-
- auto current_thread = Thread::current();
- if (current_thread != nullptr)
- current_thread->did_zero_fault();
-
- if (page_slot->is_lazy_committed_page()) {
- VERIFY(m_vmobject->is_anonymous());
- page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page({});
- dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED COMMITTED {}", page_slot->paddr());
- } else {
- page_slot = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
- if (page_slot.is_null()) {
- dmesgln("MM: handle_zero_fault was unable to allocate a physical page");
- return PageFaultResponse::OutOfMemory;
- }
- dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED {}", page_slot->paddr());
- }
-
- if (!remap_vmobject_page(page_index_in_vmobject)) {
- dmesgln("MM: handle_zero_fault was unable to allocate a page table to map {}", page_slot);
- return PageFaultResponse::OutOfMemory;
- }
- return PageFaultResponse::Continue;
-}
-
-PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
-{
- VERIFY_INTERRUPTS_DISABLED();
- auto current_thread = Thread::current();
- if (current_thread)
- current_thread->did_cow_fault();
-
- if (!vmobject().is_anonymous())
- return PageFaultResponse::ShouldCrash;
-
- auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
- auto response = reinterpret_cast<AnonymousVMObject&>(vmobject()).handle_cow_fault(page_index_in_vmobject, vaddr().offset(page_index_in_region * PAGE_SIZE));
- if (!remap_vmobject_page(page_index_in_vmobject))
- return PageFaultResponse::OutOfMemory;
- return response;
-}
-
-PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
-{
- VERIFY_INTERRUPTS_DISABLED();
- VERIFY(vmobject().is_inode());
- VERIFY(!s_mm_lock.own_lock());
- VERIFY(!g_scheduler_lock.own_lock());
-
- auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
-
- auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
- auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[page_index_in_vmobject];
- VERIFY(vmobject_physical_page_entry.is_null());
-
- dbgln_if(PAGE_FAULT_DEBUG, "Inode fault in {} page index: {}", name(), page_index_in_region);
-
- auto current_thread = Thread::current();
- if (current_thread)
- current_thread->did_inode_fault();
-
- u8 page_buffer[PAGE_SIZE];
- auto& inode = inode_vmobject.inode();
-
- auto buffer = UserOrKernelBuffer::for_kernel_buffer(page_buffer);
- auto result = inode.read_bytes(page_index_in_vmobject * PAGE_SIZE, PAGE_SIZE, buffer, nullptr);
-
- if (result.is_error()) {
- dmesgln("handle_inode_fault: Error ({}) while reading from inode", result.error());
- return PageFaultResponse::ShouldCrash;
- }
-
- auto nread = result.value();
- if (nread < PAGE_SIZE) {
- // If we read less than a page, zero out the rest to avoid leaking uninitialized data.
- memset(page_buffer + nread, 0, PAGE_SIZE - nread);
- }
-
- ScopedSpinLock locker(inode_vmobject.m_lock);
-
- if (!vmobject_physical_page_entry.is_null()) {
- // Someone else faulted in this page while we were reading from the inode.
- // No harm done (other than some duplicate work), remap the page here and return.
- dbgln_if(PAGE_FAULT_DEBUG, "handle_inode_fault: Page faulted in by someone else, remapping.");
- if (!remap_vmobject_page(page_index_in_vmobject))
- return PageFaultResponse::OutOfMemory;
- return PageFaultResponse::Continue;
- }
-
- vmobject_physical_page_entry = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
-
- if (vmobject_physical_page_entry.is_null()) {
- dmesgln("MM: handle_inode_fault was unable to allocate a physical page");
- return PageFaultResponse::OutOfMemory;
- }
-
- u8* dest_ptr = MM.quickmap_page(*vmobject_physical_page_entry);
- memcpy(dest_ptr, page_buffer, PAGE_SIZE);
- MM.unquickmap_page();
-
- remap_vmobject_page(page_index_in_vmobject);
- return PageFaultResponse::Continue;
-}
-
-}
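
As a rough illustration of how the pieces above fit together, this is roughly what creating and mapping a kernel-only region over fresh anonymous memory looks like (a sketch only: `range` and `page_directory` are assumed to come from MemoryManager, and the enclosing function is assumed to return a KResult-style error):

    auto vmobject = AnonymousVMObject::try_create_with_size(range.size(), AllocationStrategy::Reserve);
    if (!vmobject)
        return ENOMEM;
    auto region = Region::try_create_kernel_only(range, vmobject.release_nonnull(), 0, KString::try_create("Example"), Region::Access::Read | Region::Access::Write);
    if (!region)
        return ENOMEM;
    if (!region->map(page_directory)) // installs the PTEs and flushes the TLB
        return ENOMEM;
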
diff --git a/Kernel/VM/Region.h b/Kernel/VM/Region.h
deleted file mode 100644
index e312e98a04..0000000000
--- a/Kernel/VM/Region.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/EnumBits.h>
-#include <AK/IntrusiveList.h>
-#include <AK/Weakable.h>
-#include <Kernel/Arch/x86/PageFault.h>
-#include <Kernel/Forward.h>
-#include <Kernel/Heap/SlabAllocator.h>
-#include <Kernel/KString.h>
-#include <Kernel/Sections.h>
-#include <Kernel/UnixTypes.h>
-#include <Kernel/VM/PageFaultResponse.h>
-#include <Kernel/VM/RangeAllocator.h>
-
-namespace Kernel {
-
-enum class ShouldFlushTLB {
- No,
- Yes,
-};
-
-class Region final
- : public Weakable<Region> {
- friend class MemoryManager;
-
- MAKE_SLAB_ALLOCATED(Region)
-public:
- enum Access : u8 {
- None = 0,
- Read = 1,
- Write = 2,
- Execute = 4,
- HasBeenReadable = 16,
- HasBeenWritable = 32,
- HasBeenExecutable = 64,
- };
-
- enum class Cacheable {
- No = 0,
- Yes,
- };
-
- static OwnPtr<Region> try_create_user_accessible(Range const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared);
- static OwnPtr<Region> try_create_kernel_only(Range const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes);
-
- ~Region();
-
- Range const& range() const { return m_range; }
- VirtualAddress vaddr() const { return m_range.base(); }
- size_t size() const { return m_range.size(); }
- bool is_readable() const { return m_access & Access::Read; }
- bool is_writable() const { return m_access & Access::Write; }
- bool is_executable() const { return m_access & Access::Execute; }
-
- bool has_been_readable() const { return m_access & Access::HasBeenReadable; }
- bool has_been_writable() const { return m_access & Access::HasBeenWritable; }
- bool has_been_executable() const { return m_access & Access::HasBeenExecutable; }
-
- bool is_cacheable() const { return m_cacheable; }
- StringView name() const { return m_name ? m_name->view() : StringView {}; }
- OwnPtr<KString> take_name() { return move(m_name); }
- Region::Access access() const { return static_cast<Region::Access>(m_access); }
-
- void set_name(OwnPtr<KString> name) { m_name = move(name); }
-
- VMObject const& vmobject() const { return *m_vmobject; }
- VMObject& vmobject() { return *m_vmobject; }
- void set_vmobject(NonnullRefPtr<VMObject>&&);
-
- bool is_shared() const { return m_shared; }
- void set_shared(bool shared) { m_shared = shared; }
-
- bool is_stack() const { return m_stack; }
- void set_stack(bool stack) { m_stack = stack; }
-
- bool is_mmap() const { return m_mmap; }
- void set_mmap(bool mmap) { m_mmap = mmap; }
-
- bool is_user() const { return !is_kernel(); }
- bool is_kernel() const { return vaddr().get() < 0x00800000 || vaddr().get() >= kernel_mapping_base; }
-
- PageFaultResponse handle_fault(PageFault const&);
-
- OwnPtr<Region> clone();
-
- bool contains(VirtualAddress vaddr) const
- {
- return m_range.contains(vaddr);
- }
-
- bool contains(Range const& range) const
- {
- return m_range.contains(range);
- }
-
- unsigned page_index_from_address(VirtualAddress vaddr) const
- {
- return (vaddr - m_range.base()).get() / PAGE_SIZE;
- }
-
- VirtualAddress vaddr_from_page_index(size_t page_index) const
- {
- return vaddr().offset(page_index * PAGE_SIZE);
- }
-
- bool translate_vmobject_page(size_t& index) const
- {
- auto first_index = first_page_index();
- if (index < first_index) {
- index = first_index;
- return false;
- }
- index -= first_index;
- auto total_page_count = this->page_count();
- if (index >= total_page_count) {
- index = first_index + total_page_count - 1;
- return false;
- }
- return true;
- }
-
- ALWAYS_INLINE size_t translate_to_vmobject_page(size_t page_index) const
- {
- return first_page_index() + page_index;
- }
-
- size_t first_page_index() const
- {
- return m_offset_in_vmobject / PAGE_SIZE;
- }
-
- size_t page_count() const
- {
- return size() / PAGE_SIZE;
- }
-
- PhysicalPage const* physical_page(size_t index) const;
- RefPtr<PhysicalPage>& physical_page_slot(size_t index);
-
- size_t offset_in_vmobject() const
- {
- return m_offset_in_vmobject;
- }
-
- size_t offset_in_vmobject_from_vaddr(VirtualAddress vaddr) const
- {
- return m_offset_in_vmobject + vaddr.get() - this->vaddr().get();
- }
-
- size_t amount_resident() const;
- size_t amount_shared() const;
- size_t amount_dirty() const;
-
- bool should_cow(size_t page_index) const;
- void set_should_cow(size_t page_index, bool);
-
- size_t cow_pages() const;
-
- void set_readable(bool b) { set_access_bit(Access::Read, b); }
- void set_writable(bool b) { set_access_bit(Access::Write, b); }
- void set_executable(bool b) { set_access_bit(Access::Execute, b); }
-
- void set_page_directory(PageDirectory&);
- bool map(PageDirectory&, ShouldFlushTLB = ShouldFlushTLB::Yes);
- enum class ShouldDeallocateVirtualMemoryRange {
- No,
- Yes,
- };
- void unmap(ShouldDeallocateVirtualMemoryRange = ShouldDeallocateVirtualMemoryRange::Yes);
-
- void remap();
-
- bool is_syscall_region() const { return m_syscall_region; }
- void set_syscall_region(bool b) { m_syscall_region = b; }
-
-private:
- Region(Range const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
-
- bool remap_vmobject_page(size_t page_index, bool with_flush = true);
- bool do_remap_vmobject_page(size_t page_index, bool with_flush = true);
-
- void set_access_bit(Access access, bool b)
- {
- if (b)
- m_access |= access | (access << 4);
- else
- m_access &= ~access;
- }
-
- PageFaultResponse handle_cow_fault(size_t page_index);
- PageFaultResponse handle_inode_fault(size_t page_index);
- PageFaultResponse handle_zero_fault(size_t page_index);
-
- bool map_individual_page_impl(size_t page_index);
-
- RefPtr<PageDirectory> m_page_directory;
- Range m_range;
- size_t m_offset_in_vmobject { 0 };
- NonnullRefPtr<VMObject> m_vmobject;
- OwnPtr<KString> m_name;
- u8 m_access { Region::None };
- bool m_shared : 1 { false };
- bool m_cacheable : 1 { false };
- bool m_stack : 1 { false };
- bool m_mmap : 1 { false };
- bool m_syscall_region : 1 { false };
- IntrusiveListNode<Region> m_memory_manager_list_node;
- IntrusiveListNode<Region> m_vmobject_list_node;
-
-public:
- using ListInMemoryManager = IntrusiveList<Region, RawPtr<Region>, &Region::m_memory_manager_list_node>;
- using ListInVMObject = IntrusiveList<Region, RawPtr<Region>, &Region::m_vmobject_list_node>;
-};
-
-AK_ENUM_BITWISE_OPERATORS(Region::Access)
-
-inline Region::Access prot_to_region_access_flags(int prot)
-{
- Region::Access access = Region::Access::None;
- if (prot & PROT_READ)
- access |= Region::Access::Read;
- if (prot & PROT_WRITE)
- access |= Region::Access::Write;
- if (prot & PROT_EXEC)
- access |= Region::Access::Execute;
- return access;
-}
-
-inline int region_access_flags_to_prot(Region::Access access)
-{
- int prot = 0;
- if (access & Region::Access::Read)
- prot |= PROT_READ;
- if (access & Region::Access::Write)
- prot |= PROT_WRITE;
- if (access & Region::Access::Execute)
- prot |= PROT_EXEC;
- return prot;
-}
-
-}
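
The two helpers at the bottom translate back and forth between the three POSIX protection bits and Region::Access; a quick sketch (kernel context assumed):

    auto access = prot_to_region_access_flags(PROT_READ | PROT_WRITE);
    VERIFY(access == (Region::Access::Read | Region::Access::Write));
    VERIFY(region_access_flags_to_prot(access) == (PROT_READ | PROT_WRITE));
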
diff --git a/Kernel/VM/RingBuffer.cpp b/Kernel/VM/RingBuffer.cpp
deleted file mode 100644
index bfaa8da326..0000000000
--- a/Kernel/VM/RingBuffer.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2021, Sahan Fernando <sahan.h.fernando@gmail.com>.
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <Kernel/UserOrKernelBuffer.h>
-#include <Kernel/VM/MemoryManager.h>
-#include <Kernel/VM/RingBuffer.h>
-
-namespace Kernel {
-
-RingBuffer::RingBuffer(String region_name, size_t capacity)
- : m_region(MM.allocate_contiguous_kernel_region(page_round_up(capacity), move(region_name), Region::Access::Read | Region::Access::Write))
- , m_capacity_in_bytes(capacity)
-{
-}
-
-bool RingBuffer::copy_data_in(const UserOrKernelBuffer& buffer, size_t offset, size_t length, PhysicalAddress& start_of_copied_data, size_t& bytes_copied)
-{
- size_t start_of_free_area = (m_start_of_used + m_num_used_bytes) % m_capacity_in_bytes;
- bytes_copied = min(m_capacity_in_bytes - m_num_used_bytes, min(m_capacity_in_bytes - start_of_free_area, length));
- if (bytes_copied == 0)
- return false;
- if (buffer.read(m_region->vaddr().offset(start_of_free_area).as_ptr(), offset, bytes_copied)) {
- m_num_used_bytes += bytes_copied;
- start_of_copied_data = m_region->physical_page(start_of_free_area / PAGE_SIZE)->paddr().offset(start_of_free_area % PAGE_SIZE);
- return true;
- }
- return false;
-}
-
-KResultOr<size_t> RingBuffer::copy_data_out(size_t size, UserOrKernelBuffer& buffer) const
-{
- auto start = m_start_of_used % m_capacity_in_bytes;
- auto num_bytes = min(min(m_num_used_bytes, size), m_capacity_in_bytes - start);
- if (!buffer.write(m_region->vaddr().offset(start).as_ptr(), num_bytes))
- return EIO;
- return num_bytes;
-}
-
-KResultOr<PhysicalAddress> RingBuffer::reserve_space(size_t size)
-{
- if (m_capacity_in_bytes < m_num_used_bytes + size)
- return ENOSPC;
- size_t start_of_free_area = (m_start_of_used + m_num_used_bytes) % m_capacity_in_bytes;
- m_num_used_bytes += size;
- PhysicalAddress start_of_reserved_space = m_region->physical_page(start_of_free_area / PAGE_SIZE)->paddr().offset(start_of_free_area % PAGE_SIZE);
- return start_of_reserved_space;
-}
-
-void RingBuffer::reclaim_space(PhysicalAddress chunk_start, size_t chunk_size)
-{
- VERIFY(start_of_used() == chunk_start);
- VERIFY(m_num_used_bytes >= chunk_size);
- m_num_used_bytes -= chunk_size;
- m_start_of_used += chunk_size;
-}
-
-PhysicalAddress RingBuffer::start_of_used() const
-{
- size_t start = m_start_of_used % m_capacity_in_bytes;
- return m_region->physical_page(start / PAGE_SIZE)->paddr().offset(start % PAGE_SIZE);
-}
-
-}
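
A sketch of the intended producer pattern (device-driver context assumed; the ring name, payload, and sizes are made up):

    RingBuffer ring("Example ring", 4 * PAGE_SIZE);
    u8 payload[512] {};
    auto buffer = UserOrKernelBuffer::for_kernel_buffer(payload);

    PhysicalAddress chunk_paddr;
    size_t bytes_copied = 0;
    // copy_data_in() may copy less than requested if the write would wrap past the end of the ring.
    if (ring.copy_data_in(buffer, 0, sizeof(payload), chunk_paddr, bytes_copied)) {
        // Hand chunk_paddr / bytes_copied to the device, then return the space once it is done:
        ring.reclaim_space(chunk_paddr, bytes_copied);
    }
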
diff --git a/Kernel/VM/RingBuffer.h b/Kernel/VM/RingBuffer.h
deleted file mode 100644
index 8a748e4b65..0000000000
--- a/Kernel/VM/RingBuffer.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2021, Sahan Fernando <sahan.h.fernando@gmail.com>.
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/String.h>
-#include <Kernel/PhysicalAddress.h>
-#include <Kernel/UserOrKernelBuffer.h>
-
-namespace Kernel {
-
-class RingBuffer {
-public:
- RingBuffer(String region_name, size_t capacity);
-
- bool has_space() const { return m_num_used_bytes < m_capacity_in_bytes; }
- bool copy_data_in(const UserOrKernelBuffer& buffer, size_t offset, size_t length, PhysicalAddress& start_of_copied_data, size_t& bytes_copied);
- KResultOr<size_t> copy_data_out(size_t size, UserOrKernelBuffer& buffer) const;
- KResultOr<PhysicalAddress> reserve_space(size_t size);
- void reclaim_space(PhysicalAddress chunk_start, size_t chunk_size);
- PhysicalAddress start_of_used() const;
-
- SpinLock<u8>& lock() { return m_lock; }
- size_t used_bytes() const { return m_num_used_bytes; }
- PhysicalAddress start_of_region() const { return m_region->physical_page(0)->paddr(); }
- VirtualAddress vaddr() const { return m_region->vaddr(); }
- size_t bytes_till_end() const { return (m_capacity_in_bytes - ((m_start_of_used + m_num_used_bytes) % m_capacity_in_bytes)) % m_capacity_in_bytes; };
-
-private:
- OwnPtr<Region> m_region;
- SpinLock<u8> m_lock;
- size_t m_start_of_used {};
- size_t m_num_used_bytes {};
- size_t m_capacity_in_bytes {};
-};
-
-}
diff --git a/Kernel/VM/ScatterGatherList.cpp b/Kernel/VM/ScatterGatherList.cpp
deleted file mode 100644
index 9bca559616..0000000000
--- a/Kernel/VM/ScatterGatherList.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2021, the SerenityOS developers.
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <Kernel/VM/ScatterGatherList.h>
-
-namespace Kernel {
-
-RefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest& request, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size)
-{
- auto vm_object = AnonymousVMObject::try_create_with_physical_pages(allocated_pages);
- if (!vm_object)
- return {};
- return adopt_ref_if_nonnull(new (nothrow) ScatterGatherList(vm_object.release_nonnull(), request, device_block_size));
-}
-
-ScatterGatherList::ScatterGatherList(NonnullRefPtr<AnonymousVMObject> vm_object, AsyncBlockDeviceRequest& request, size_t device_block_size)
- : m_vm_object(move(vm_object))
-{
- m_dma_region = MM.allocate_kernel_region_with_vmobject(m_vm_object, page_round_up((request.block_count() * device_block_size)), "AHCI Scattered DMA", Region::Access::Read | Region::Access::Write, Region::Cacheable::Yes);
-}
-
-}
diff --git a/Kernel/VM/ScatterGatherList.h b/Kernel/VM/ScatterGatherList.h
deleted file mode 100644
index 1a087095c2..0000000000
--- a/Kernel/VM/ScatterGatherList.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2021, the SerenityOS developers.
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/Vector.h>
-#include <Kernel/Devices/BlockDevice.h>
-#include <Kernel/PhysicalAddress.h>
-#include <Kernel/VM/AnonymousVMObject.h>
-#include <Kernel/VM/MemoryManager.h>
-
-namespace Kernel {
-
-// A Scatter-Gather List type that owns its buffers
-
-class ScatterGatherList : public RefCounted<ScatterGatherList> {
-public:
- static RefPtr<ScatterGatherList> try_create(AsyncBlockDeviceRequest&, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size);
- const VMObject& vmobject() const { return m_vm_object; }
- VirtualAddress dma_region() const { return m_dma_region->vaddr(); }
- size_t scatters_count() const { return m_vm_object->physical_pages().size(); }
-
-private:
- ScatterGatherList(NonnullRefPtr<AnonymousVMObject>, AsyncBlockDeviceRequest&, size_t device_block_size);
- NonnullRefPtr<AnonymousVMObject> m_vm_object;
- OwnPtr<Region> m_dma_region;
-};
-
-}
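
Roughly how a storage driver is expected to use this (a sketch only: `request`, `allocated_pages`, and `device_block_size` are assumed to be in scope from the surrounding AsyncBlockDeviceRequest handling):

    auto sg_list = ScatterGatherList::try_create(request, allocated_pages, device_block_size);
    if (!sg_list)
        return ENOMEM;
    // The controller gets programmed with the contiguous kernel mapping of the request's pages:
    dbgln("SG list: {} pages mapped at {}", sg_list->scatters_count(), sg_list->dma_region());
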
diff --git a/Kernel/VM/SharedInodeVMObject.cpp b/Kernel/VM/SharedInodeVMObject.cpp
deleted file mode 100644
index e603f70504..0000000000
--- a/Kernel/VM/SharedInodeVMObject.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <Kernel/FileSystem/Inode.h>
-#include <Kernel/VM/SharedInodeVMObject.h>
-
-namespace Kernel {
-
-RefPtr<SharedInodeVMObject> SharedInodeVMObject::try_create_with_inode(Inode& inode)
-{
- size_t size = inode.size();
- if (auto shared_vmobject = inode.shared_vmobject())
- return shared_vmobject.release_nonnull();
- auto vmobject = adopt_ref_if_nonnull(new (nothrow) SharedInodeVMObject(inode, size));
- if (!vmobject)
- return nullptr;
- vmobject->inode().set_shared_vmobject(*vmobject);
- return vmobject;
-}
-
-RefPtr<VMObject> SharedInodeVMObject::try_clone()
-{
- return adopt_ref_if_nonnull(new (nothrow) SharedInodeVMObject(*this));
-}
-
-SharedInodeVMObject::SharedInodeVMObject(Inode& inode, size_t size)
- : InodeVMObject(inode, size)
-{
-}
-
-SharedInodeVMObject::SharedInodeVMObject(SharedInodeVMObject const& other)
- : InodeVMObject(other)
-{
-}
-
-}
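
The important property here is that all MAP_SHARED mappings of one file end up sharing a single VMObject; a sketch (an `Inode& inode` is assumed to be in scope):

    auto vmobject = SharedInodeVMObject::try_create_with_inode(inode);
    if (!vmobject)
        return ENOMEM;
    // A second call for the same inode returns the same object, so every mapping
    // of this file sees (and dirties) the same physical pages.
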
diff --git a/Kernel/VM/SharedInodeVMObject.h b/Kernel/VM/SharedInodeVMObject.h
deleted file mode 100644
index ef5b828b5e..0000000000
--- a/Kernel/VM/SharedInodeVMObject.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/Bitmap.h>
-#include <Kernel/UnixTypes.h>
-#include <Kernel/VM/InodeVMObject.h>
-
-namespace Kernel {
-
-class SharedInodeVMObject final : public InodeVMObject {
- AK_MAKE_NONMOVABLE(SharedInodeVMObject);
-
-public:
- static RefPtr<SharedInodeVMObject> try_create_with_inode(Inode&);
- virtual RefPtr<VMObject> try_clone() override;
-
-private:
- virtual bool is_shared_inode() const override { return true; }
-
- explicit SharedInodeVMObject(Inode&, size_t);
- explicit SharedInodeVMObject(SharedInodeVMObject const&);
-
- virtual StringView class_name() const override { return "SharedInodeVMObject"sv; }
-
- SharedInodeVMObject& operator=(SharedInodeVMObject const&) = delete;
-};
-
-}
diff --git a/Kernel/VM/Space.cpp b/Kernel/VM/Space.cpp
deleted file mode 100644
index 9196bfe78b..0000000000
--- a/Kernel/VM/Space.cpp
+++ /dev/null
@@ -1,439 +0,0 @@
-/*
- * Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
- * Copyright (c) 2021, Leon Albrecht <leon2002.la@gmail.com>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <Kernel/PerformanceManager.h>
-#include <Kernel/Process.h>
-#include <Kernel/SpinLock.h>
-#include <Kernel/VM/AnonymousVMObject.h>
-#include <Kernel/VM/InodeVMObject.h>
-#include <Kernel/VM/MemoryManager.h>
-#include <Kernel/VM/Space.h>
-
-namespace Kernel {
-
-OwnPtr<Space> Space::try_create(Process& process, Space const* parent)
-{
- auto page_directory = PageDirectory::try_create_for_userspace(parent ? &parent->page_directory().range_allocator() : nullptr);
- if (!page_directory)
- return {};
- auto space = adopt_own_if_nonnull(new (nothrow) Space(process, page_directory.release_nonnull()));
- if (!space)
- return {};
- space->page_directory().set_space({}, *space);
- return space;
-}
-
-Space::Space(Process& process, NonnullRefPtr<PageDirectory> page_directory)
- : m_process(&process)
- , m_page_directory(move(page_directory))
-{
-}
-
-Space::~Space()
-{
-}
-
-KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
-{
- if (!size)
- return EINVAL;
-
- auto range_or_error = Range::expand_to_page_boundaries(addr.get(), size);
- if (range_or_error.is_error())
- return range_or_error.error();
- auto range_to_unmap = range_or_error.value();
-
- if (!is_user_range(range_to_unmap))
- return EFAULT;
-
- if (auto* whole_region = find_region_from_range(range_to_unmap)) {
- if (!whole_region->is_mmap())
- return EPERM;
-
- PerformanceManager::add_unmap_perf_event(*Process::current(), whole_region->range());
-
- deallocate_region(*whole_region);
- return KSuccess;
- }
-
- if (auto* old_region = find_region_containing(range_to_unmap)) {
- if (!old_region->is_mmap())
- return EPERM;
-
- // Remove the old region from our regions tree, since we're going to add another region
- // with the exact same start address, but don't deallocate it yet.
- auto region = take_region(*old_region);
-
- // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
- region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
-
- auto new_regions_or_error = try_split_region_around_range(*region, range_to_unmap);
- if (new_regions_or_error.is_error())
- return new_regions_or_error.error();
- auto& new_regions = new_regions_or_error.value();
-
- // Instead we give back the unwanted VM manually.
- page_directory().range_allocator().deallocate(range_to_unmap);
-
- // And finally we map the new region(s) using our page directory (they were just allocated and don't have one).
- for (auto* new_region : new_regions) {
- new_region->map(page_directory());
- }
-
- PerformanceManager::add_unmap_perf_event(*Process::current(), range_to_unmap);
-
- return KSuccess;
- }
-
- // Try again while checking multiple regions at a time.
- auto const& regions = find_regions_intersecting(range_to_unmap);
- if (regions.is_empty())
- return KSuccess;
-
- // Check up front that all of the regions are mmap'ed, so we don't
- // error out halfway through and leave the range only partially unmapped.
- for (auto* region : regions) {
- if (!region->is_mmap())
- return EPERM;
- }
-
- Vector<Region*, 2> new_regions;
-
- for (auto* old_region : regions) {
- // If it's a full match we can remove the entire old region.
- if (old_region->range().intersect(range_to_unmap).size() == old_region->size()) {
- deallocate_region(*old_region);
- continue;
- }
-
- // Remove the old region from our regions tree, since we're going to add another region
- // with the exact same start address, but don't deallocate it yet.
- auto region = take_region(*old_region);
-
- // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
- region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
-
- // Otherwise, split the regions and collect them for future mapping.
- auto split_regions_or_error = try_split_region_around_range(*region, range_to_unmap);
- if (split_regions_or_error.is_error())
- return split_regions_or_error.error();
-
- if (!new_regions.try_extend(split_regions_or_error.value()))
- return ENOMEM;
- }
-
- // Give back any unwanted VM to the range allocator.
- page_directory().range_allocator().deallocate(range_to_unmap);
-
- // And finally map the new region(s) into our page directory.
- for (auto* new_region : new_regions) {
- new_region->map(page_directory());
- }
-
- PerformanceManager::add_unmap_perf_event(*Process::current(), range_to_unmap);
-
- return KSuccess;
-}
-
-Optional<Range> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
-{
- vaddr.mask(PAGE_MASK);
- size = page_round_up(size);
- if (vaddr.is_null())
- return page_directory().range_allocator().allocate_anywhere(size, alignment);
- return page_directory().range_allocator().allocate_specific(vaddr, size);
-}
-
-KResultOr<Region*> Space::try_allocate_split_region(Region const& source_region, Range const& range, size_t offset_in_vmobject)
-{
- auto new_region = Region::try_create_user_accessible(
- range, source_region.vmobject(), offset_in_vmobject, KString::try_create(source_region.name()), source_region.access(), source_region.is_cacheable() ? Region::Cacheable::Yes : Region::Cacheable::No, source_region.is_shared());
- if (!new_region)
- return ENOMEM;
- auto* region = add_region(new_region.release_nonnull());
- if (!region)
- return ENOMEM;
- region->set_syscall_region(source_region.is_syscall_region());
- region->set_mmap(source_region.is_mmap());
- region->set_stack(source_region.is_stack());
- size_t page_offset_in_source_region = (offset_in_vmobject - source_region.offset_in_vmobject()) / PAGE_SIZE;
- for (size_t i = 0; i < region->page_count(); ++i) {
- if (source_region.should_cow(page_offset_in_source_region + i))
- region->set_should_cow(i, true);
- }
- return region;
-}
-
-KResultOr<Region*> Space::allocate_region(Range const& range, StringView name, int prot, AllocationStrategy strategy)
-{
- VERIFY(range.is_valid());
- auto vmobject = AnonymousVMObject::try_create_with_size(range.size(), strategy);
- if (!vmobject)
- return ENOMEM;
- auto region = Region::try_create_user_accessible(range, vmobject.release_nonnull(), 0, KString::try_create(name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, false);
- if (!region)
- return ENOMEM;
- if (!region->map(page_directory()))
- return ENOMEM;
- auto* added_region = add_region(region.release_nonnull());
- if (!added_region)
- return ENOMEM;
- return added_region;
-}
-
-KResultOr<Region*> Space::allocate_region_with_vmobject(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
-{
- VERIFY(range.is_valid());
- size_t end_in_vmobject = offset_in_vmobject + range.size();
- if (end_in_vmobject <= offset_in_vmobject) {
- dbgln("allocate_region_with_vmobject: Overflow (offset + size)");
- return EINVAL;
- }
- if (offset_in_vmobject >= vmobject->size()) {
- dbgln("allocate_region_with_vmobject: Attempt to allocate a region with an offset past the end of its VMObject.");
- return EINVAL;
- }
- if (end_in_vmobject > vmobject->size()) {
- dbgln("allocate_region_with_vmobject: Attempt to allocate a region with an end past the end of its VMObject.");
- return EINVAL;
- }
- offset_in_vmobject &= PAGE_MASK;
- auto region = Region::try_create_user_accessible(range, move(vmobject), offset_in_vmobject, KString::try_create(name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, shared);
- if (!region) {
- dbgln("allocate_region_with_vmobject: Unable to allocate Region");
- return ENOMEM;
- }
- auto* added_region = add_region(region.release_nonnull());
- if (!added_region)
- return ENOMEM;
- if (!added_region->map(page_directory()))
- return ENOMEM;
- return added_region;
-}
-
-void Space::deallocate_region(Region& region)
-{
- take_region(region);
-}
-
-NonnullOwnPtr<Region> Space::take_region(Region& region)
-{
- ScopedSpinLock lock(m_lock);
-
- if (m_region_lookup_cache.region.unsafe_ptr() == &region)
- m_region_lookup_cache.region = nullptr;
-
- auto found_region = m_regions.unsafe_remove(region.vaddr().get());
- VERIFY(found_region.ptr() == &region);
- return found_region;
-}
-
-Region* Space::find_region_from_range(const Range& range)
-{
- ScopedSpinLock lock(m_lock);
- if (m_region_lookup_cache.range.has_value() && m_region_lookup_cache.range.value() == range && m_region_lookup_cache.region)
- return m_region_lookup_cache.region.unsafe_ptr();
-
- auto found_region = m_regions.find(range.base().get());
- if (!found_region)
- return nullptr;
- auto& region = *found_region;
- size_t size = page_round_up(range.size());
- if (region->size() != size)
- return nullptr;
- m_region_lookup_cache.range = range;
- m_region_lookup_cache.region = *region;
- return region;
-}
-
-Region* Space::find_region_containing(const Range& range)
-{
- ScopedSpinLock lock(m_lock);
- auto candidate = m_regions.find_largest_not_above(range.base().get());
- if (!candidate)
- return nullptr;
- return (*candidate)->range().contains(range) ? candidate->ptr() : nullptr;
-}
-
-Vector<Region*> Space::find_regions_intersecting(const Range& range)
-{
- Vector<Region*> regions = {};
- size_t total_size_collected = 0;
-
- ScopedSpinLock lock(m_lock);
-
- auto found_region = m_regions.find_largest_not_above(range.base().get());
- if (!found_region)
- return regions;
- for (auto iter = m_regions.begin_from((*found_region)->vaddr().get()); !iter.is_end(); ++iter) {
- if ((*iter)->range().base() < range.end() && (*iter)->range().end() > range.base()) {
- regions.append(*iter);
-
- total_size_collected += (*iter)->size() - (*iter)->range().intersect(range).size();
- if (total_size_collected == range.size())
- break;
- }
- }
-
- return regions;
-}
-
-Region* Space::add_region(NonnullOwnPtr<Region> region)
-{
- auto* ptr = region.ptr();
- ScopedSpinLock lock(m_lock);
- auto success = m_regions.try_insert(region->vaddr().get(), move(region));
- return success ? ptr : nullptr;
-}
-
-// Carve out a virtual address range from a region and return the two regions on either side
-KResultOr<Vector<Region*, 2>> Space::try_split_region_around_range(const Region& source_region, const Range& desired_range)
-{
- Range old_region_range = source_region.range();
- auto remaining_ranges_after_unmap = old_region_range.carve(desired_range);
-
- VERIFY(!remaining_ranges_after_unmap.is_empty());
- auto try_make_replacement_region = [&](const Range& new_range) -> KResultOr<Region*> {
- VERIFY(old_region_range.contains(new_range));
- size_t new_range_offset_in_vmobject = source_region.offset_in_vmobject() + (new_range.base().get() - old_region_range.base().get());
- return try_allocate_split_region(source_region, new_range, new_range_offset_in_vmobject);
- };
- Vector<Region*, 2> new_regions;
- for (auto& new_range : remaining_ranges_after_unmap) {
- auto new_region_or_error = try_make_replacement_region(new_range);
- if (new_region_or_error.is_error())
- return new_region_or_error.error();
- new_regions.unchecked_append(new_region_or_error.value());
- }
- return new_regions;
-}
-
-void Space::dump_regions()
-{
- dbgln("Process regions:");
-#if ARCH(I386)
- auto addr_padding = "";
-#else
- auto addr_padding = " ";
-#endif
- dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
- addr_padding, addr_padding, addr_padding);
-
- ScopedSpinLock lock(m_lock);
-
- for (auto& sorted_region : m_regions) {
- auto& region = *sorted_region;
- dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), region.vaddr().offset(region.size() - 1).get(), region.size(),
- region.is_readable() ? 'R' : ' ',
- region.is_writable() ? 'W' : ' ',
- region.is_executable() ? 'X' : ' ',
- region.is_shared() ? 'S' : ' ',
- region.is_stack() ? 'T' : ' ',
- region.is_syscall_region() ? 'C' : ' ',
- region.name());
- }
- MM.dump_kernel_regions();
-}
-
-void Space::remove_all_regions(Badge<Process>)
-{
- ScopedSpinLock lock(m_lock);
- m_regions.clear();
-}
-
-size_t Space::amount_dirty_private() const
-{
- ScopedSpinLock lock(m_lock);
- // FIXME: This gets a bit more complicated for Regions sharing the same underlying VMObject.
- // The main issue I'm thinking of is when the VMObject has physical pages that none of the Regions are mapping.
- // That's probably a situation that needs to be looked at in general.
- size_t amount = 0;
- for (auto& region : m_regions) {
- if (!region->is_shared())
- amount += region->amount_dirty();
- }
- return amount;
-}
-
-size_t Space::amount_clean_inode() const
-{
- ScopedSpinLock lock(m_lock);
- HashTable<const InodeVMObject*> vmobjects;
- for (auto& region : m_regions) {
- if (region->vmobject().is_inode())
- vmobjects.set(&static_cast<const InodeVMObject&>(region->vmobject()));
- }
- size_t amount = 0;
- for (auto& vmobject : vmobjects)
- amount += vmobject->amount_clean();
- return amount;
-}
-
-size_t Space::amount_virtual() const
-{
- ScopedSpinLock lock(m_lock);
- size_t amount = 0;
- for (auto& region : m_regions) {
- amount += region->size();
- }
- return amount;
-}
-
-size_t Space::amount_resident() const
-{
- ScopedSpinLock lock(m_lock);
- // FIXME: This will double count if multiple regions use the same physical page.
- size_t amount = 0;
- for (auto& region : m_regions) {
- amount += region->amount_resident();
- }
- return amount;
-}
-
-size_t Space::amount_shared() const
-{
- ScopedSpinLock lock(m_lock);
- // FIXME: This will double count if multiple regions use the same physical page.
- // FIXME: It doesn't work at the moment, since it relies on PhysicalPage ref counts,
- // and each PhysicalPage is only reffed by its VMObject. This needs to be refactored
- // so that every Region contributes +1 ref to each of its PhysicalPages.
- size_t amount = 0;
- for (auto& region : m_regions) {
- amount += region->amount_shared();
- }
- return amount;
-}
-
-size_t Space::amount_purgeable_volatile() const
-{
- ScopedSpinLock lock(m_lock);
- size_t amount = 0;
- for (auto& region : m_regions) {
- if (!region->vmobject().is_anonymous())
- continue;
- auto const& vmobject = static_cast<AnonymousVMObject const&>(region->vmobject());
- if (vmobject.is_purgeable() && vmobject.is_volatile())
- amount += region->amount_resident();
- }
- return amount;
-}
-
-size_t Space::amount_purgeable_nonvolatile() const
-{
- ScopedSpinLock lock(m_lock);
- size_t amount = 0;
- for (auto& region : m_regions) {
- if (!region->vmobject().is_anonymous())
- continue;
- auto const& vmobject = static_cast<AnonymousVMObject const&>(region->vmobject());
- if (vmobject.is_purgeable() && !vmobject.is_volatile())
- amount += region->amount_resident();
- }
- return amount;
-}
-
-}
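
Putting the Space API together, the anonymous-mmap path boils down to roughly this (a sketch inside a function returning a KResult-style error; `space` and `size` are assumed to be given, and the region name is made up):

    auto range = space.allocate_range({}, size); // a null vaddr means "anywhere"
    if (!range.has_value())
        return ENOMEM;
    auto region_or_error = space.allocate_region(range.value(), "example"sv, PROT_READ | PROT_WRITE);
    if (region_or_error.is_error())
        return region_or_error.error();
    // The region is already mapped into space.page_directory() at this point.
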
diff --git a/Kernel/VM/Space.h b/Kernel/VM/Space.h
deleted file mode 100644
index 4efa5f53fa..0000000000
--- a/Kernel/VM/Space.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
- * Copyright (c) 2021, Leon Albrecht <leon2002.la@gmail.com>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/RedBlackTree.h>
-#include <AK/Vector.h>
-#include <AK/WeakPtr.h>
-#include <Kernel/UnixTypes.h>
-#include <Kernel/VM/AllocationStrategy.h>
-#include <Kernel/VM/PageDirectory.h>
-
-namespace Kernel {
-
-class Space {
-public:
- static OwnPtr<Space> try_create(Process&, Space const* parent);
- ~Space();
-
- PageDirectory& page_directory() { return *m_page_directory; }
- const PageDirectory& page_directory() const { return *m_page_directory; }
-
- Region* add_region(NonnullOwnPtr<Region>);
-
- size_t region_count() const { return m_regions.size(); }
-
- RedBlackTree<FlatPtr, NonnullOwnPtr<Region>>& regions() { return m_regions; }
- const RedBlackTree<FlatPtr, NonnullOwnPtr<Region>>& regions() const { return m_regions; }
-
- void dump_regions();
-
- KResult unmap_mmap_range(VirtualAddress, size_t);
-
- Optional<Range> allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
-
- KResultOr<Region*> allocate_region_with_vmobject(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
- KResultOr<Region*> allocate_region(const Range&, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
- void deallocate_region(Region& region);
- NonnullOwnPtr<Region> take_region(Region& region);
-
- KResultOr<Region*> try_allocate_split_region(Region const& source_region, Range const&, size_t offset_in_vmobject);
- KResultOr<Vector<Region*, 2>> try_split_region_around_range(Region const& source_region, Range const&);
-
- Region* find_region_from_range(const Range&);
- Region* find_region_containing(const Range&);
-
- Vector<Region*> find_regions_intersecting(const Range&);
-
- bool enforces_syscall_regions() const { return m_enforces_syscall_regions; }
- void set_enforces_syscall_regions(bool b) { m_enforces_syscall_regions = b; }
-
- void remove_all_regions(Badge<Process>);
-
- RecursiveSpinLock& get_lock() const { return m_lock; }
-
- size_t amount_clean_inode() const;
- size_t amount_dirty_private() const;
- size_t amount_virtual() const;
- size_t amount_resident() const;
- size_t amount_shared() const;
- size_t amount_purgeable_volatile() const;
- size_t amount_purgeable_nonvolatile() const;
-
-private:
- Space(Process&, NonnullRefPtr<PageDirectory>);
-
- Process* m_process { nullptr };
- mutable RecursiveSpinLock m_lock;
-
- RefPtr<PageDirectory> m_page_directory;
-
- RedBlackTree<FlatPtr, NonnullOwnPtr<Region>> m_regions;
-
- struct RegionLookupCache {
- Optional<Range> range;
- WeakPtr<Region> region;
- };
- RegionLookupCache m_region_lookup_cache;
-
- bool m_enforces_syscall_regions { false };
-};
-
-}
diff --git a/Kernel/VM/TypedMapping.h b/Kernel/VM/TypedMapping.h
deleted file mode 100644
index b9cc3ac36c..0000000000
--- a/Kernel/VM/TypedMapping.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/StringView.h>
-#include <Kernel/VM/MemoryManager.h>
-
-namespace Kernel {
-
-template<typename T>
-struct TypedMapping {
- const T* ptr() const { return reinterpret_cast<const T*>(region->vaddr().offset(offset).as_ptr()); }
- T* ptr() { return reinterpret_cast<T*>(region->vaddr().offset(offset).as_ptr()); }
- const T* operator->() const { return ptr(); }
- T* operator->() { return ptr(); }
- const T& operator*() const { return *ptr(); }
- T& operator*() { return *ptr(); }
- OwnPtr<Region> region;
- size_t offset { 0 };
-};
-
-template<typename T>
-static TypedMapping<T> map_typed(PhysicalAddress paddr, size_t length, Region::Access access = Region::Access::Read)
-{
- TypedMapping<T> table;
- size_t mapping_length = page_round_up(paddr.offset_in_page() + length);
- table.region = MM.allocate_kernel_region(paddr.page_base(), mapping_length, {}, access);
- table.offset = paddr.offset_in_page();
- return table;
-}
-
-template<typename T>
-static TypedMapping<T> map_typed(PhysicalAddress paddr)
-{
- return map_typed<T>(paddr, sizeof(T));
-}
-
-template<typename T>
-static TypedMapping<T> map_typed_writable(PhysicalAddress paddr)
-{
- return map_typed<T>(paddr, sizeof(T), Region::Access::Read | Region::Access::Write);
-}
-
-}
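
map_typed() is the usual way to peek at firmware or hardware tables; an illustrative sketch (the struct layout and the physical address are made up, and error handling is elided):

    struct [[gnu::packed]] ExampleTable {
        u32 signature;
        u32 length;
    };

    auto table = map_typed<ExampleTable>(PhysicalAddress(0xfee0'0000));
    dbgln("Example table: signature={:#x}, length={}", table->signature, table->length);
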
diff --git a/Kernel/VM/VMObject.cpp b/Kernel/VM/VMObject.cpp
deleted file mode 100644
index fd22c92c7f..0000000000
--- a/Kernel/VM/VMObject.cpp
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#include <Kernel/VM/MemoryManager.h>
-#include <Kernel/VM/VMObject.h>
-
-namespace Kernel {
-
-VMObject::VMObject(VMObject const& other)
- : m_physical_pages(other.m_physical_pages)
-{
- MM.register_vmobject(*this);
-}
-
-VMObject::VMObject(size_t size)
- : m_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE)))
-{
- MM.register_vmobject(*this);
-}
-
-VMObject::~VMObject()
-{
- {
- ScopedSpinLock lock(m_on_deleted_lock);
- for (auto& it : m_on_deleted)
- it->vmobject_deleted(*this);
- m_on_deleted.clear();
- }
-
- MM.unregister_vmobject(*this);
- VERIFY(m_regions.is_empty());
-}
-
-}
diff --git a/Kernel/VM/VMObject.h b/Kernel/VM/VMObject.h
deleted file mode 100644
index 45a5976af8..0000000000
--- a/Kernel/VM/VMObject.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/FixedArray.h>
-#include <AK/HashTable.h>
-#include <AK/IntrusiveList.h>
-#include <AK/RefCounted.h>
-#include <AK/RefPtr.h>
-#include <AK/Vector.h>
-#include <AK/Weakable.h>
-#include <Kernel/Forward.h>
-#include <Kernel/Mutex.h>
-#include <Kernel/VM/Region.h>
-
-namespace Kernel {
-
-class VMObjectDeletedHandler {
-public:
- virtual ~VMObjectDeletedHandler() = default;
- virtual void vmobject_deleted(VMObject&) = 0;
-};
-
-class VMObject : public RefCounted<VMObject>
- , public Weakable<VMObject> {
- friend class MemoryManager;
- friend class Region;
-
-public:
- virtual ~VMObject();
-
- virtual RefPtr<VMObject> try_clone() = 0;
-
- virtual bool is_anonymous() const { return false; }
- virtual bool is_inode() const { return false; }
- virtual bool is_shared_inode() const { return false; }
- virtual bool is_private_inode() const { return false; }
- virtual bool is_contiguous() const { return false; }
-
- size_t page_count() const { return m_physical_pages.size(); }
- Span<RefPtr<PhysicalPage> const> physical_pages() const { return m_physical_pages.span(); }
- Span<RefPtr<PhysicalPage>> physical_pages() { return m_physical_pages.span(); }
-
- size_t size() const { return m_physical_pages.size() * PAGE_SIZE; }
-
- virtual StringView class_name() const = 0;
-
- ALWAYS_INLINE void add_region(Region& region)
- {
- ScopedSpinLock locker(m_lock);
- m_regions.append(region);
- }
-
- ALWAYS_INLINE void remove_region(Region& region)
- {
- ScopedSpinLock locker(m_lock);
- m_regions.remove(region);
- }
-
- void register_on_deleted_handler(VMObjectDeletedHandler& handler)
- {
- ScopedSpinLock locker(m_on_deleted_lock);
- m_on_deleted.set(&handler);
- }
- void unregister_on_deleted_handler(VMObjectDeletedHandler& handler)
- {
- ScopedSpinLock locker(m_on_deleted_lock);
- m_on_deleted.remove(&handler);
- }
-
-protected:
- explicit VMObject(size_t);
- explicit VMObject(VMObject const&);
-
- template<typename Callback>
- void for_each_region(Callback);
-
- IntrusiveListNode<VMObject> m_list_node;
- FixedArray<RefPtr<PhysicalPage>> m_physical_pages;
-
- mutable RecursiveSpinLock m_lock;
-
-private:
- VMObject& operator=(VMObject const&) = delete;
- VMObject& operator=(VMObject&&) = delete;
- VMObject(VMObject&&) = delete;
-
- HashTable<VMObjectDeletedHandler*> m_on_deleted;
- SpinLock<u8> m_on_deleted_lock;
-
- Region::ListInVMObject m_regions;
-
-public:
- using List = IntrusiveList<VMObject, RawPtr<VMObject>, &VMObject::m_list_node>;
-};
-
-template<typename Callback>
-inline void VMObject::for_each_region(Callback callback)
-{
- ScopedSpinLock lock(m_lock);
- for (auto& region : m_regions) {
- callback(region);
- }
-}
-
-inline PhysicalPage const* Region::physical_page(size_t index) const
-{
- VERIFY(index < page_count());
- return vmobject().physical_pages()[first_page_index() + index];
-}
-
-inline RefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
-{
- VERIFY(index < page_count());
- return vmobject().physical_pages()[first_page_index() + index];
-}
-
-}
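
Finally, a sketch of the deleted-handler hook at the bottom of VMObject (the watcher class and its storage are hypothetical):

    class ExampleWatcher final : public VMObjectDeletedHandler {
    public:
        virtual void vmobject_deleted(VMObject& vmobject) override
        {
            dbgln("VMObject {} is going away", &vmobject);
        }
    };

    // Somewhere with a VMObject& in hand:
    static ExampleWatcher s_example_watcher;
    vmobject.register_on_deleted_handler(s_example_watcher);
    // ... and later, before s_example_watcher itself goes away:
    vmobject.unregister_on_deleted_handler(s_example_watcher);
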