author     Andreas Kling <kling@serenityos.org>   2021-08-06 13:54:48 +0200
committer  Andreas Kling <kling@serenityos.org>   2021-08-06 14:05:58 +0200
commit     cd5faf4e425443730695cb5d2e04906fe8bfb691 (patch)
tree       deacbbef0959fc42d5bce3d61a0ea4bdd42dc1c8 /Kernel
parent     93d98d49769de22695f8cb4c96c5ad6f7ac39d83 (diff)
download   serenity-cd5faf4e425443730695cb5d2e04906fe8bfb691.zip
Kernel: Rename Range => VirtualRange
...and also RangeAllocator => VirtualRangeAllocator.
This clarifies that the ranges we're dealing with are *virtual* memory
ranges and not anything else.
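
[Editor's note: as a quick orientation, a minimal sketch of how a call site reads after this rename. The type and method names (Memory::VirtualRange, Space::find_region_containing) are taken from the diff below; the wrapper function itself is hypothetical.]

    #include <Kernel/Memory/VirtualRange.h>

    namespace Kernel {

    // Hypothetical helper, for illustration only; the rename is the only change.
    Memory::Region* lookup_region_at(Memory::Space& space, VirtualAddress vaddr)
    {
        // Before this commit: space.find_region_containing(Memory::Range { vaddr, 1 });
        return space.find_region_containing(Memory::VirtualRange { vaddr, 1 });
    }

    }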
Diffstat (limited to 'Kernel')
39 files changed, 207 insertions(+), 207 deletions(-)
diff --git a/Kernel/CMakeLists.txt b/Kernel/CMakeLists.txt
index b41596d8ee..6e37718508 100644
--- a/Kernel/CMakeLists.txt
+++ b/Kernel/CMakeLists.txt
@@ -140,14 +140,14 @@ set(KERNEL_SOURCES
     Memory/PhysicalZone.cpp
     Memory/PrivateInodeVMObject.cpp
     Memory/ProcessPagingScope.cpp
-    Memory/Range.cpp
-    Memory/RangeAllocator.cpp
     Memory/Region.cpp
     Memory/RingBuffer.cpp
     Memory/ScatterGatherList.cpp
     Memory/SharedInodeVMObject.cpp
     Memory/Space.cpp
     Memory/VMObject.cpp
+    Memory/VirtualRange.cpp
+    Memory/VirtualRangeAllocator.cpp
     MiniStdLib.cpp
     Mutex.cpp
     Net/E1000ENetworkAdapter.cpp
diff --git a/Kernel/Devices/KCOVDevice.cpp b/Kernel/Devices/KCOVDevice.cpp
index 1b69f02960..063f810b61 100644
--- a/Kernel/Devices/KCOVDevice.cpp
+++ b/Kernel/Devices/KCOVDevice.cpp
@@ -129,7 +129,7 @@ KResult KCOVDevice::ioctl(FileDescription&, unsigned request, Userspace<void*> a
     return return_value;
 }
 
-KResultOr<Memory::Region*> KCOVDevice::mmap(Process& process, FileDescription&, Memory::Range const& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> KCOVDevice::mmap(Process& process, FileDescription&, Memory::VirtualRange const& range, u64 offset, int prot, bool shared)
 {
     auto pid = process.pid();
     auto maybe_kcov_instance = proc_instance->get(pid);
diff --git a/Kernel/Devices/KCOVDevice.h b/Kernel/Devices/KCOVDevice.h
index ba2dd92bec..f2492797c7 100644
--- a/Kernel/Devices/KCOVDevice.h
+++ b/Kernel/Devices/KCOVDevice.h
@@ -22,7 +22,7 @@ public:
     static void free_process();
 
     // ^File
-    KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared) override;
+    KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::VirtualRange const&, u64 offset, int prot, bool shared) override;
     KResultOr<NonnullRefPtr<FileDescription>> open(int options) override;
 
     // ^Device
diff --git a/Kernel/Devices/MemoryDevice.cpp b/Kernel/Devices/MemoryDevice.cpp
index 8ce29f1345..0dce9e87d6 100644
--- a/Kernel/Devices/MemoryDevice.cpp
+++ b/Kernel/Devices/MemoryDevice.cpp
@@ -37,7 +37,7 @@ void MemoryDevice::did_seek(FileDescription&, off_t)
     TODO();
 }
 
-KResultOr<Memory::Region*> MemoryDevice::mmap(Process& process, FileDescription&, Memory::Range const& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> MemoryDevice::mmap(Process& process, FileDescription&, Memory::VirtualRange const& range, u64 offset, int prot, bool shared)
 {
     auto viewed_address = PhysicalAddress(offset);
 
diff --git a/Kernel/Devices/MemoryDevice.h b/Kernel/Devices/MemoryDevice.h
index 366453599c..e688390600 100644
--- a/Kernel/Devices/MemoryDevice.h
+++ b/Kernel/Devices/MemoryDevice.h
@@ -19,7 +19,7 @@ public:
     static NonnullRefPtr<MemoryDevice> must_create();
     ~MemoryDevice();
 
-    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared) override;
+    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::VirtualRange const&, u64 offset, int prot, bool shared) override;
 
     // ^Device
     virtual mode_t required_mode() const override { return 0660; }
@@ -36,7 +36,7 @@ private:
     virtual void did_seek(FileDescription&, off_t) override;
 
-    bool is_allowed_range(PhysicalAddress, Memory::Range const&) const;
+    bool is_allowed_range(PhysicalAddress, Memory::VirtualRange const&) const;
 };
 
 }
diff --git a/Kernel/FileSystem/AnonymousFile.cpp b/Kernel/FileSystem/AnonymousFile.cpp
index e21c329832..066c129392 100644
--- a/Kernel/FileSystem/AnonymousFile.cpp
+++ b/Kernel/FileSystem/AnonymousFile.cpp
@@ -19,7 +19,7 @@ AnonymousFile::~AnonymousFile()
 {
 }
 
-KResultOr<Memory::Region*> AnonymousFile::mmap(Process& process, FileDescription&, Memory::Range const& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> AnonymousFile::mmap(Process& process, FileDescription&, Memory::VirtualRange const& range, u64 offset, int prot, bool shared)
 {
     if (offset != 0)
         return EINVAL;
diff --git a/Kernel/FileSystem/AnonymousFile.h b/Kernel/FileSystem/AnonymousFile.h
index e6b25fc15e..7b275b4771 100644
--- a/Kernel/FileSystem/AnonymousFile.h
+++ b/Kernel/FileSystem/AnonymousFile.h
@@ -20,7 +20,7 @@ public:
     virtual ~AnonymousFile() override;
 
-    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared) override;
+    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::VirtualRange const&, u64 offset, int prot, bool shared) override;
 
 private:
     virtual StringView class_name() const override { return "AnonymousFile"; }
diff --git a/Kernel/FileSystem/File.cpp b/Kernel/FileSystem/File.cpp
index 07226edbcf..0551f1a390 100644
--- a/Kernel/FileSystem/File.cpp
+++ b/Kernel/FileSystem/File.cpp
@@ -40,7 +40,7 @@ KResult File::ioctl(FileDescription&, unsigned, Userspace<void*>)
     return ENOTTY;
 }
 
-KResultOr<Memory::Region*> File::mmap(Process&, FileDescription&, Memory::Range const&, u64, int, bool)
+KResultOr<Memory::Region*> File::mmap(Process&, FileDescription&, Memory::VirtualRange const&, u64, int, bool)
 {
     return ENODEV;
 }
diff --git a/Kernel/FileSystem/File.h b/Kernel/FileSystem/File.h
index 0e719f291f..cd29de6e5c 100644
--- a/Kernel/FileSystem/File.h
+++ b/Kernel/FileSystem/File.h
@@ -88,7 +88,7 @@ public:
     virtual KResultOr<size_t> read(FileDescription&, u64, UserOrKernelBuffer&, size_t) = 0;
     virtual KResultOr<size_t> write(FileDescription&, u64, const UserOrKernelBuffer&, size_t) = 0;
     virtual KResult ioctl(FileDescription&, unsigned request, Userspace<void*> arg);
-    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared);
+    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::VirtualRange const&, u64 offset, int prot, bool shared);
     virtual KResult stat(::stat&) const { return EBADF; }
 
     virtual String absolute_path(const FileDescription&) const = 0;
diff --git a/Kernel/FileSystem/FileDescription.cpp b/Kernel/FileSystem/FileDescription.cpp
index 19822a8309..a569c6186c 100644
--- a/Kernel/FileSystem/FileDescription.cpp
+++ b/Kernel/FileSystem/FileDescription.cpp
@@ -380,7 +380,7 @@ InodeMetadata FileDescription::metadata() const
     return {};
 }
 
-KResultOr<Memory::Region*> FileDescription::mmap(Process& process, Memory::Range const& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> FileDescription::mmap(Process& process, Memory::VirtualRange const& range, u64 offset, int prot, bool shared)
 {
     MutexLocker locker(m_lock);
     return m_file->mmap(process, *this, range, offset, prot, shared);
diff --git a/Kernel/FileSystem/FileDescription.h b/Kernel/FileSystem/FileDescription.h
index e1a05d293d..b91121a9ed 100644
--- a/Kernel/FileSystem/FileDescription.h
+++ b/Kernel/FileSystem/FileDescription.h
@@ -96,7 +96,7 @@ public:
     Custody* custody() { return m_custody.ptr(); }
     const Custody* custody() const { return m_custody.ptr(); }
 
-    KResultOr<Memory::Region*> mmap(Process&, Memory::Range const&, u64 offset, int prot, bool shared);
+    KResultOr<Memory::Region*> mmap(Process&, Memory::VirtualRange const&, u64 offset, int prot, bool shared);
 
     bool is_blocking() const { return m_is_blocking; }
     void set_blocking(bool b) { m_is_blocking = b; }
diff --git a/Kernel/FileSystem/InodeFile.cpp b/Kernel/FileSystem/InodeFile.cpp
index 9b40bc730d..65b3a35efe 100644
--- a/Kernel/FileSystem/InodeFile.cpp
+++ b/Kernel/FileSystem/InodeFile.cpp
@@ -93,7 +93,7 @@ KResult InodeFile::ioctl(FileDescription& description, unsigned request, Userspa
     }
 }
 
-KResultOr<Memory::Region*> InodeFile::mmap(Process& process, FileDescription& description, Memory::Range const& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> InodeFile::mmap(Process& process, FileDescription& description, Memory::VirtualRange const& range, u64 offset, int prot, bool shared)
 {
     // FIXME: If PROT_EXEC, check that the underlying file system isn't mounted noexec.
     RefPtr<Memory::InodeVMObject> vmobject;
diff --git a/Kernel/FileSystem/InodeFile.h b/Kernel/FileSystem/InodeFile.h
index 2a20673593..d60cc1584d 100644
--- a/Kernel/FileSystem/InodeFile.h
+++ b/Kernel/FileSystem/InodeFile.h
@@ -33,7 +33,7 @@ public:
     virtual KResultOr<size_t> read(FileDescription&, u64, UserOrKernelBuffer&, size_t) override;
     virtual KResultOr<size_t> write(FileDescription&, u64, const UserOrKernelBuffer&, size_t) override;
     virtual KResult ioctl(FileDescription&, unsigned request, Userspace<void*> arg) override;
-    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared) override;
+    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::VirtualRange const&, u64 offset, int prot, bool shared) override;
     virtual KResult stat(::stat& buffer) const override { return inode().metadata().stat(buffer); }
 
     virtual String absolute_path(const FileDescription&) const override;
diff --git a/Kernel/Forward.h b/Kernel/Forward.h
index e7047343e8..c44087389b 100644
--- a/Kernel/Forward.h
+++ b/Kernel/Forward.h
@@ -74,8 +74,8 @@ class PageDirectory;
 class PhysicalPage;
 class PhysicalRegion;
 class PrivateInodeVMObject;
-class Range;
-class RangeAllocator;
+class VirtualRange;
+class VirtualRangeAllocator;
 class Region;
 class SharedInodeVMObject;
 class Space;
diff --git a/Kernel/Graphics/FramebufferDevice.cpp b/Kernel/Graphics/FramebufferDevice.cpp
index e9e7def17a..2e62deaf32 100644
--- a/Kernel/Graphics/FramebufferDevice.cpp
+++ b/Kernel/Graphics/FramebufferDevice.cpp
@@ -25,7 +25,7 @@ NonnullRefPtr<FramebufferDevice> FramebufferDevice::create(const GraphicsDevice&
     return adopt_ref(*new FramebufferDevice(adapter, output_port_index, paddr, width, height, pitch));
 }
 
-KResultOr<Memory::Region*> FramebufferDevice::mmap(Process& process, FileDescription&, Memory::Range const& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> FramebufferDevice::mmap(Process& process, FileDescription&, Memory::VirtualRange const& range, u64 offset, int prot, bool shared)
 {
     ScopedSpinLock lock(m_activation_lock);
     REQUIRE_PROMISE(video);
diff --git a/Kernel/Graphics/FramebufferDevice.h b/Kernel/Graphics/FramebufferDevice.h
index 26a39da693..9e013502ef 100644
--- a/Kernel/Graphics/FramebufferDevice.h
+++ b/Kernel/Graphics/FramebufferDevice.h
@@ -23,7 +23,7 @@ public:
     static NonnullRefPtr<FramebufferDevice> create(const GraphicsDevice&, size_t, PhysicalAddress, size_t, size_t, size_t);
 
     virtual KResult ioctl(FileDescription&, unsigned request, Userspace<void*> arg) override;
-    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared) override;
+    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::VirtualRange const&, u64 offset, int prot, bool shared) override;
 
     // ^Device
     virtual mode_t required_mode() const override { return 0660; }
diff --git a/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.cpp b/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.cpp
index df8f8ea94b..e8e51c9e0f 100644
--- a/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.cpp
+++ b/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.cpp
@@ -241,7 +241,7 @@ KResult FrameBufferDevice::ioctl(FileDescription&, unsigned request, Userspace<v
     };
 }
 
-KResultOr<Memory::Region*> FrameBufferDevice::mmap(Process& process, FileDescription&, Memory::Range const& range, u64 offset, int prot, bool shared)
+KResultOr<Memory::Region*> FrameBufferDevice::mmap(Process& process, FileDescription&, Memory::VirtualRange const& range, u64 offset, int prot, bool shared)
 {
     REQUIRE_PROMISE(video);
     if (!shared)
diff --git a/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.h b/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.h
index efbc98aff6..f972a65c6b 100644
--- a/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.h
+++ b/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.h
@@ -61,7 +61,7 @@ private:
     void set_buffer(int);
 
     virtual KResult ioctl(FileDescription&, unsigned request, Userspace<void*> arg) override;
-    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::Range const&, u64 offset, int prot, bool shared) override;
+    virtual KResultOr<Memory::Region*> mmap(Process&, FileDescription&, Memory::VirtualRange const&, u64 offset, int prot, bool shared) override;
     virtual bool can_read(const FileDescription&, size_t) const override { return true; }
     virtual KResultOr<size_t> read(FileDescription&, u64, UserOrKernelBuffer&, size_t) override { return EINVAL; }
     virtual bool can_write(const FileDescription&, size_t) const override { return true; }
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index 58fc808b7d..95c61f9e25 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -154,12 +154,12 @@ void MemoryManager::unmap_ksyms_after_init()
 UNMAP_AFTER_INIT void MemoryManager::register_reserved_ranges()
 {
     VERIFY(!m_physical_memory_ranges.is_empty());
-    ContiguousReservedMemoryRange range;
+    ContiguousReservedMemoryVirtualRange range;
     for (auto& current_range : m_physical_memory_ranges) {
-        if (current_range.type != PhysicalMemoryRangeType::Reserved) {
+        if (current_range.type != PhysicalMemoryVirtualRangeType::Reserved) {
             if (range.start.is_null())
                 continue;
-            m_reserved_memory_ranges.append(ContiguousReservedMemoryRange { range.start, current_range.start.get() - range.start.get() });
+            m_reserved_memory_ranges.append(ContiguousReservedMemoryVirtualRange { range.start, current_range.start.get() - range.start.get() });
             range.start.set((FlatPtr) nullptr);
             continue;
         }
@@ -168,14 +168,14 @@ UNMAP_AFTER_INIT void MemoryManager::register_reserved_ranges()
         }
         range.start = current_range.start;
     }
-    if (m_physical_memory_ranges.last().type != PhysicalMemoryRangeType::Reserved)
+    if (m_physical_memory_ranges.last().type != PhysicalMemoryVirtualRangeType::Reserved)
         return;
     if (range.start.is_null())
        return;
-    m_reserved_memory_ranges.append(ContiguousReservedMemoryRange { range.start, m_physical_memory_ranges.last().start.get() + m_physical_memory_ranges.last().length - range.start.get() });
+    m_reserved_memory_ranges.append(ContiguousReservedMemoryVirtualRange { range.start, m_physical_memory_ranges.last().start.get() + m_physical_memory_ranges.last().length - range.start.get() });
 }
 
-bool MemoryManager::is_allowed_to_mmap_to_userspace(PhysicalAddress start_address, Range const& range) const
+bool MemoryManager::is_allowed_to_mmap_to_userspace(PhysicalAddress start_address, VirtualRange const& range) const
 {
     VERIFY(!m_reserved_memory_ranges.is_empty());
     for (auto& current_range : m_reserved_memory_ranges) {
@@ -194,28 +194,28 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
 {
     // Register used memory regions that we know of.
     m_used_memory_ranges.ensure_capacity(4);
-    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
-    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Prekernel, start_of_prekernel_image, end_of_prekernel_image });
-    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image))) });
+    m_used_memory_ranges.append(UsedMemoryVirtualRange { UsedMemoryVirtualRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
+    m_used_memory_ranges.append(UsedMemoryVirtualRange { UsedMemoryVirtualRangeType::Prekernel, start_of_prekernel_image, end_of_prekernel_image });
+    m_used_memory_ranges.append(UsedMemoryVirtualRange { UsedMemoryVirtualRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image))) });
 
     if (multiboot_flags & 0x4) {
         auto* bootmods_start = multiboot_copy_boot_modules_array;
         auto* bootmods_end = bootmods_start + multiboot_copy_boot_modules_count;
 
         for (auto* bootmod = bootmods_start; bootmod < bootmods_end; bootmod++) {
-            m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::BootModule, PhysicalAddress(bootmod->start), PhysicalAddress(bootmod->end) });
+            m_used_memory_ranges.append(UsedMemoryVirtualRange { UsedMemoryVirtualRangeType::BootModule, PhysicalAddress(bootmod->start), PhysicalAddress(bootmod->end) });
         }
     }
 
     auto* mmap_begin = multiboot_memory_map;
     auto* mmap_end = multiboot_memory_map + multiboot_memory_map_count;
 
-    struct ContiguousPhysicalRange {
+    struct ContiguousPhysicalVirtualRange {
         PhysicalAddress lower;
         PhysicalAddress upper;
     };
-    Vector<ContiguousPhysicalRange> contiguous_physical_ranges;
+    Vector<ContiguousPhysicalVirtualRange> contiguous_physical_ranges;
 
     for (auto* mmap = mmap_begin; mmap < mmap_end; mmap++) {
         dmesgln("MM: Multiboot mmap: address={:p}, length={}, type={}", mmap->addr, mmap->len, mmap->type);
@@ -224,24 +224,24 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
         auto length = mmap->len;
         switch (mmap->type) {
         case (MULTIBOOT_MEMORY_AVAILABLE):
-            m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Usable, start_address, length });
+            m_physical_memory_ranges.append(PhysicalMemoryVirtualRange { PhysicalMemoryVirtualRangeType::Usable, start_address, length });
             break;
         case (MULTIBOOT_MEMORY_RESERVED):
-            m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Reserved, start_address, length });
+            m_physical_memory_ranges.append(PhysicalMemoryVirtualRange { PhysicalMemoryVirtualRangeType::Reserved, start_address, length });
             break;
         case (MULTIBOOT_MEMORY_ACPI_RECLAIMABLE):
-            m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_Reclaimable, start_address, length });
+            m_physical_memory_ranges.append(PhysicalMemoryVirtualRange { PhysicalMemoryVirtualRangeType::ACPI_Reclaimable, start_address, length });
             break;
         case (MULTIBOOT_MEMORY_NVS):
-            m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_NVS, start_address, length });
+            m_physical_memory_ranges.append(PhysicalMemoryVirtualRange { PhysicalMemoryVirtualRangeType::ACPI_NVS, start_address, length });
             break;
         case (MULTIBOOT_MEMORY_BADRAM):
             dmesgln("MM: Warning, detected bad memory range!");
-            m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::BadMemory, start_address, length });
+            m_physical_memory_ranges.append(PhysicalMemoryVirtualRange { PhysicalMemoryVirtualRangeType::BadMemory, start_address, length });
             break;
         default:
             dbgln("MM: Unknown range!");
-            m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Unknown, start_address, length });
+            m_physical_memory_ranges.append(PhysicalMemoryVirtualRange { PhysicalMemoryVirtualRangeType::Unknown, start_address, length });
             break;
         }
@@ -280,7 +280,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
             continue;
 
         if (contiguous_physical_ranges.is_empty() || contiguous_physical_ranges.last().upper.offset(PAGE_SIZE) != addr) {
-            contiguous_physical_ranges.append(ContiguousPhysicalRange {
+            contiguous_physical_ranges.append(ContiguousPhysicalVirtualRange {
                 .lower = addr,
                 .upper = addr,
             });
@@ -322,7 +322,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
     m_system_memory_info.user_physical_pages_uncommitted = m_system_memory_info.user_physical_pages;
 
     for (auto& used_range : m_used_memory_ranges) {
-        dmesgln("MM: {} range @ {} - {} (size {:#x})", UserMemoryRangeTypeNames[to_underlying(used_range.type)], used_range.start, used_range.end.offset(-1), used_range.end.as_ptr() - used_range.start.as_ptr());
+        dmesgln("MM: {} range @ {} - {} (size {:#x})", UserMemoryVirtualRangeTypeNames[to_underlying(used_range.type)], used_range.start, used_range.end.offset(-1), used_range.end.as_ptr() - used_range.start.as_ptr());
     }
 
     dmesgln("MM: Super physical region: {} - {} (size {:#x})", m_super_physical_region->lower(), m_super_physical_region->upper().offset(-1), PAGE_SIZE * m_super_physical_region->size());
@@ -389,7 +389,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
     } else {
         m_physical_pages_region = found_region->try_take_pages_from_beginning(physical_page_array_pages_and_page_tables_count);
     }
-    m_used_memory_ranges.append({ UsedMemoryRangeType::PhysicalPages, m_physical_pages_region->lower(), m_physical_pages_region->upper() });
+    m_used_memory_ranges.append({ UsedMemoryVirtualRangeType::PhysicalPages, m_physical_pages_region->lower(), m_physical_pages_region->upper() });
 
     // Create the bare page directory. This is not a fully constructed page directory and merely contains the allocators!
     m_kernel_page_directory = PageDirectory::must_create_kernel_page_directory();
@@ -746,7 +746,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress pa
     return allocate_kernel_region_with_vmobject(range.value(), *vm_object, name, access, cacheable);
 }
 
-OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(Range const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
+OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     ScopedSpinLock lock(s_mm_lock);
     auto region = Region::try_create_kernel_only(range, vmobject, 0, KString::try_create(name), access, cacheable);
diff --git a/Kernel/Memory/MemoryManager.h b/Kernel/Memory/MemoryManager.h
index 1ac00f5812..96a209852b 100644
--- a/Kernel/Memory/MemoryManager.h
+++ b/Kernel/Memory/MemoryManager.h
@@ -46,7 +46,7 @@ inline FlatPtr virtual_to_low_physical(FlatPtr virtual_)
     return virtual_ - physical_to_virtual_offset;
 }
 
-enum class UsedMemoryRangeType {
+enum class UsedMemoryVirtualRangeType {
     LowMemory = 0,
     Prekernel,
     Kernel,
@@ -54,7 +54,7 @@ enum class UsedMemoryRangeType {
     PhysicalPages,
 };
 
-static constexpr StringView UserMemoryRangeTypeNames[] {
+static constexpr StringView UserMemoryVirtualRangeTypeNames[] {
     "Low memory",
     "Prekernel",
     "Kernel",
@@ -62,18 +62,18 @@ static constexpr StringView UserMemoryRangeTypeNames[] {
     "Physical Pages"
 };
 
-struct UsedMemoryRange {
-    UsedMemoryRangeType type {};
+struct UsedMemoryVirtualRange {
+    UsedMemoryVirtualRangeType type {};
     PhysicalAddress start;
     PhysicalAddress end;
 };
 
-struct ContiguousReservedMemoryRange {
+struct ContiguousReservedMemoryVirtualRange {
     PhysicalAddress start;
     PhysicalSize length {};
 };
 
-enum class PhysicalMemoryRangeType {
+enum class PhysicalMemoryVirtualRangeType {
     Usable = 0,
     Reserved,
     ACPI_Reclaimable,
@@ -82,8 +82,8 @@ enum class PhysicalMemoryRangeType {
     Unknown,
 };
 
-struct PhysicalMemoryRange {
-    PhysicalMemoryRangeType type { PhysicalMemoryRangeType::Unknown };
+struct PhysicalMemoryVirtualRange {
+    PhysicalMemoryVirtualRangeType type { PhysicalMemoryVirtualRangeType::Unknown };
     PhysicalAddress start;
     PhysicalSize length {};
 };
@@ -185,7 +185,7 @@ public:
     OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-    OwnPtr<Region> allocate_kernel_region_with_vmobject(Range const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
+    OwnPtr<Region> allocate_kernel_region_with_vmobject(VirtualRange const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
 
     struct SystemMemoryInfo {
         PhysicalSize user_physical_pages { 0 };
@@ -230,8 +230,8 @@ public:
     PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
 
-    Vector<UsedMemoryRange> const& used_memory_ranges() { return m_used_memory_ranges; }
-    bool is_allowed_to_mmap_to_userspace(PhysicalAddress, Range const&) const;
+    Vector<UsedMemoryVirtualRange> const& used_memory_ranges() { return m_used_memory_ranges; }
+    bool is_allowed_to_mmap_to_userspace(PhysicalAddress, VirtualRange const&) const;
 
     PhysicalPageEntry& get_physical_page_entry(PhysicalAddress);
     PhysicalAddress get_physical_address(PhysicalPage const&);
@@ -288,9 +288,9 @@ private:
     Region::ListInMemoryManager m_user_regions;
     Region::ListInMemoryManager m_kernel_regions;
 
-    Vector<UsedMemoryRange> m_used_memory_ranges;
-    Vector<PhysicalMemoryRange> m_physical_memory_ranges;
-    Vector<ContiguousReservedMemoryRange> m_reserved_memory_ranges;
+    Vector<UsedMemoryVirtualRange> m_used_memory_ranges;
+    Vector<PhysicalMemoryVirtualRange> m_physical_memory_ranges;
+    Vector<ContiguousReservedMemoryVirtualRange> m_reserved_memory_ranges;
 
     VMObject::List m_vmobjects;
 };
@@ -307,7 +307,7 @@ inline bool is_user_range(VirtualAddress vaddr, size_t size)
     return is_user_address(vaddr) && is_user_address(vaddr.offset(size));
 }
 
-inline bool is_user_range(Range const& range)
+inline bool is_user_range(VirtualRange const& range)
 {
     return is_user_range(range.base(), range.size());
 }
diff --git a/Kernel/Memory/PageDirectory.cpp b/Kernel/Memory/PageDirectory.cpp
index cf317ac5e0..6aca487b3e 100644
--- a/Kernel/Memory/PageDirectory.cpp
+++ b/Kernel/Memory/PageDirectory.cpp
@@ -43,7 +43,7 @@ UNMAP_AFTER_INIT NonnullRefPtr<PageDirectory> PageDirectory::must_create_kernel_
     return directory;
 }
 
-RefPtr<PageDirectory> PageDirectory::try_create_for_userspace(RangeAllocator const* parent_range_allocator)
+RefPtr<PageDirectory> PageDirectory::try_create_for_userspace(VirtualRangeAllocator const* parent_range_allocator)
 {
     constexpr FlatPtr userspace_range_base = 0x00800000;
     FlatPtr const userspace_range_ceiling = USER_RANGE_CEILING;
diff --git a/Kernel/Memory/PageDirectory.h b/Kernel/Memory/PageDirectory.h
index 6963681046..9141fc4329 100644
--- a/Kernel/Memory/PageDirectory.h
+++ b/Kernel/Memory/PageDirectory.h
@@ -11,7 +11,7 @@
 #include <AK/RefPtr.h>
 #include <Kernel/Forward.h>
 #include <Kernel/Memory/PhysicalPage.h>
-#include <Kernel/Memory/RangeAllocator.h>
+#include <Kernel/Memory/VirtualRangeAllocator.h>
 
 namespace Kernel::Memory {
 
@@ -19,7 +19,7 @@ class PageDirectory : public RefCounted<PageDirectory> {
     friend class MemoryManager;
 
 public:
-    static RefPtr<PageDirectory> try_create_for_userspace(RangeAllocator const* parent_range_allocator = nullptr);
+    static RefPtr<PageDirectory> try_create_for_userspace(VirtualRangeAllocator const* parent_range_allocator = nullptr);
     static NonnullRefPtr<PageDirectory> must_create_kernel_page_directory();
     static RefPtr<PageDirectory> find_by_cr3(FlatPtr);
 
@@ -36,10 +36,10 @@ public:
 #endif
     }
 
-    RangeAllocator& range_allocator() { return m_range_allocator; }
-    const RangeAllocator& range_allocator() const { return m_range_allocator; }
+    VirtualRangeAllocator& range_allocator() { return m_range_allocator; }
+    VirtualRangeAllocator const& range_allocator() const { return m_range_allocator; }
 
-    RangeAllocator& identity_range_allocator() { return m_identity_range_allocator; }
+    VirtualRangeAllocator& identity_range_allocator() { return m_identity_range_allocator; }
 
     Space* space() { return m_space; }
     const Space* space() const { return m_space; }
@@ -52,8 +52,8 @@ private:
     PageDirectory();
 
     Space* m_space { nullptr };
-    RangeAllocator m_range_allocator;
-    RangeAllocator m_identity_range_allocator;
+    VirtualRangeAllocator m_range_allocator;
+    VirtualRangeAllocator m_identity_range_allocator;
 #if ARCH(X86_64)
     RefPtr<PhysicalPage> m_pml4t;
 #endif
diff --git a/Kernel/Memory/RangeAllocator.h b/Kernel/Memory/RangeAllocator.h
deleted file mode 100644
index cc6bcf36bb..0000000000
--- a/Kernel/Memory/RangeAllocator.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
- *
- * SPDX-License-Identifier: BSD-2-Clause
- */
-
-#pragma once
-
-#include <AK/RedBlackTree.h>
-#include <AK/Traits.h>
-#include <Kernel/Memory/Range.h>
-#include <Kernel/SpinLock.h>
-
-namespace Kernel::Memory {
-
-class RangeAllocator {
-public:
-    RangeAllocator();
-    ~RangeAllocator() = default;
-
-    void initialize_with_range(VirtualAddress, size_t);
-    void initialize_from_parent(RangeAllocator const&);
-
-    Optional<Range> allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
-    Optional<Range> allocate_specific(VirtualAddress, size_t);
-    Optional<Range> allocate_randomized(size_t, size_t alignment);
-    void deallocate(Range const&);
-
-    void dump() const;
-
-    bool contains(Range const& range) const { return m_total_range.contains(range); }
-
-private:
-    void carve_at_iterator(auto&, Range const&);
-
-    RedBlackTree<FlatPtr, Range> m_available_ranges;
-    Range m_total_range;
-    mutable SpinLock<u8> m_lock;
-};
-
-}
-
-namespace AK {
-template<>
-struct Traits<Kernel::Memory::Range> : public GenericTraits<Kernel::Memory::Range> {
-    static constexpr bool is_trivial() { return true; }
-};
-}
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index f080d5b30e..07f16d7a93 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -19,7 +19,7 @@
 
 namespace Kernel::Memory {
 
-Region::Region(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
+Region::Region(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
     : m_range(range)
     , m_offset_in_vmobject(offset_in_vmobject)
     , m_vmobject(move(vmobject))
@@ -41,11 +41,11 @@ Region::~Region()
     m_vmobject->remove_region(*this);
 
     // Make sure we disable interrupts so we don't get interrupted between unmapping and unregistering.
-    // Unmapping the region will give the VM back to the RangeAllocator, so an interrupt handler would
+    // Unmapping the region will give the VM back to the VirtualRangeAllocator, so an interrupt handler would
     // find the address<->region mappings in an invalid state there.
     ScopedSpinLock lock(s_mm_lock);
     if (m_page_directory) {
-        unmap(ShouldDeallocateVirtualMemoryRange::Yes);
+        unmap(ShouldDeallocateVirtualMemoryVirtualRange::Yes);
         VERIFY(!m_page_directory);
     }
 
@@ -147,7 +147,7 @@ size_t Region::amount_shared() const
     return bytes;
 }
 
-OwnPtr<Region> Region::try_create_user_accessible(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
+OwnPtr<Region> Region::try_create_user_accessible(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
 {
     auto region = adopt_own_if_nonnull(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared));
     if (!region)
@@ -155,7 +155,7 @@ OwnPtr<Region> Region::try_create_user_accessible(Range const& range, NonnullRef
     return region;
 }
 
-OwnPtr<Region> Region::try_create_kernel_only(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable)
+OwnPtr<Region> Region::try_create_kernel_only(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable)
 {
     return adopt_own_if_nonnull(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, false));
 }
@@ -234,7 +234,7 @@ bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
     return success;
 }
 
-void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
+void Region::unmap(ShouldDeallocateVirtualMemoryVirtualRange deallocate_range)
 {
     ScopedSpinLock lock(s_mm_lock);
     if (!m_page_directory)
@@ -246,7 +246,7 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
         MM.release_pte(*m_page_directory, vaddr, i == count - 1);
     }
     MM.flush_tlb(m_page_directory, vaddr(), page_count());
-    if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes) {
+    if (deallocate_range == ShouldDeallocateVirtualMemoryVirtualRange::Yes) {
         if (m_page_directory->range_allocator().contains(range()))
             m_page_directory->range_allocator().deallocate(range());
         else
diff --git a/Kernel/Memory/Region.h b/Kernel/Memory/Region.h
index f4d1c7b300..4cad527889 100644
--- a/Kernel/Memory/Region.h
+++ b/Kernel/Memory/Region.h
@@ -14,7 +14,7 @@
 #include <Kernel/Heap/SlabAllocator.h>
 #include <Kernel/KString.h>
 #include <Kernel/Memory/PageFaultResponse.h>
-#include <Kernel/Memory/RangeAllocator.h>
+#include <Kernel/Memory/VirtualRangeAllocator.h>
 #include <Kernel/Sections.h>
 #include <Kernel/UnixTypes.h>
 
@@ -46,12 +46,12 @@ public:
         Yes,
     };
 
-    static OwnPtr<Region> try_create_user_accessible(Range const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared);
-    static OwnPtr<Region> try_create_kernel_only(Range const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes);
+    static OwnPtr<Region> try_create_user_accessible(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared);
+    static OwnPtr<Region> try_create_kernel_only(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes);
 
     ~Region();
 
-    Range const& range() const { return m_range; }
+    VirtualRange const& range() const { return m_range; }
     VirtualAddress vaddr() const { return m_range.base(); }
     size_t size() const { return m_range.size(); }
     bool is_readable() const { return m_access & Access::Read; }
@@ -94,7 +94,7 @@ public:
         return m_range.contains(vaddr);
     }
 
-    bool contains(Range const& range) const
+    bool contains(VirtualRange const& range) const
     {
         return m_range.contains(range);
     }
@@ -168,11 +168,11 @@ public:
     void set_page_directory(PageDirectory&);
     bool map(PageDirectory&, ShouldFlushTLB = ShouldFlushTLB::Yes);
 
-    enum class ShouldDeallocateVirtualMemoryRange {
+    enum class ShouldDeallocateVirtualMemoryVirtualRange {
         No,
         Yes,
     };
-    void unmap(ShouldDeallocateVirtualMemoryRange = ShouldDeallocateVirtualMemoryRange::Yes);
+    void unmap(ShouldDeallocateVirtualMemoryVirtualRange = ShouldDeallocateVirtualMemoryVirtualRange::Yes);
 
     void remap();
 
@@ -180,7 +180,7 @@ public:
     void set_syscall_region(bool b) { m_syscall_region = b; }
 
 private:
-    Region(Range const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
+    Region(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
 
     bool remap_vmobject_page(size_t page_index, bool with_flush = true);
     bool do_remap_vmobject_page(size_t page_index, bool with_flush = true);
@@ -200,7 +200,7 @@ private:
     bool map_individual_page_impl(size_t page_index);
 
     RefPtr<PageDirectory> m_page_directory;
-    Range m_range;
+    VirtualRange m_range;
     size_t m_offset_in_vmobject { 0 };
     NonnullRefPtr<VMObject> m_vmobject;
     OwnPtr<KString> m_name;
diff --git a/Kernel/Memory/Space.cpp b/Kernel/Memory/Space.cpp
index dfb6a56a2a..97c48100c1 100644
--- a/Kernel/Memory/Space.cpp
+++ b/Kernel/Memory/Space.cpp
@@ -42,7 +42,7 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
     if (!size)
         return EINVAL;
 
-    auto range_or_error = Range::expand_to_page_boundaries(addr.get(), size);
+    auto range_or_error = VirtualRange::expand_to_page_boundaries(addr.get(), size);
     if (range_or_error.is_error())
         return range_or_error.error();
 
     auto range_to_unmap = range_or_error.value();
@@ -69,7 +69,7 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
         auto region = take_region(*old_region);
 
         // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
-        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
+        region->unmap(Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
 
         auto new_regions_or_error = try_split_region_around_range(*region, range_to_unmap);
         if (new_regions_or_error.is_error())
@@ -115,7 +115,7 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
         auto region = take_region(*old_region);
 
         // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
-        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
+        region->unmap(Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
 
         // Otherwise, split the regions and collect them for future mapping.
         auto split_regions_or_error = try_split_region_around_range(*region, range_to_unmap);
@@ -139,7 +139,7 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
     return KSuccess;
 }
 
-Optional<Range> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
+Optional<VirtualRange> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
 {
     vaddr.mask(PAGE_MASK);
     size = page_round_up(size);
@@ -148,7 +148,7 @@ Optional<Range> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t
     return page_directory().range_allocator().allocate_specific(vaddr, size);
 }
 
-KResultOr<Region*> Space::try_allocate_split_region(Region const& source_region, Range const& range, size_t offset_in_vmobject)
+KResultOr<Region*> Space::try_allocate_split_region(Region const& source_region, VirtualRange const& range, size_t offset_in_vmobject)
 {
     auto new_region = Region::try_create_user_accessible(
         range, source_region.vmobject(), offset_in_vmobject, KString::try_create(source_region.name()), source_region.access(), source_region.is_cacheable() ? Region::Cacheable::Yes : Region::Cacheable::No, source_region.is_shared());
@@ -168,7 +168,7 @@ KResultOr<Region*> Space::try_allocate_split_region(Region const& source_region,
     return region;
 }
 
-KResultOr<Region*> Space::allocate_region(Range const& range, StringView name, int prot, AllocationStrategy strategy)
+KResultOr<Region*> Space::allocate_region(VirtualRange const& range, StringView name, int prot, AllocationStrategy strategy)
 {
     VERIFY(range.is_valid());
     auto vmobject = AnonymousVMObject::try_create_with_size(range.size(), strategy);
@@ -185,7 +185,7 @@ KResultOr<Region*> Space::allocate_region(Range const& range, StringView name, i
     return added_region;
 }
 
-KResultOr<Region*> Space::allocate_region_with_vmobject(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
+KResultOr<Region*> Space::allocate_region_with_vmobject(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
 {
     VERIFY(range.is_valid());
     size_t end_in_vmobject = offset_in_vmobject + range.size();
@@ -232,7 +232,7 @@ NonnullOwnPtr<Region> Space::take_region(Region& region)
     return found_region;
 }
 
-Region* Space::find_region_from_range(const Range& range)
+Region* Space::find_region_from_range(VirtualRange const& range)
 {
     ScopedSpinLock lock(m_lock);
     if (m_region_lookup_cache.range.has_value() && m_region_lookup_cache.range.value() == range && m_region_lookup_cache.region)
@@ -250,7 +250,7 @@ Region* Space::find_region_from_range(const Range& range)
     return region;
 }
 
-Region* Space::find_region_containing(const Range& range)
+Region* Space::find_region_containing(VirtualRange const& range)
 {
     ScopedSpinLock lock(m_lock);
     auto candidate = m_regions.find_largest_not_above(range.base().get());
@@ -259,7 +259,7 @@ Region* Space::find_region_containing(const Range& range)
     return (*candidate)->range().contains(range) ? candidate->ptr() : nullptr;
 }
 
-Vector<Region*> Space::find_regions_intersecting(const Range& range)
+Vector<Region*> Space::find_regions_intersecting(VirtualRange const& range)
 {
     Vector<Region*> regions = {};
     size_t total_size_collected = 0;
@@ -291,13 +291,13 @@ Region* Space::add_region(NonnullOwnPtr<Region> region)
 }
 
 // Carve out a virtual address range from a region and return the two regions on either side
-KResultOr<Vector<Region*, 2>> Space::try_split_region_around_range(const Region& source_region, const Range& desired_range)
+KResultOr<Vector<Region*, 2>> Space::try_split_region_around_range(const Region& source_region, VirtualRange const& desired_range)
 {
-    Range old_region_range = source_region.range();
+    VirtualRange old_region_range = source_region.range();
     auto remaining_ranges_after_unmap = old_region_range.carve(desired_range);
 
     VERIFY(!remaining_ranges_after_unmap.is_empty());
-    auto try_make_replacement_region = [&](const Range& new_range) -> KResultOr<Region*> {
+    auto try_make_replacement_region = [&](VirtualRange const& new_range) -> KResultOr<Region*> {
         VERIFY(old_region_range.contains(new_range));
         size_t new_range_offset_in_vmobject = source_region.offset_in_vmobject() + (new_range.base().get() - old_region_range.base().get());
         return try_allocate_split_region(source_region, new_range, new_range_offset_in_vmobject);
diff --git a/Kernel/Memory/Space.h b/Kernel/Memory/Space.h
index 5cfb7e5530..b8bf401779 100644
--- a/Kernel/Memory/Space.h
+++ b/Kernel/Memory/Space.h
@@ -35,20 +35,20 @@ public:
 
     KResult unmap_mmap_range(VirtualAddress, size_t);
 
-    Optional<Range> allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
+    Optional<VirtualRange> allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
 
-    KResultOr<Region*> allocate_region_with_vmobject(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
-    KResultOr<Region*> allocate_region(const Range&, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
+    KResultOr<Region*> allocate_region_with_vmobject(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
+    KResultOr<Region*> allocate_region(VirtualRange const&, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
     void deallocate_region(Region& region);
     NonnullOwnPtr<Region> take_region(Region& region);
 
-    KResultOr<Region*> try_allocate_split_region(Region const& source_region, Range const&, size_t offset_in_vmobject);
-    KResultOr<Vector<Region*, 2>> try_split_region_around_range(Region const& source_region, Range const&);
+    KResultOr<Region*> try_allocate_split_region(Region const& source_region, VirtualRange const&, size_t offset_in_vmobject);
+    KResultOr<Vector<Region*, 2>> try_split_region_around_range(Region const& source_region, VirtualRange const&);
 
-    Region* find_region_from_range(const Range&);
-    Region* find_region_containing(const Range&);
+    Region* find_region_from_range(VirtualRange const&);
+    Region* find_region_containing(VirtualRange const&);
 
-    Vector<Region*> find_regions_intersecting(const Range&);
+    Vector<Region*> find_regions_intersecting(VirtualRange const&);
 
     bool enforces_syscall_regions() const { return m_enforces_syscall_regions; }
     void set_enforces_syscall_regions(bool b) { m_enforces_syscall_regions = b; }
@@ -76,7 +76,7 @@ private:
     RedBlackTree<FlatPtr, NonnullOwnPtr<Region>> m_regions;
 
     struct RegionLookupCache {
-        Optional<Range> range;
+        Optional<VirtualRange> range;
         WeakPtr<Region> region;
     };
     RegionLookupCache m_region_lookup_cache;
diff --git a/Kernel/Memory/Range.cpp b/Kernel/Memory/VirtualRange.cpp
index ad5ebdc9ea..c049ddf1bc 100644
--- a/Kernel/Memory/Range.cpp
+++ b/Kernel/Memory/VirtualRange.cpp
@@ -7,16 +7,16 @@
 
 #include <AK/Vector.h>
 #include <Kernel/Memory/MemoryManager.h>
-#include <Kernel/Memory/Range.h>
+#include <Kernel/Memory/VirtualRange.h>
 #include <LibC/limits.h>
 
 namespace Kernel::Memory {
 
-Vector<Range, 2> Range::carve(const Range& taken) const
+Vector<VirtualRange, 2> VirtualRange::carve(VirtualRange const& taken) const
 {
     VERIFY((taken.size() % PAGE_SIZE) == 0);
 
-    Vector<Range, 2> parts;
+    Vector<VirtualRange, 2> parts;
     if (taken == *this)
         return {};
     if (taken.base() > base())
@@ -25,7 +25,7 @@ Vector<Range, 2> Range::carve(const Range& taken) const
         parts.append({ taken.end(), end().get() - taken.end().get() });
     return parts;
 }
-Range Range::intersect(const Range& other) const
+VirtualRange VirtualRange::intersect(VirtualRange const& other) const
 {
     if (*this == other) {
         return *this;
@@ -33,10 +33,10 @@ Range Range::intersect(const Range& other) const
     auto new_base = max(base(), other.base());
     auto new_end = min(end(), other.end());
     VERIFY(new_base < new_end);
-    return Range(new_base, (new_end - new_base).get());
+    return VirtualRange(new_base, (new_end - new_base).get());
 }
 
-KResultOr<Range> Range::expand_to_page_boundaries(FlatPtr address, size_t size)
+KResultOr<VirtualRange> VirtualRange::expand_to_page_boundaries(FlatPtr address, size_t size)
 {
     if (page_round_up_would_wrap(size))
         return EINVAL;
@@ -50,7 +50,7 @@ KResultOr<Range> Range::expand_to_page_boundaries(FlatPtr address, size_t size)
     auto base = VirtualAddress { address }.page_base();
     auto end = page_round_up(address + size);
 
-    return Range { base, end - base.get() };
+    return VirtualRange { base, end - base.get() };
 }
 
 }
diff --git a/Kernel/Memory/Range.h b/Kernel/Memory/VirtualRange.h
index 41b206af12..2fd6433f4e 100644
--- a/Kernel/Memory/Range.h
+++ b/Kernel/Memory/VirtualRange.h
@@ -12,12 +12,12 @@
 
 namespace Kernel::Memory {
 
-class Range {
-    friend class RangeAllocator;
+class VirtualRange {
+    friend class VirtualRangeAllocator;
 
 public:
-    Range() = delete;
-    Range(VirtualAddress base, size_t size)
+    VirtualRange() = delete;
+    VirtualRange(VirtualAddress base, size_t size)
         : m_base(base)
         , m_size(size)
     {
@@ -31,7 +31,7 @@ public:
 
     VirtualAddress end() const { return m_base.offset(m_size); }
 
-    bool operator==(const Range& other) const
+    bool operator==(VirtualRange const& other) const
     {
         return m_base == other.m_base && m_size == other.m_size;
     }
@@ -43,15 +43,15 @@ public:
         return base >= m_base && base.offset(size) <= end();
     }
 
-    bool contains(const Range& other) const
+    bool contains(VirtualRange const& other) const
     {
         return contains(other.base(), other.size());
     }
 
-    Vector<Range, 2> carve(const Range&) const;
-    Range intersect(const Range&) const;
+    Vector<VirtualRange, 2> carve(VirtualRange const&) const;
+    VirtualRange intersect(VirtualRange const&) const;
 
-    static KResultOr<Range> expand_to_page_boundaries(FlatPtr address, size_t size);
+    static KResultOr<VirtualRange> expand_to_page_boundaries(FlatPtr address, size_t size);
 
 private:
     VirtualAddress m_base;
@@ -61,8 +61,8 @@ private:
 }
 
 template<>
-struct AK::Formatter<Kernel::Memory::Range> : Formatter<FormatString> {
-    void format(FormatBuilder& builder, Kernel::Memory::Range value)
+struct AK::Formatter<Kernel::Memory::VirtualRange> : Formatter<FormatString> {
+    void format(FormatBuilder& builder, Kernel::Memory::VirtualRange value)
     {
         return Formatter<FormatString>::format(builder, "{} - {} (size {:p})", value.base().as_ptr(), value.base().offset(value.size() - 1).as_ptr(), value.size());
     }
diff --git a/Kernel/Memory/RangeAllocator.cpp b/Kernel/Memory/VirtualRangeAllocator.cpp
index 5f931222b0..16bccdfb6e 100644
--- a/Kernel/Memory/RangeAllocator.cpp
+++ b/Kernel/Memory/VirtualRangeAllocator.cpp
@@ -5,25 +5,25 @@
  */
 
 #include <AK/Checked.h>
-#include <Kernel/Memory/RangeAllocator.h>
+#include <Kernel/Memory/VirtualRangeAllocator.h>
 #include <Kernel/Random.h>
 
 #define VM_GUARD_PAGES
 
 namespace Kernel::Memory {
 
-RangeAllocator::RangeAllocator()
+VirtualRangeAllocator::VirtualRangeAllocator()
     : m_total_range({}, 0)
 {
 }
 
-void RangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
+void VirtualRangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
 {
     m_total_range = { base, size };
-    m_available_ranges.insert(base.get(), Range { base, size });
+    m_available_ranges.insert(base.get(), VirtualRange { base, size });
 }
 
-void RangeAllocator::initialize_from_parent(RangeAllocator const& parent_allocator)
+void VirtualRangeAllocator::initialize_from_parent(VirtualRangeAllocator const& parent_allocator)
 {
     ScopedSpinLock lock(parent_allocator.m_lock);
     m_total_range = parent_allocator.m_total_range;
@@ -33,16 +33,16 @@ void RangeAllocator::initialize_from_parent(RangeAllocator const& parent_allocat
     }
 }
 
-void RangeAllocator::dump() const
+void VirtualRangeAllocator::dump() const
 {
     VERIFY(m_lock.is_locked());
-    dbgln("RangeAllocator({})", this);
+    dbgln("VirtualRangeAllocator({})", this);
     for (auto& range : m_available_ranges) {
         dbgln(" {:x} -> {:x}", range.base().get(), range.end().get() - 1);
     }
 }
 
-void RangeAllocator::carve_at_iterator(auto& it, Range const& range)
+void VirtualRangeAllocator::carve_at_iterator(auto& it, VirtualRange const& range)
 {
     VERIFY(m_lock.is_locked());
     auto remaining_parts = (*it).carve(range);
@@ -56,7 +56,7 @@ void RangeAllocator::carve_at_iterator(auto& it, Range const& range)
     }
 }
 
-Optional<Range> RangeAllocator::allocate_randomized(size_t size, size_t alignment)
+Optional<VirtualRange> VirtualRangeAllocator::allocate_randomized(size_t size, size_t alignment)
 {
     if (!size)
         return {};
@@ -80,7 +80,7 @@ Optional<Range> RangeAllocator::allocate_randomized(size_t size, size_t alignmen
     return allocate_anywhere(size, alignment);
 }
 
-Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
+Optional<VirtualRange> VirtualRangeAllocator::allocate_anywhere(size_t size, size_t alignment)
 {
     if (!size)
         return {};
@@ -114,7 +114,7 @@ Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
         FlatPtr initial_base = available_range.base().offset(offset_from_effective_base).get();
         FlatPtr aligned_base = round_up_to_power_of_two(initial_base, alignment);
 
-        Range const allocated_range(VirtualAddress(aligned_base), size);
+        VirtualRange const allocated_range(VirtualAddress(aligned_base), size);
 
         VERIFY(m_total_range.contains(allocated_range));
 
@@ -125,11 +125,11 @@ Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
         carve_at_iterator(it, allocated_range);
         return allocated_range;
     }
-    dmesgln("RangeAllocator: Failed to allocate anywhere: size={}, alignment={}", size, alignment);
+    dmesgln("VirtualRangeAllocator: Failed to allocate anywhere: size={}, alignment={}", size, alignment);
     return {};
 }
 
-Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
+Optional<VirtualRange> VirtualRangeAllocator::allocate_specific(VirtualAddress base, size_t size)
 {
     if (!size)
         return {};
@@ -137,7 +137,7 @@ Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t si
     VERIFY(base.is_page_aligned());
     VERIFY((size % PAGE_SIZE) == 0);
 
-    Range const allocated_range(base, size);
+    VirtualRange const allocated_range(base, size);
     if (!m_total_range.contains(allocated_range)) {
         return {};
     }
@@ -157,7 +157,7 @@ Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t si
     return {};
 }
 
-void RangeAllocator::deallocate(Range const& range)
+void VirtualRangeAllocator::deallocate(VirtualRange const& range)
 {
     ScopedSpinLock lock(m_lock);
     VERIFY(m_total_range.contains(range));
@@ -166,7 +166,7 @@ void RangeAllocator::deallocate(Range const& range)
     VERIFY(range.base() < range.end());
     VERIFY(!m_available_ranges.is_empty());
 
-    Range merged_range = range;
+    VirtualRange merged_range = range;
 
     {
         // Try merging with preceding range.
diff --git a/Kernel/Memory/VirtualRangeAllocator.h b/Kernel/Memory/VirtualRangeAllocator.h
new file mode 100644
index 0000000000..025dd41bf6
--- /dev/null
+++ b/Kernel/Memory/VirtualRangeAllocator.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/RedBlackTree.h>
+#include <AK/Traits.h>
+#include <Kernel/Memory/VirtualRange.h>
+#include <Kernel/SpinLock.h>
+
+namespace Kernel::Memory {
+
+class VirtualRangeAllocator {
+public:
+    VirtualRangeAllocator();
+    ~VirtualRangeAllocator() = default;
+
+    void initialize_with_range(VirtualAddress, size_t);
+    void initialize_from_parent(VirtualRangeAllocator const&);
+
+    Optional<VirtualRange> allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
+    Optional<VirtualRange> allocate_specific(VirtualAddress, size_t);
+    Optional<VirtualRange> allocate_randomized(size_t, size_t alignment);
+    void deallocate(VirtualRange const&);
+
+    void dump() const;
+
+    bool contains(VirtualRange const& range) const { return m_total_range.contains(range); }
+
+private:
+    void carve_at_iterator(auto&, VirtualRange const&);
+
+    RedBlackTree<FlatPtr, VirtualRange> m_available_ranges;
+    VirtualRange m_total_range;
+    mutable SpinLock<u8> m_lock;
+};
+
+}
+
+namespace AK {
+template<>
+struct Traits<Kernel::Memory::VirtualRange> : public GenericTraits<Kernel::Memory::VirtualRange> {
+    static constexpr bool is_trivial() { return true; }
+};
+}
diff --git a/Kernel/PerformanceManager.h b/Kernel/PerformanceManager.h
index dda2dbf75c..7157d0e655 100644
--- a/Kernel/PerformanceManager.h
+++ b/Kernel/PerformanceManager.h
@@ -74,7 +74,7 @@ public:
         }
     }
 
-    inline static void add_unmap_perf_event(Process& current_process, Memory::Range const& region)
+    inline static void add_unmap_perf_event(Process& current_process, Memory::VirtualRange const& region)
     {
         if (auto* event_buffer = current_process.current_perf_events_buffer()) {
             [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_MUNMAP, region.base().get(), region.size(), nullptr);
diff --git a/Kernel/Storage/RamdiskController.cpp b/Kernel/Storage/RamdiskController.cpp
index dca4f083d7..b2ee3ab6a5 100644
--- a/Kernel/Storage/RamdiskController.cpp
+++ b/Kernel/Storage/RamdiskController.cpp
@@ -47,7 +47,7 @@ RamdiskController::RamdiskController()
     // Populate ramdisk controllers from Multiboot boot modules, if any.
     size_t count = 0;
     for (auto& used_memory_range : MM.used_memory_ranges()) {
-        if (used_memory_range.type == Memory::UsedMemoryRangeType::BootModule) {
+        if (used_memory_range.type == Memory::UsedMemoryVirtualRangeType::BootModule) {
             size_t length = Memory::page_round_up(used_memory_range.end.get()) - used_memory_range.start.get();
             auto region = MM.allocate_kernel_region(used_memory_range.start, length, "Ramdisk", Memory::Region::Access::Read | Memory::Region::Access::Write);
             if (!region)
diff --git a/Kernel/Syscalls/execve.cpp b/Kernel/Syscalls/execve.cpp
index a8b4b10807..692b2e674b 100644
--- a/Kernel/Syscalls/execve.cpp
+++ b/Kernel/Syscalls/execve.cpp
@@ -154,12 +154,12 @@ static KResultOr<FlatPtr> make_userspace_context_for_main_thread([[maybe_unused]
     return new_sp;
 }
 
-struct RequiredLoadRange {
+struct RequiredLoadVirtualRange {
     FlatPtr start { 0 };
     FlatPtr end { 0 };
 };
 
-static KResultOr<RequiredLoadRange> get_required_load_range(FileDescription& program_description)
+static KResultOr<RequiredLoadVirtualRange> get_required_load_range(FileDescription& program_description)
 {
     auto& inode = *(program_description.inode());
     auto vmobject = Memory::SharedInodeVMObject::try_create_with_inode(inode);
@@ -181,7 +181,7 @@ static KResultOr<RequiredLoadRange> get_required_load_range(FileDescription& pro
         return EINVAL;
     }
 
-    RequiredLoadRange range {};
+    RequiredLoadVirtualRange range {};
     elf_image.for_each_program_header([&range](const auto& pheader) {
         if (pheader.type() != PT_LOAD)
             return;
@@ -221,7 +221,7 @@ static KResultOr<FlatPtr> get_load_offset(const ElfW(Ehdr) & main_program_header
 
     auto main_program_load_range = main_program_load_range_result.value();
 
-    RequiredLoadRange selected_range {};
+    RequiredLoadVirtualRange selected_range {};
 
     if (interpreter_description) {
         auto interpreter_load_range_result = get_required_load_range(*interpreter_description);
@@ -235,8 +235,8 @@ static KResultOr<FlatPtr> get_load_offset(const ElfW(Ehdr) & main_program_header
         if (main_program_load_range.end < load_range_start || main_program_load_range.start > interpreter_load_range_end)
             return random_load_offset_in_range(load_range_start, load_range_size);
 
-        RequiredLoadRange first_available_part = { load_range_start, main_program_load_range.start };
-        RequiredLoadRange second_available_part = { main_program_load_range.end, interpreter_load_range_end };
+        RequiredLoadVirtualRange first_available_part = { load_range_start, main_program_load_range.start };
+        RequiredLoadVirtualRange second_available_part = { main_program_load_range.end, interpreter_load_range_end };
 
         // Select larger part
         if (first_available_part.end - first_available_part.start > second_available_part.end - second_available_part.start)
diff --git a/Kernel/Syscalls/futex.cpp b/Kernel/Syscalls/futex.cpp
index 2f43ce4f68..e1df9aaf30 100644
--- a/Kernel/Syscalls/futex.cpp
+++ b/Kernel/Syscalls/futex.cpp
@@ -129,7 +129,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
     // acquiring the queue lock
     RefPtr<Memory::VMObject> vmobject, vmobject2;
     if (!is_private) {
-        auto region = space().find_region_containing(Memory::Range { VirtualAddress { user_address_or_offset }, sizeof(u32) });
+        auto region = space().find_region_containing(Memory::VirtualRange { VirtualAddress { user_address_or_offset }, sizeof(u32) });
         if (!region)
             return EFAULT;
         vmobject = region->vmobject();
@@ -139,7 +139,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
         case FUTEX_REQUEUE:
         case FUTEX_CMP_REQUEUE:
         case FUTEX_WAKE_OP: {
-            auto region2 = space().find_region_containing(Memory::Range { VirtualAddress { user_address_or_offset2 }, sizeof(u32) });
+            auto region2 = space().find_region_containing(Memory::VirtualRange { VirtualAddress { user_address_or_offset2 }, sizeof(u32) });
             if (!region2)
                 return EFAULT;
             vmobject2 = region2->vmobject();
diff --git a/Kernel/Syscalls/get_stack_bounds.cpp b/Kernel/Syscalls/get_stack_bounds.cpp
index 7c680c008c..f0f5104eec 100644
--- a/Kernel/Syscalls/get_stack_bounds.cpp
+++ b/Kernel/Syscalls/get_stack_bounds.cpp
@@ -14,7 +14,7 @@ KResultOr<FlatPtr> Process::sys$get_stack_bounds(Userspace<FlatPtr*> user_stack_
     VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
     auto& regs = Thread::current()->get_register_dump_from_stack();
     FlatPtr stack_pointer = regs.userspace_sp();
-    auto* stack_region = space().find_region_containing(Memory::Range { VirtualAddress(stack_pointer), 1 });
+    auto* stack_region = space().find_region_containing(Memory::VirtualRange { VirtualAddress(stack_pointer), 1 });
 
     // The syscall handler should have killed us if we had an invalid stack pointer.
     VERIFY(stack_region);
diff --git a/Kernel/Syscalls/mmap.cpp b/Kernel/Syscalls/mmap.cpp
index aeab526447..df4075e133 100644
--- a/Kernel/Syscalls/mmap.cpp
+++ b/Kernel/Syscalls/mmap.cpp
@@ -199,7 +199,7 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
         return EINVAL;
 
     Memory::Region* region = nullptr;
-    Optional<Memory::Range> range;
+    Optional<Memory::VirtualRange> range;
 
     if (map_randomized) {
         range = space().page_directory().range_allocator().allocate_randomized(Memory::page_round_up(size), alignment);
@@ -272,7 +272,7 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
     return region->vaddr().get();
 }
 
-static KResultOr<Memory::Range> expand_range_to_page_boundaries(FlatPtr address, size_t size)
+static KResultOr<Memory::VirtualRange> expand_range_to_page_boundaries(FlatPtr address, size_t size)
 {
     if (Memory::page_round_up_would_wrap(size))
         return EINVAL;
@@ -286,7 +286,7 @@ static KResultOr<Memory::Range> expand_range_to_page_boundaries(FlatPtr address,
     auto base = VirtualAddress { address }.page_base();
     auto end = Memory::page_round_up(address + size);
 
-    return Memory::Range { base, end - base.get() };
+    return Memory::VirtualRange { base, end - base.get() };
 }
 
 KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int prot)
@@ -346,7 +346,7 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
         auto region = space().take_region(*old_region);
 
         // Unmap the old region here, specifying that we *don't* want the VM deallocated.
-        region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryRange::No);
+        region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
 
         // This vector is the region(s) adjacent to our range.
         // We need to allocate a new region for the range we wanted to change permission bits on.
@@ -409,7 +409,7 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
         auto region = space().take_region(*old_region);
 
         // Unmap the old region here, specifying that we *don't* want the VM deallocated.
-        region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryRange::No);
+        region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
 
         // This vector is the region(s) adjacent to our range.
         // We need to allocate a new region for the range we wanted to change permission bits on.
@@ -566,7 +566,7 @@ KResultOr<FlatPtr> Process::sys$mremap(Userspace<const Syscall::SC_mremap_params
         auto old_name = old_region->take_name();
 
         // Unmap without deallocating the VM range since we're going to reuse it.
-        old_region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryRange::No);
+        old_region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
         space().deallocate_region(*old_region);
 
         auto new_region_or_error = space().allocate_region_with_vmobject(range, new_vmobject.release_nonnull(), old_offset, old_name->view(), old_prot, false);
@@ -657,7 +657,7 @@ KResultOr<FlatPtr> Process::sys$msyscall(Userspace<void*> address)
     if (!Memory::is_user_address(VirtualAddress { address }))
         return EFAULT;
 
-    auto* region = space().find_region_containing(Memory::Range { VirtualAddress { address }, 1 });
+    auto* region = space().find_region_containing(Memory::VirtualRange { VirtualAddress { address }, 1 });
     if (!region)
         return EINVAL;
 
diff --git a/Kernel/Syscalls/ptrace.cpp b/Kernel/Syscalls/ptrace.cpp
index 2166fd80d3..06ccc8e02d 100644
--- a/Kernel/Syscalls/ptrace.cpp
+++ b/Kernel/Syscalls/ptrace.cpp
@@ -194,7 +194,7 @@ KResultOr<u32> Process::peek_user_data(Userspace<const u32*> address)
 
 KResult Process::poke_user_data(Userspace<u32*> address, u32 data)
 {
-    Memory::Range range = { VirtualAddress(address), sizeof(u32) };
+    Memory::VirtualRange range = { VirtualAddress(address), sizeof(u32) };
     auto* region = space().find_region_containing(range);
     if (!region)
         return EFAULT;
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index a958c855ed..d75efbce34 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -28,7 +28,7 @@
 #include <Kernel/KResult.h>
 #include <Kernel/KString.h>
 #include <Kernel/LockMode.h>
-#include <Kernel/Memory/Range.h>
+#include <Kernel/Memory/VirtualRange.h>
 #include <Kernel/Scheduler.h>
 #include <Kernel/TimerQueue.h>
 #include <Kernel/UnixTypes.h>
@@ -1308,7 +1308,7 @@ private:
     FlatPtr m_kernel_stack_top { 0 };
    OwnPtr<Memory::Region> m_kernel_stack_region;
     VirtualAddress m_thread_specific_data;
-    Optional<Memory::Range> m_thread_specific_range;
+    Optional<Memory::VirtualRange> m_thread_specific_range;
     Array<SignalActionData, NSIG> m_signal_action_data;
     Blocker* m_blocker { nullptr };
     Kernel::Mutex* m_blocking_lock { nullptr };
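
[Editor's note: a minimal usage sketch of the renamed allocator, assuming a VirtualRangeAllocator instance is at hand (for example via PageDirectory::range_allocator(), as seen above). The method names and signatures come from VirtualRangeAllocator.h in this diff; the example function itself is hypothetical.]

    #include <Kernel/Memory/VirtualRangeAllocator.h>

    namespace Kernel::Memory {

    // Illustrative only: reserve one page of virtual address space, then return it.
    void virtual_range_allocator_example(VirtualRangeAllocator& allocator)
    {
        // allocate_anywhere() defaults to PAGE_SIZE alignment and may fail.
        Optional<VirtualRange> range = allocator.allocate_anywhere(PAGE_SIZE);
        if (!range.has_value())
            return; // Callers must handle the empty Optional.
        VERIFY(allocator.contains(range.value()));
        allocator.deallocate(range.value());
    }

    }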