summaryrefslogtreecommitdiff
path: root/Kernel
diff options
context:
space:
mode:
authorLiav A <liavalb@gmail.com>2020-03-07 19:24:41 +0200
committerAndreas Kling <kling@serenityos.org>2020-03-08 14:13:30 +0100
commitd6e122fd3a2719bdd532523435a73c7a04fa9fc3 (patch)
tree23d1d656f10deec78275edb1ad1fdadad9c4d9a9 /Kernel
parentb066586355581f34ad9981bb6974066f8eb40446 (diff)
downloadserenity-d6e122fd3a2719bdd532523435a73c7a04fa9fc3.zip
Kernel: Allow contiguous allocations in physical memory
For that, we have a new type of VMObject, called ContiguousVMObject, that is responsible for allocating contiguous physical pages.
Diffstat (limited to 'Kernel')
-rw-r--r--Kernel/Makefile1
-rw-r--r--Kernel/VM/ContiguousVMObject.cpp66
-rw-r--r--Kernel/VM/ContiguousVMObject.h53
-rw-r--r--Kernel/VM/MemoryManager.cpp44
-rw-r--r--Kernel/VM/MemoryManager.h2
-rw-r--r--Kernel/VM/PhysicalRegion.cpp64
-rw-r--r--Kernel/VM/PhysicalRegion.h5
-rw-r--r--Kernel/VM/VMObject.h1
8 files changed, 216 insertions, 20 deletions
diff --git a/Kernel/Makefile b/Kernel/Makefile
index 8e9b7201ee..b06c9f9bbf 100644
--- a/Kernel/Makefile
+++ b/Kernel/Makefile
@@ -103,6 +103,7 @@ OBJS = \
TTY/VirtualConsole.o \
Thread.o \
VM/AnonymousVMObject.o \
+ VM/ContiguousVMObject.o \
VM/InodeVMObject.o \
VM/MemoryManager.o \
VM/PageDirectory.o \
diff --git a/Kernel/VM/ContiguousVMObject.cpp b/Kernel/VM/ContiguousVMObject.cpp
new file mode 100644
index 0000000000..41a188f553
--- /dev/null
+++ b/Kernel/VM/ContiguousVMObject.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <Kernel/VM/ContiguousVMObject.h>
+#include <Kernel/VM/MemoryManager.h>
+#include <Kernel/VM/PhysicalPage.h>
+
+namespace Kernel {
+
+//#define CONTIGUOUS_VMOBJECT_DEBUG
+
+// Factory: creates a VMObject whose backing physical pages are contiguous.
+// size is expected to be a multiple of PAGE_SIZE (asserted downstream in
+// MemoryManager::allocate_contiguous_supervisor_physical_pages).
+NonnullRefPtr<ContiguousVMObject> ContiguousVMObject::create_with_size(size_t size)
+{
+ return adopt(*new ContiguousVMObject(size));
+}
+
+// Allocates `size` bytes of physically-contiguous supervisor pages up front
+// and records them in this VMObject's page table slots.
+ContiguousVMObject::ContiguousVMObject(size_t size)
+    : VMObject(size)
+{
+ auto contiguous_physical_pages = MM.allocate_contiguous_supervisor_physical_pages(size);
+ // page_count() is derived from `size` by the VMObject base, so the
+ // returned vector is expected to hold exactly page_count() entries.
+ for (size_t i = 0; i < page_count(); i++) {
+ physical_pages()[i] = contiguous_physical_pages[i];
+#ifdef CONTIGUOUS_VMOBJECT_DEBUG
+ dbg() << "Contiguous page[" << i << "]: " << physical_pages()[i]->paddr();
+#endif
+ }
+}
+
+// Copy constructor: shares the other object's physical pages via the
+// VMObject base copy. Only reachable through clone(), which is asserted
+// unreachable for this type.
+ContiguousVMObject::ContiguousVMObject(const ContiguousVMObject& other)
+    : VMObject(other)
+{
+}
+
+// Page release is handled by the RefPtr entries in the VMObject base;
+// nothing extra to tear down here.
+ContiguousVMObject::~ContiguousVMObject()
+{
+}
+
+// Cloning a contiguous VMObject is intentionally unsupported: a clone would
+// need a fresh contiguous physical run, which this type does not implement.
+NonnullRefPtr<VMObject> ContiguousVMObject::clone()
+{
+ ASSERT_NOT_REACHED();
+}
+
+}
diff --git a/Kernel/VM/ContiguousVMObject.h b/Kernel/VM/ContiguousVMObject.h
new file mode 100644
index 0000000000..b25d6dc1c0
--- /dev/null
+++ b/Kernel/VM/ContiguousVMObject.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <Kernel/VM/VMObject.h>
+#include <LibBareMetal/Memory/PhysicalAddress.h>
+
+namespace Kernel {
+
+// A VMObject backed by physically-contiguous pages, for hardware that needs
+// contiguous physical memory (e.g. DMA buffers). Pages are allocated eagerly
+// in the constructor from the supervisor physical regions.
+class ContiguousVMObject final : public VMObject {
+public:
+ virtual ~ContiguousVMObject() override;
+
+ // `size` in bytes; expected to be PAGE_SIZE-aligned.
+ static NonnullRefPtr<ContiguousVMObject> create_with_size(size_t);
+
+private:
+ explicit ContiguousVMObject(size_t);
+ explicit ContiguousVMObject(const ContiguousVMObject&);
+
+ virtual const char* class_name() const override { return "ContiguousVMObject"; }
+ // Asserted unreachable in the implementation; cloning is unsupported.
+ virtual NonnullRefPtr<VMObject> clone() override;
+
+ ContiguousVMObject& operator=(const ContiguousVMObject&) = delete;
+ ContiguousVMObject& operator=(ContiguousVMObject&&) = delete;
+ ContiguousVMObject(ContiguousVMObject&&) = delete;
+
+ virtual bool is_contiguous() const override { return true; }
+};
+}
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index aff6ad2dbc..b03f1fe1bc 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -32,6 +32,7 @@
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Multiboot.h>
#include <Kernel/VM/AnonymousVMObject.h>
+#include <Kernel/VM/ContiguousVMObject.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/PhysicalRegion.h>
@@ -302,6 +303,19 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
return region->handle_fault(fault);
}
+// Allocates a kernel region whose backing physical pages are contiguous.
+// Returns nullptr if no virtual range (or region) could be obtained.
+OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
+{
+    ASSERT(!(size % PAGE_SIZE));
+    // First carve out a virtual range, then back it with a VMObject that
+    // guarantees contiguous physical pages.
+    auto virtual_range = kernel_page_directory().range_allocator().allocate_anywhere(size);
+    if (!virtual_range.is_valid())
+        return nullptr;
+    auto contiguous_vmobject = ContiguousVMObject::create_with_size(size);
+    return allocate_kernel_region_with_vmobject(virtual_range, contiguous_vmobject, name, access, user_accessible, cacheable);
+}
+
OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, bool should_commit, bool cacheable)
{
ASSERT(!(size % PAGE_SIZE));
@@ -447,6 +461,36 @@ void MemoryManager::deallocate_supervisor_physical_page(PhysicalPage&& page)
ASSERT_NOT_REACHED();
}
+// Allocates ceil(size / PAGE_SIZE) physically-contiguous supervisor pages,
+// zero-fills them through a temporary kernel mapping, and returns them.
+// Panics (ASSERT_NOT_REACHED) if no region can satisfy the request.
+Vector<RefPtr<PhysicalPage>> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size)
+{
+    ASSERT(!(size % PAGE_SIZE));
+    InterruptDisabler disabler;
+    size_t count = ceil_div(size, PAGE_SIZE);
+    Vector<RefPtr<PhysicalPage>> physical_pages;
+    physical_pages.ensure_capacity(count);
+
+    // Stop at the first region that satisfies the request. (The previous
+    // loop fell through to the next iteration even after a successful
+    // allocation, overwriting — and leaking — the result with a later
+    // region's possibly-empty vector.)
+    for (auto& region : m_super_physical_regions) {
+        physical_pages = region.take_contiguous_free_pages(count, true);
+        if (!physical_pages.is_empty())
+            break;
+    }
+
+    if (physical_pages.is_empty()) {
+        if (m_super_physical_regions.is_empty()) {
+            klog() << "MM: no super physical regions available (?)";
+        }
+        klog() << "MM: no super physical pages available";
+        ASSERT_NOT_REACHED();
+        return {};
+    }
+
+    // Zero the fresh pages via a temporary mapping before handing them out.
+    auto cleanup_region = MM.allocate_kernel_region(physical_pages[0]->paddr(), PAGE_SIZE * count, "MemoryManager Allocation Sanitization", Region::Access::Read | Region::Access::Write);
+    fast_u32_fill((u32*)cleanup_region->vaddr().as_ptr(), 0, (PAGE_SIZE * count) / sizeof(u32));
+    m_super_physical_pages_used += count;
+    return physical_pages;
+}
+
RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
{
InterruptDisabler disabler;
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 748a67a2ca..02d3f64f37 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -100,9 +100,11 @@ public:
RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
RefPtr<PhysicalPage> allocate_supervisor_physical_page();
+ Vector<RefPtr<PhysicalPage>> allocate_contiguous_supervisor_physical_pages(size_t size);
void deallocate_user_physical_page(PhysicalPage&&);
void deallocate_supervisor_physical_page(PhysicalPage&&);
+ OwnPtr<Region> allocate_contiguous_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true, bool cacheable = true);
OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
diff --git a/Kernel/VM/PhysicalRegion.cpp b/Kernel/VM/PhysicalRegion.cpp
index 1a0880bdff..8434e849a9 100644
--- a/Kernel/VM/PhysicalRegion.cpp
+++ b/Kernel/VM/PhysicalRegion.cpp
@@ -27,6 +27,7 @@
#include <AK/Bitmap.h>
#include <AK/NonnullRefPtr.h>
#include <AK/RefPtr.h>
+#include <AK/Vector.h>
#include <Kernel/Assertions.h>
#include <Kernel/VM/PhysicalPage.h>
#include <Kernel/VM/PhysicalRegion.h>
@@ -63,36 +64,59 @@ unsigned PhysicalRegion::finalize_capacity()
return size();
}
-RefPtr<PhysicalPage> PhysicalRegion::take_free_page(bool supervisor)
+// Takes `count` physically-contiguous pages from this region and returns
+// them as PhysicalPage handles.
+// NOTE(review): ASSERT(m_used != m_pages) replaces the old graceful
+// nullptr return — a completely full region now panics instead of letting
+// the caller (MemoryManager) try the next region; consider returning an
+// empty vector instead. Verify against the caller's is_empty() handling.
+Vector<RefPtr<PhysicalPage>> PhysicalRegion::take_contiguous_free_pages(size_t count, bool supervisor)
{
ASSERT(m_pages);
+ ASSERT(m_used != m_pages);
- if (m_used == m_pages)
- return nullptr;
+ Vector<RefPtr<PhysicalPage>> physical_pages;
+ physical_pages.ensure_capacity(count);
- // search from the last page we allocated
- for (unsigned page = m_last; page < m_pages; page++) {
- if (!m_bitmap.get(page)) {
- m_bitmap.set(page, true);
- m_used++;
- m_last = page + 1;
- return PhysicalPage::create(m_lower.offset(page * PAGE_SIZE), supervisor);
- }
+ // find_contiguous_free_pages() asserts on failure, so from here on the
+ // run [first_contiguous_page, first_contiguous_page + count) is marked
+ // allocated in the bitmap and we just materialize the page handles.
+ auto first_contiguous_page = find_contiguous_free_pages(count);
+
+ for (size_t index = 0; index < count; index++) {
+ physical_pages.append(PhysicalPage::create(m_lower.offset(PAGE_SIZE * (index + first_contiguous_page)), supervisor));
}
+ return physical_pages;
+}
- // wrap back around to the start in case we missed something
- for (unsigned page = 0; page < m_last; page++) {
- if (!m_bitmap.get(page)) {
- m_bitmap.set(page, true);
- m_used++;
- m_last = page + 1;
- return PhysicalPage::create(m_lower.offset(page * PAGE_SIZE), supervisor);
+// Finds and marks as allocated a run of `count` contiguous free pages,
+// returning the index of the first page. Panics if no such run exists.
+unsigned PhysicalRegion::find_contiguous_free_pages(size_t count)
+{
+ ASSERT(count != 0);
+ // Scans the whole bitmap for a suitable run (not just from m_last).
+ auto range = find_and_allocate_contiguous_range(count);
+ ASSERT(range.has_value());
+ return range.value();
+}
+
+// Searches the bitmap for a run of `count` contiguous unset (free) bits;
+// on success marks them allocated and returns the first page index,
+// otherwise returns an empty Optional.
+// NOTE(review): the `count == found_pages_count` equality check assumes
+// find_longest_range_of_unset_bits caps the reported run length at `count`;
+// if it can report a longer run, a larger-than-requested run would be
+// rejected here — verify against AK::Bitmap.
+Optional<unsigned> PhysicalRegion::find_and_allocate_contiguous_range(size_t count)
+{
+ ASSERT(count != 0);
+ size_t found_pages_count = 0;
+ auto first_index = m_bitmap.find_longest_range_of_unset_bits(count, found_pages_count);
+ if (!first_index.has_value())
+ return {};
+
+ auto page = first_index.value();
+ if (count == found_pages_count) {
+ // Claim the whole run and update the allocation bookkeeping.
+ for (unsigned page_index = page; page_index < (page + count); page_index++) {
+ m_bitmap.set(page_index, true);
}
+ m_used += count;
+ m_last = page + count;
+ return page;
}
+ return {};
+}
- ASSERT_NOT_REACHED();
+// Takes a single free page from this region, or nullptr if the region is
+// full. Reimplemented as the count == 1 case of the contiguous-run search;
+// the m_used check above guarantees find_contiguous_free_pages(1) can
+// succeed (its internal ASSERT should not fire).
+RefPtr<PhysicalPage> PhysicalRegion::take_free_page(bool supervisor)
+{
+ ASSERT(m_pages);
+
+ if (m_used == m_pages)
+ return nullptr;
- return nullptr;
+ return PhysicalPage::create(m_lower.offset(find_contiguous_free_pages(1) * PAGE_SIZE), supervisor);
}
void PhysicalRegion::return_page_at(PhysicalAddress addr)
diff --git a/Kernel/VM/PhysicalRegion.h b/Kernel/VM/PhysicalRegion.h
index fcc7d0f19b..7f411e284c 100644
--- a/Kernel/VM/PhysicalRegion.h
+++ b/Kernel/VM/PhysicalRegion.h
@@ -28,6 +28,7 @@
#include <AK/Bitmap.h>
#include <AK/NonnullRefPtr.h>
+#include <AK/Optional.h>
#include <AK/RefCounted.h>
#include <Kernel/VM/PhysicalPage.h>
@@ -51,10 +52,14 @@ public:
bool contains(PhysicalPage& page) const { return page.paddr() >= m_lower && page.paddr() <= m_upper; }
RefPtr<PhysicalPage> take_free_page(bool supervisor);
+ Vector<RefPtr<PhysicalPage>> take_contiguous_free_pages(size_t count, bool supervisor);
void return_page_at(PhysicalAddress addr);
void return_page(PhysicalPage&& page) { return_page_at(page.paddr()); }
private:
+ unsigned find_contiguous_free_pages(size_t count);
+ Optional<unsigned> find_and_allocate_contiguous_range(size_t count);
+
PhysicalRegion(PhysicalAddress lower, PhysicalAddress upper);
PhysicalAddress m_lower;
diff --git a/Kernel/VM/VMObject.h b/Kernel/VM/VMObject.h
index c108ea95a6..79ee9dadad 100644
--- a/Kernel/VM/VMObject.h
+++ b/Kernel/VM/VMObject.h
@@ -54,6 +54,7 @@ public:
virtual bool is_inode() const { return false; }
virtual bool is_shared_inode() const { return false; }
virtual bool is_private_inode() const { return false; }
+ virtual bool is_contiguous() const { return false; }
size_t page_count() const { return m_physical_pages.size(); }
const FixedArray<RefPtr<PhysicalPage>>& physical_pages() const { return m_physical_pages; }