summaryrefslogtreecommitdiff
path: root/Kernel/VM
diff options
context:
space:
mode:
author: Tom <tomut@yahoo.com> 2020-06-01 22:55:09 -0600
committer: Andreas Kling <kling@serenityos.org> 2020-06-04 18:15:23 +0200
commit: 841364b6096fad9bb5a9f48d98b6cd94ce1f720d (patch)
tree: 64803ba7e042b3c6df90cad77dc5b9ae11c9cd4d /Kernel/VM
parent: 19190267a604031523355efb28c22954466a3a94 (diff)
download: serenity-841364b6096fad9bb5a9f48d98b6cd94ce1f720d.zip
Kernel: Add mechanism to identity map the lowest 2MB
Diffstat (limited to 'Kernel/VM')
-rw-r--r--  Kernel/VM/MemoryManager.cpp   18
-rw-r--r--  Kernel/VM/MemoryManager.h      5
-rw-r--r--  Kernel/VM/PageDirectory.cpp    1
-rw-r--r--  Kernel/VM/PageDirectory.h      2
-rw-r--r--  Kernel/VM/RangeAllocator.h     5
-rw-r--r--  Kernel/VM/Region.cpp          15
-rw-r--r--  Kernel/VM/Region.h             6
7 files changed, 40 insertions(+), 12 deletions(-)
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 2611f63d4e..ce59995c6e 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -211,8 +211,6 @@ void MemoryManager::initialize()
Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
{
- if (vaddr.get() < 0xc0000000)
- return nullptr;
for (auto& region : MM.m_kernel_regions) {
if (region.contains(vaddr))
return &region;
@@ -318,6 +316,18 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
return allocate_kernel_region_with_vmobject(range, *vmobject, name, access, user_accessible, cacheable);
}
+OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
+{
+ ASSERT(!(size % PAGE_SIZE));
+ auto range = kernel_page_directory().identity_range_allocator().allocate_specific(VirtualAddress(paddr.get()), size);
+ if (!range.is_valid())
+ return nullptr;
+ auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
+ if (!vmobject)
+ return nullptr;
+ return allocate_kernel_region_with_vmobject(range, *vmobject, name, access, user_accessible, cacheable);
+}
+
OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access, bool cacheable)
{
return allocate_kernel_region(size, name, access, true, true, cacheable);
@@ -665,7 +675,7 @@ void MemoryManager::unregister_vmobject(VMObject& vmobject)
void MemoryManager::register_region(Region& region)
{
InterruptDisabler disabler;
- if (region.vaddr().get() >= 0xc0000000)
+ if (region.is_kernel())
m_kernel_regions.append(&region);
else
m_user_regions.append(&region);
@@ -674,7 +684,7 @@ void MemoryManager::register_region(Region& region)
void MemoryManager::unregister_region(Region& region)
{
InterruptDisabler disabler;
- if (region.vaddr().get() >= 0xc0000000)
+ if (region.is_kernel())
m_kernel_regions.remove(&region);
else
m_user_regions.remove(&region);
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 35c954df5b..d58602fa11 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -107,6 +107,7 @@ public:
OwnPtr<Region> allocate_contiguous_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true, bool cacheable = true);
OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
+ OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
OwnPtr<Region> allocate_kernel_region_with_vmobject(const Range&, VMObject&, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access, bool cacheable = true);
@@ -143,6 +144,8 @@ public:
PhysicalPage& shared_zero_page() { return *m_shared_zero_page; }
+ PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
+
private:
MemoryManager();
~MemoryManager();
@@ -177,8 +180,6 @@ private:
PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index);
PageTableEntry* quickmap_pt(PhysicalAddress);
- PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
-
const PageTableEntry* pte(const PageDirectory&, VirtualAddress);
PageTableEntry& ensure_pte(PageDirectory&, VirtualAddress);
diff --git a/Kernel/VM/PageDirectory.cpp b/Kernel/VM/PageDirectory.cpp
index 1a9d6b2ea7..75f934f2c9 100644
--- a/Kernel/VM/PageDirectory.cpp
+++ b/Kernel/VM/PageDirectory.cpp
@@ -59,6 +59,7 @@ extern "C" PageDirectoryEntry boot_pd3[1024];
PageDirectory::PageDirectory()
{
m_range_allocator.initialize_with_range(VirtualAddress(0xc0800000), 0x3f000000);
+ m_identity_range_allocator.initialize_with_range(VirtualAddress(FlatPtr(0x00000000)), 0x00200000);
// Adopt the page tables already set up by boot.S
PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((FlatPtr)boot_pdpt));
diff --git a/Kernel/VM/PageDirectory.h b/Kernel/VM/PageDirectory.h
index 45a4b909c2..7602a8ccd7 100644
--- a/Kernel/VM/PageDirectory.h
+++ b/Kernel/VM/PageDirectory.h
@@ -52,6 +52,7 @@ public:
u32 cr3() const { return m_directory_table->paddr().get(); }
RangeAllocator& range_allocator() { return m_range_allocator; }
+ RangeAllocator& identity_range_allocator() { return m_identity_range_allocator; }
Process* process() { return m_process; }
const Process* process() const { return m_process; }
@@ -62,6 +63,7 @@ private:
Process* m_process { nullptr };
RangeAllocator m_range_allocator;
+ RangeAllocator m_identity_range_allocator;
RefPtr<PhysicalPage> m_directory_table;
RefPtr<PhysicalPage> m_directory_pages[4];
HashMap<unsigned, RefPtr<PhysicalPage>> m_physical_pages;
diff --git a/Kernel/VM/RangeAllocator.h b/Kernel/VM/RangeAllocator.h
index c6fb3f9931..677a150335 100644
--- a/Kernel/VM/RangeAllocator.h
+++ b/Kernel/VM/RangeAllocator.h
@@ -90,6 +90,11 @@ public:
void dump() const;
+ bool contains(const Range& range) const
+ {
+ return m_total_range.contains(range);
+ }
+
private:
void carve_at_index(int, const Range&);
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index b744bf16de..d6ba32dea4 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -40,13 +40,14 @@
namespace Kernel {
-Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, u8 access, bool cacheable)
+Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, u8 access, bool cacheable, bool kernel)
: m_range(range)
, m_offset_in_vmobject(offset_in_vmobject)
, m_vmobject(move(vmobject))
, m_name(name)
, m_access(access)
, m_cacheable(cacheable)
+ , m_kernel(kernel)
{
MM.register_region(*this);
}
@@ -186,14 +187,14 @@ size_t Region::amount_shared() const
NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable)
{
- auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable);
+ auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable, false);
region->m_user_accessible = true;
return region;
}
NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable)
{
- auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable);
+ auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable, true);
region->m_user_accessible = false;
return region;
}
@@ -268,8 +269,12 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
dbg() << "MM: >> Unmapped " << vaddr << " => P" << String::format("%p", page ? page->paddr().get() : 0) << " <<";
#endif
}
- if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes)
- m_page_directory->range_allocator().deallocate(range());
+ if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes) {
+ if (m_page_directory->range_allocator().contains(range()))
+ m_page_directory->range_allocator().deallocate(range());
+ else
+ m_page_directory->identity_range_allocator().deallocate(range());
+ }
m_page_directory = nullptr;
}
diff --git a/Kernel/VM/Region.h b/Kernel/VM/Region.h
index 546702da61..445f0a2825 100644
--- a/Kernel/VM/Region.h
+++ b/Kernel/VM/Region.h
@@ -95,6 +95,9 @@ public:
bool is_user_accessible() const { return m_user_accessible; }
void set_user_accessible(bool b) { m_user_accessible = b; }
+ bool is_kernel() const { return m_kernel || vaddr().get() >= 0xc0000000; }
+ void set_kernel(bool kernel) { m_kernel = kernel; }
+
PageFaultResponse handle_fault(const PageFault&);
NonnullOwnPtr<Region> clone();
@@ -178,7 +181,7 @@ public:
Region* m_prev { nullptr };
// NOTE: These are public so we can make<> them.
- Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String&, u8 access, bool cacheable);
+ Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String&, u8 access, bool cacheable, bool kernel);
void set_inherit_mode(InheritMode inherit_mode) { m_inherit_mode = inherit_mode; }
@@ -211,6 +214,7 @@ private:
bool m_cacheable : 1 { false };
bool m_stack : 1 { false };
bool m_mmap : 1 { false };
+ bool m_kernel : 1 { false };
mutable OwnPtr<Bitmap> m_cow_map;
};