author     Andreas Kling <awesomekling@gmail.com>    2019-07-19 16:09:34 +0200
committer  Andreas Kling <awesomekling@gmail.com>    2019-07-19 16:11:52 +0200
commit  5b2447a27be235d6f0a0debb4e53bade05d32a75 (patch)
tree    9b25527e4a8aaf6b34e98e2820024b4d5df1b98a /Kernel
parent  4547a301c4c4ba761a73f58dccb76075f2fcba12 (diff)
Kernel: Track user accessibility per Region.
Region now has is_user_accessible(), which informs the memory manager how to map these pages. Previously, we were just passing a "bool user_allowed" to various functions and I'm not at all sure that any of that was correct.

All the Region constructors are now hidden, and you must go through one of these helpers to construct a Region:

- Region::create_user_accessible(...)
- Region::create_kernel_only(...)

That ensures that we don't accidentally create a Region without specifying user accessibility. :^)
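A minimal sketch of the two kinds of call site, assembled from the Process.cpp and MemoryManager.cpp hunks below (range, name, prot and the MM mapping helpers are the locals and functions already present at those call sites, not new API):

    // Userspace-visible region: the factory marks the Region user-accessible,
    // so mapping it sets the user-allowed bit on its page table entries.
    m_regions.append(Region::create_user_accessible(range, name, prot_to_region_access_flags(prot)));
    MM.map_region(*this, m_regions.last());

    // Kernel-only region: create_kernel_only() leaves m_user_accessible false,
    // so the same mapping path emits PTEs without the user-allowed bit.
    auto region = Region::create_kernel_only(range, move(name), PROT_READ | PROT_WRITE | PROT_EXEC, false);
    MM.map_region_at_address(*m_kernel_page_directory, *region, range.base());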
Diffstat (limited to 'Kernel')
-rw-r--r--  Kernel/Process.cpp           8
-rw-r--r--  Kernel/Process.h             2
-rw-r--r--  Kernel/VM/MemoryManager.cpp  28
-rw-r--r--  Kernel/VM/MemoryManager.h    4
-rw-r--r--  Kernel/VM/Region.cpp         45
-rw-r--r--  Kernel/VM/Region.h           15
6 files changed, 72 insertions, 30 deletions
diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index a119dad55b..723b01b18e 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -99,19 +99,19 @@ Region* Process::allocate_region(VirtualAddress vaddr, size_t size, const String
auto range = allocate_range(vaddr, size);
if (!range.is_valid())
return nullptr;
- m_regions.append(adopt(*new Region(range, move(name), prot_to_region_access_flags(prot))));
+ m_regions.append(Region::create_user_accessible(range, name, prot_to_region_access_flags(prot)));
MM.map_region(*this, m_regions.last());
if (commit)
m_regions.last().commit();
return &m_regions.last();
}
-Region* Process::allocate_file_backed_region(VirtualAddress vaddr, size_t size, RefPtr<Inode>&& inode, const String& name, int prot)
+Region* Process::allocate_file_backed_region(VirtualAddress vaddr, size_t size, NonnullRefPtr<Inode> inode, const String& name, int prot)
{
auto range = allocate_range(vaddr, size);
if (!range.is_valid())
return nullptr;
- m_regions.append(adopt(*new Region(range, move(inode), name, prot_to_region_access_flags(prot))));
+ m_regions.append(Region::create_user_accessible(range, inode, name, prot_to_region_access_flags(prot)));
MM.map_region(*this, m_regions.last());
return &m_regions.last();
}
@@ -122,7 +122,7 @@ Region* Process::allocate_region_with_vmo(VirtualAddress vaddr, size_t size, Non
if (!range.is_valid())
return nullptr;
offset_in_vmo &= PAGE_MASK;
- m_regions.append(adopt(*new Region(range, move(vmo), offset_in_vmo, name, prot_to_region_access_flags(prot))));
+ m_regions.append(Region::create_user_accessible(range, move(vmo), offset_in_vmo, name, prot_to_region_access_flags(prot)));
MM.map_region(*this, m_regions.last());
return &m_regions.last();
}
diff --git a/Kernel/Process.h b/Kernel/Process.h
index d5e3c8e239..5e02e738f6 100644
--- a/Kernel/Process.h
+++ b/Kernel/Process.h
@@ -253,7 +253,7 @@ public:
bool is_superuser() const { return m_euid == 0; }
Region* allocate_region_with_vmo(VirtualAddress, size_t, NonnullRefPtr<VMObject>, size_t offset_in_vmo, const String& name, int prot);
- Region* allocate_file_backed_region(VirtualAddress, size_t, RefPtr<Inode>&&, const String& name, int prot);
+ Region* allocate_file_backed_region(VirtualAddress, size_t, NonnullRefPtr<Inode>, const String& name, int prot);
Region* allocate_region(VirtualAddress, size_t, const String& name, int prot = PROT_READ | PROT_WRITE, bool commit = true);
bool deallocate_region(Region& region);
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index adccf9a7a7..aa58797031 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -305,7 +305,7 @@ bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
#ifdef PAGE_FAULT_DEBUG
dbgprintf("MM: zero_page() but page already present. Fine with me!\n");
#endif
- remap_region_page(region, page_index_in_region, true);
+ remap_region_page(region, page_index_in_region);
return true;
}
auto physical_page = allocate_user_physical_page(ShouldZeroFill::Yes);
@@ -314,7 +314,7 @@ bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
#endif
region.set_should_cow(page_index_in_region, false);
vmo.physical_pages()[page_index_in_region] = move(physical_page);
- remap_region_page(region, page_index_in_region, true);
+ remap_region_page(region, page_index_in_region);
return true;
}
@@ -327,7 +327,7 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
dbgprintf(" >> It's a COW page but nobody is sharing it anymore. Remap r/w\n");
#endif
region.set_should_cow(page_index_in_region, false);
- remap_region_page(region, page_index_in_region, true);
+ remap_region_page(region, page_index_in_region);
return true;
}
@@ -345,7 +345,7 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
vmo.physical_pages()[page_index_in_region] = move(physical_page);
unquickmap_page();
region.set_should_cow(page_index_in_region, false);
- remap_region_page(region, page_index_in_region, true);
+ remap_region_page(region, page_index_in_region);
return true;
}
@@ -366,7 +366,7 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re
if (!vmo_page.is_null()) {
dbgprintf("MM: page_in_from_inode() but page already present. Fine with me!\n");
- remap_region_page(region, page_index_in_region, true);
+ remap_region_page(region, page_index_in_region);
return true;
}
@@ -391,7 +391,7 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re
kprintf("MM: page_in_from_inode was unable to allocate a physical page\n");
return false;
}
- remap_region_page(region, page_index_in_region, true);
+ remap_region_page(region, page_index_in_region);
u8* dest_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
memcpy(dest_ptr, page_buffer, PAGE_SIZE);
return true;
@@ -457,8 +457,8 @@ RefPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String&& name)
ASSERT(!(size % PAGE_SIZE));
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
ASSERT(range.is_valid());
- auto region = adopt(*new Region(range, move(name), PROT_READ | PROT_WRITE | PROT_EXEC, false));
- MM.map_region_at_address(*m_kernel_page_directory, *region, range.base(), false);
+ auto region = Region::create_kernel_only(range, move(name), PROT_READ | PROT_WRITE | PROT_EXEC, false);
+ MM.map_region_at_address(*m_kernel_page_directory, *region, range.base());
// FIXME: It would be cool if these could zero-fill on demand instead.
region->commit();
return region;
@@ -641,7 +641,7 @@ void MemoryManager::unquickmap_page()
m_quickmap_in_use = false;
}
-void MemoryManager::remap_region_page(Region& region, unsigned page_index_in_region, bool user_allowed)
+void MemoryManager::remap_region_page(Region& region, unsigned page_index_in_region)
{
ASSERT(region.page_directory());
InterruptDisabler disabler;
@@ -657,7 +657,7 @@ void MemoryManager::remap_region_page(Region& region, unsigned page_index_in_reg
pte.set_writable(region.is_writable());
pte.set_cache_disabled(!region.vmo().m_allow_cpu_caching);
pte.set_write_through(!region.vmo().m_allow_cpu_caching);
- pte.set_user_allowed(user_allowed);
+ pte.set_user_allowed(region.is_user_accessible());
region.page_directory()->flush(page_vaddr);
#ifdef MM_DEBUG
dbgprintf("MM: >> remap_region_page (PD=%x, PTE=P%x) '%s' L%x => P%x (@%p)\n", region.page_directory()->cr3(), pte.ptr(), region.name().characters(), page_vaddr.get(), physical_page->paddr().get(), physical_page.ptr());
@@ -668,10 +668,10 @@ void MemoryManager::remap_region(PageDirectory& page_directory, Region& region)
{
InterruptDisabler disabler;
ASSERT(region.page_directory() == &page_directory);
- map_region_at_address(page_directory, region, region.vaddr(), true);
+ map_region_at_address(page_directory, region, region.vaddr());
}
-void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region& region, VirtualAddress vaddr, bool user_allowed)
+void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region& region, VirtualAddress vaddr)
{
InterruptDisabler disabler;
region.set_page_directory(page_directory);
@@ -698,7 +698,7 @@ void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region&
pte.set_present(false);
pte.set_writable(region.is_writable());
}
- pte.set_user_allowed(user_allowed);
+ pte.set_user_allowed(region.is_user_accessible());
page_directory.flush(page_vaddr);
#ifdef MM_DEBUG
dbgprintf("MM: >> map_region_at_address (PD=%x) '%s' L%x => P%x (@%p)\n", &page_directory, region.name().characters(), page_vaddr, physical_page ? physical_page->paddr().get() : 0, physical_page.ptr());
@@ -729,7 +729,7 @@ bool MemoryManager::unmap_region(Region& region)
bool MemoryManager::map_region(Process& process, Region& region)
{
- map_region_at_address(process.page_directory(), region, region.vaddr(), true);
+ map_region_at_address(process.page_directory(), region, region.vaddr());
return true;
}
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 002911baae..cfca8387ce 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -71,7 +71,7 @@ public:
void map_for_kernel(VirtualAddress, PhysicalAddress);
RefPtr<Region> allocate_kernel_region(size_t, String&& name);
- void map_region_at_address(PageDirectory&, Region&, VirtualAddress, bool user_accessible);
+ void map_region_at_address(PageDirectory&, Region&, VirtualAddress);
unsigned user_physical_pages() const { return m_user_physical_pages; }
unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }
@@ -87,7 +87,7 @@ private:
void register_region(Region&);
void unregister_region(Region&);
- void remap_region_page(Region&, unsigned page_index_in_region, bool user_allowed);
+ void remap_region_page(Region&, unsigned page_index_in_region);
void initialize_paging();
void flush_entire_tlb();
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index d0c8426239..20db77fd4e 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -1,3 +1,4 @@
+#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Process.h>
#include <Kernel/Thread.h>
#include <Kernel/VM/MemoryManager.h>
@@ -15,12 +16,12 @@ Region::Region(const Range& range, const String& name, u8 access, bool cow)
MM.register_region(*this);
}
-Region::Region(const Range& range, RefPtr<Inode>&& inode, const String& name, u8 access)
+Region::Region(const Range& range, RefPtr<Inode>&& inode, const String& name, u8 access, bool cow)
: m_range(range)
, m_vmo(VMObject::create_file_backed(move(inode)))
, m_name(name)
, m_access(access)
- , m_cow_map(Bitmap::create(m_vmo->page_count()))
+ , m_cow_map(Bitmap::create(m_vmo->page_count(), cow))
{
MM.register_region(*this);
}
@@ -61,7 +62,7 @@ bool Region::page_in()
return false;
continue;
}
- MM.remap_region_page(*this, i, true);
+ MM.remap_region_page(*this, i);
}
return true;
}
@@ -69,6 +70,10 @@ bool Region::page_in()
NonnullRefPtr<Region> Region::clone()
{
ASSERT(current);
+
+ // NOTE: Kernel-only regions should never be cloned.
+ ASSERT(is_user_accessible());
+
if (m_shared || (is_readable() && !is_writable())) {
#ifdef MM_DEBUG
dbgprintf("%s<%u> Region::clone(): sharing %s (L%x)\n",
@@ -78,7 +83,7 @@ NonnullRefPtr<Region> Region::clone()
vaddr().get());
#endif
// Create a new region backed by the same VMObject.
- return adopt(*new Region(m_range, m_vmo, m_offset_in_vmo, String(m_name), m_access));
+ return Region::create_user_accessible(m_range, m_vmo, m_offset_in_vmo, m_name, m_access);
}
#ifdef MM_DEBUG
@@ -91,7 +96,7 @@ NonnullRefPtr<Region> Region::clone()
// Set up a COW region. The parent (this) region becomes COW as well!
m_cow_map.fill(true);
MM.remap_region(current->process().page_directory(), *this);
- return adopt(*new Region(m_range, m_vmo->clone(), m_offset_in_vmo, String(m_name), m_access, true));
+ return Region::create_user_accessible(m_range, m_vmo->clone(), m_offset_in_vmo, m_name, m_access, true);
}
int Region::commit()
@@ -109,7 +114,7 @@ int Region::commit()
return -ENOMEM;
}
vmo().physical_pages()[i] = move(physical_page);
- MM.remap_region_page(*this, i, true);
+ MM.remap_region_page(*this, i);
}
return 0;
}
@@ -134,3 +139,31 @@ size_t Region::amount_shared() const
}
return bytes;
}
+
+NonnullRefPtr<Region> Region::create_user_accessible(const Range& range, const StringView& name, u8 access, bool cow)
+{
+ auto region = adopt(*new Region(range, name, access, cow));
+ region->m_user_accessible = true;
+ return region;
+}
+
+NonnullRefPtr<Region> Region::create_user_accessible(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cow)
+{
+ auto region = adopt(*new Region(range, move(vmobject), offset_in_vmobject, name, access, cow));
+ region->m_user_accessible = true;
+ return region;
+}
+
+NonnullRefPtr<Region> Region::create_user_accessible(const Range& range, NonnullRefPtr<Inode> inode, const StringView& name, u8 access, bool cow)
+{
+ auto region = adopt(*new Region(range, move(inode), name, access, cow));
+ region->m_user_accessible = true;
+ return region;
+}
+
+NonnullRefPtr<Region> Region::create_kernel_only(const Range& range, const StringView& name, u8 access, bool cow)
+{
+ auto region = adopt(*new Region(range, name, access, cow));
+ region->m_user_accessible = false;
+ return region;
+}
diff --git a/Kernel/VM/Region.h b/Kernel/VM/Region.h
index c5c26d5c2c..32a24f293a 100644
--- a/Kernel/VM/Region.h
+++ b/Kernel/VM/Region.h
@@ -18,9 +18,11 @@ public:
Execute = 4,
};
- Region(const Range&, const String&, u8 access, bool cow = false);
- Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmo, const String&, u8 access, bool cow = false);
- Region(const Range&, RefPtr<Inode>&&, const String&, u8 access);
+ static NonnullRefPtr<Region> create_user_accessible(const Range&, const StringView& name, u8 access, bool cow = false);
+ static NonnullRefPtr<Region> create_user_accessible(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access, bool cow = false);
+ static NonnullRefPtr<Region> create_user_accessible(const Range&, NonnullRefPtr<Inode>, const StringView& name, u8 access, bool cow = false);
+ static NonnullRefPtr<Region> create_kernel_only(const Range&, const StringView& name, u8 access, bool cow = false);
+
~Region();
VirtualAddress vaddr() const { return m_range.base(); }
@@ -38,6 +40,8 @@ public:
bool is_shared() const { return m_shared; }
void set_shared(bool shared) { m_shared = shared; }
+ bool is_user_accessible() const { return m_user_accessible; }
+
NonnullRefPtr<Region> clone();
bool contains(VirtualAddress vaddr) const
@@ -97,6 +101,10 @@ public:
}
private:
+ Region(const Range&, const String&, u8 access, bool cow = false);
+ Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmo, const String&, u8 access, bool cow = false);
+ Region(const Range&, RefPtr<Inode>&&, const String&, u8 access, bool cow = false);
+
RefPtr<PageDirectory> m_page_directory;
Range m_range;
size_t m_offset_in_vmo { 0 };
@@ -104,5 +112,6 @@ private:
String m_name;
u8 m_access { 0 };
bool m_shared { false };
+ bool m_user_accessible { false };
Bitmap m_cow_map;
};