author    Andreas Kling <awesomekling@gmail.com> 2019-01-01 02:09:43 +0100
committer Andreas Kling <awesomekling@gmail.com> 2019-01-01 02:09:43 +0100
commit    683185e4aa148c91c2963712f76f693ff363e9db (patch)
tree      a479760f620d6724240b30d501198b062b50665e /Kernel/MemoryManager.h
parent    a5ffa2eec796c0e4a856cd718000e4657cd4b213 (diff)
MM: Allocate page tables from a separate set of physical pages.
The old approach only worked because of an overpermissive accident. There's now a concept of supervisor physical pages that can be allocated. They all sit in the low 4 MB of physical memory and are identity mapped, shared between all processes, and only ring 0 can access them.
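For context, the header changes below declare a new allocate_supervisor_physical_page() backed by a dedicated m_free_supervisor_physical_pages freelist. A minimal sketch of how such an allocator could work, assuming AK-style Vector helpers (is_empty(), take_last()) and kernel utilities like InterruptDisabler and PAGE_SIZE; the body is illustrative only, not the actual code from Kernel/MemoryManager.cpp:

    // Illustrative sketch only -- the real implementation lives in
    // Kernel/MemoryManager.cpp, which this diff does not show.
    RetainPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
    {
        InterruptDisabler disabler;
        if (m_free_supervisor_physical_pages.is_empty())
            return nullptr; // assumed failure mode: freelist exhausted
        auto page = m_free_supervisor_physical_pages.take_last();
        // Supervisor pages sit in the identity-mapped low 4 MB, so the
        // physical address doubles as a valid kernel pointer and the page
        // can be zeroed directly, with no temporary mapping required.
        memset((void*)page->paddr().get(), 0, PAGE_SIZE);
        return page;
    }

Because page tables now come from this identity-mapped pool, the kernel no longer needs to temporarily map them into the current address space before writing to them, which is what lets the patch drop KernelPagingScope and the kernel-alias helpers below.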
Diffstat (limited to 'Kernel/MemoryManager.h')
-rw-r--r--  Kernel/MemoryManager.h | 26
1 file changed, 9 insertions(+), 17 deletions(-)
diff --git a/Kernel/MemoryManager.h b/Kernel/MemoryManager.h
index 1e29a2254a..c5732a3ea8 100644
--- a/Kernel/MemoryManager.h
+++ b/Kernel/MemoryManager.h
@@ -39,15 +39,16 @@ public:
return_to_freelist();
}
- unsigned retain_count() const { return m_retain_count; }
+ unsigned short retain_count() const { return m_retain_count; }
private:
- explicit PhysicalPage(PhysicalAddress paddr);
+ PhysicalPage(PhysicalAddress paddr, bool supervisor);
~PhysicalPage() = delete;
void return_to_freelist();
- unsigned m_retain_count { 1 };
+ unsigned short m_retain_count { 1 };
+ bool m_supervisor { false };
PhysicalAddress m_paddr;
};
@@ -174,16 +175,13 @@ public:
void populate_page_directory(PageDirectory&);
- byte* create_kernel_alias_for_region(Region&);
- void remove_kernel_alias_for_region(Region&, byte*);
-
- void enter_kernel_paging_scope();
void enter_process_paging_scope(Process&);
bool validate_user_read(const Process&, LinearAddress) const;
bool validate_user_write(const Process&, LinearAddress) const;
RetainPtr<PhysicalPage> allocate_physical_page();
+ RetainPtr<PhysicalPage> allocate_supervisor_physical_page();
void remap_region(Process&, Region&);
@@ -196,7 +194,6 @@ private:
void register_region(Region&);
void unregister_region(Region&);
- LinearAddress allocate_linear_address_range(size_t);
void map_region_at_address(PageDirectory&, Region&, LinearAddress, bool user_accessible);
void unmap_range(PageDirectory&, LinearAddress, size_t);
void remap_region_page(PageDirectory&, Region&, unsigned page_index_in_region, bool user_allowed);
@@ -209,8 +206,8 @@ private:
void map_protected(LinearAddress, size_t length);
- void create_identity_mapping(LinearAddress, size_t length);
- void remove_identity_mapping(LinearAddress, size_t);
+ void create_identity_mapping(PageDirectory&, LinearAddress, size_t length);
+ void remove_identity_mapping(PageDirectory&, LinearAddress, size_t);
static Region* region_from_laddr(Process&, LinearAddress);
@@ -305,21 +302,16 @@ private:
OwnPtr<PageDirectory> m_kernel_page_directory;
dword* m_page_table_zero;
- dword* m_page_table_one;
- LinearAddress m_next_laddr;
+ LinearAddress m_quickmap_addr;
Vector<RetainPtr<PhysicalPage>> m_free_physical_pages;
+ Vector<RetainPtr<PhysicalPage>> m_free_supervisor_physical_pages;
HashTable<VMObject*> m_vmos;
HashTable<Region*> m_regions;
};
-struct KernelPagingScope {
- KernelPagingScope() { MM.enter_kernel_paging_scope(); }
- ~KernelPagingScope() { MM.enter_process_paging_scope(*current); }
-};
-
struct ProcessPagingScope {
ProcessPagingScope(Process& process) { MM.enter_process_paging_scope(process); }
~ProcessPagingScope() { MM.enter_process_paging_scope(*current); }
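A note on the surviving RAII helper: ProcessPagingScope enters the given process's page directory on construction and re-enters the current process's on destruction. A hypothetical use site, combining it with the validate_user_read() declared above (the wrapper function itself is invented for illustration):

    // Hypothetical caller, not part of this patch: temporarily enter
    // another process's address space to inspect one of its pointers.
    bool validate_in(Process& other, LinearAddress laddr)
    {
        ProcessPagingScope scope(other); // loads other's page directory
        return MM.validate_user_read(other, laddr);
    } // ~ProcessPagingScope() restores the current process's paging scope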