author | Andreas Kling <awesomekling@gmail.com> | 2018-11-01 11:30:48 +0100
committer | Andreas Kling <awesomekling@gmail.com> | 2018-11-01 11:36:25 +0100
commit | c45f166c6323224ea1ffc20914a003ef96d3a72b (patch)
tree | 4f24a21b4194cb86b127b2574817ca67d6607ded /Kernel/MemoryManager.h
parent | 1da0a7c949d03c7cb96208b7779e48f5241371ae (diff)
download | serenity-c45f166c6323224ea1ffc20914a003ef96d3a72b.zip
More work on per-process page directories. It basically works now!
I spent some time stuck on a problem where processes would clobber each
other's stacks. Took me a moment to figure out that their stacks
were allocated in the sub-4MB linear address range which is shared
between all processes. Oops!
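
The bug described above boils down to the linear-address allocator handing out per-process regions (including stacks) inside the low range that every page directory shares. Below is a minimal sketch of the corrected idea, assuming a simple bump allocator that starts above a shared 4MB identity-mapped region; the class name, constants, and rounding are illustrative assumptions, not the code from this commit.

```cpp
#include <cstddef>
#include <cstdint>

typedef uint32_t dword;

// Minimal stand-in for the kernel's LinearAddress type (assumed shape).
struct LinearAddress {
    dword value { 0 };
};

// Sketch: a bump allocator for per-process linear address ranges that starts
// above the low region shared by every page directory (assumed here to be the
// first 4MB covered by the shared kernel page tables). Regions placed beyond
// that boundary are mapped only by the owning process's page directory.
class LinearRangeAllocator {
public:
    LinearAddress allocate_linear_address_range(size_t size)
    {
        // Round the request up to whole 4KB pages.
        size = (size + 4095u) & ~size_t(4095);
        LinearAddress address { m_next_laddr };
        m_next_laddr += static_cast<dword>(size);
        return address;
    }

private:
    // Start handing out addresses beyond the shared sub-4MB range.
    dword m_next_laddr { 4 * 1024 * 1024 };
};
```

With the starting address above the shared range, each process's stack lands in pages that only its own page directory maps, so one process can no longer scribble over another's stack through the shared low mappings.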
Diffstat (limited to 'Kernel/MemoryManager.h')
-rw-r--r-- | Kernel/MemoryManager.h | 18 |
1 file changed, 15 insertions, 3 deletions
```diff
diff --git a/Kernel/MemoryManager.h b/Kernel/MemoryManager.h
index 6a2f5a8c99..34d173d343 100644
--- a/Kernel/MemoryManager.h
+++ b/Kernel/MemoryManager.h
@@ -41,7 +41,7 @@ class MemoryManager {
 public:
     static MemoryManager& the() PURE;
 
-    PhysicalAddress pageDirectoryBase() const { return PhysicalAddress(reinterpret_cast<dword>(m_pageDirectory)); }
+    PhysicalAddress pageDirectoryBase() const { return PhysicalAddress(reinterpret_cast<dword>(m_kernel_page_directory)); }
 
     static void initialize();
 
@@ -65,12 +65,22 @@ public:
     void registerZone(Zone&);
     void unregisterZone(Zone&);
 
-    void populatePageDirectory(Task&);
+    void populate_page_directory(Task&);
+
+    byte* create_kernel_alias_for_region(Task::Region&);
+    void remove_kernel_alias_for_region(Task::Region&, byte*);
+
+    void enter_kernel_paging_scope();
+    void enter_task_paging_scope(Task&);
 
 private:
     MemoryManager();
     ~MemoryManager();
 
+    LinearAddress allocate_linear_address_range(size_t);
+    void map_region_at_address(dword* page_directory, Task::Region&, LinearAddress);
+    void unmap_range(dword* page_directory, LinearAddress, size_t);
+
     void initializePaging();
     void flushEntireTLB();
     void flushTLB(LinearAddress);
@@ -162,10 +172,12 @@ private:
     PageTableEntry ensurePTE(dword* pageDirectory, LinearAddress);
 
-    dword* m_pageDirectory;
+    dword* m_kernel_page_directory;
     dword* m_pageTableZero;
     dword* m_pageTableOne;
 
+    LinearAddress m_next_laddr;
+
     HashTable<Zone*> m_zones;
     Vector<PhysicalAddress> m_freePages;
```
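
The new enter_kernel_paging_scope() / enter_task_paging_scope(Task&) declarations suggest that per-process page directories are activated by reloading CR3 with the relevant directory's physical base. The sketch below shows what such a switch could look like on i386; the Task layout, the global, and the helper are hypothetical stand-ins for illustration, not the implementation behind this header.

```cpp
#include <cstdint>

typedef uint32_t dword;

// Hypothetical stand-in for the kernel's Task type; only what the sketch needs.
struct Task {
    dword* page_directory { nullptr }; // physical base of the task's page directory (assumed field)
};

static dword* g_kernel_page_directory; // shared directory holding only kernel mappings (assumed global)

// Load a page directory's physical base into CR3. Every linear address
// translated after this instruction goes through the new directory, so the
// caller decides whose mappings are currently visible.
static inline void load_page_directory(dword physical_base)
{
    asm volatile("mov %0, %%cr3" ::"r"(physical_base) : "memory");
}

// Sketches of what the two "scope" helpers from the diff might boil down to.
void enter_kernel_paging_scope()
{
    load_page_directory(reinterpret_cast<dword>(g_kernel_page_directory));
}

void enter_task_paging_scope(Task& task)
{
    load_page_directory(reinterpret_cast<dword>(task.page_directory));
}
```

Presumably a caller switches into a task's scope before running it and back to the kernel scope when it needs kernel-only mappings; that usage is inferred from the names and is not shown in this diff.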