author     Liav A <liavalb@gmail.com>               2020-01-09 23:29:31 +0200
committer  Andreas Kling <awesomekling@gmail.com>   2020-01-14 15:38:58 +0100
commit     d2b41010c550e7574698dc7e74c6f24958ef1737 (patch)
tree       4b7d4e29d01f96a9e2e881a6f5f9a87fd57d7b27 /Kernel/VM
parent     b913e300111c6dc403a8d0d6691890de14ea9de7 (diff)
download   serenity-d2b41010c550e7574698dc7e74c6f24958ef1737.zip
Kernel: Change Region allocation helpers
We can now create a cacheable Region, so when map() is called, if a
Region is cacheable, all of the virtual memory space allocated to it
is mapped with caching enabled (its page-table entries are not marked
cache-disabled); a non-cacheable Region gets cache-disabled entries
instead.

In addition, OS components can now create a Region that is mapped to
a specific physical address by using the appropriate helper method.
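As an illustration of the second point, here is a hypothetical caller of the new physical-address overload (its signature appears in the MemoryManager.h hunk below). The device name, base address, and size are invented for the example:

```cpp
// Hypothetical usage sketch (not part of this commit): a kernel component
// maps four pages of device MMIO uncached via the new overload. The physical
// base address and the region name are made up for illustration.
void map_example_device_registers()
{
    auto region = MM.allocate_kernel_region(
        PhysicalAddress(0xfd000000), // hypothetical, page-aligned MMIO base
        PAGE_SIZE * 4,               // size must be a multiple of PAGE_SIZE
        "Example MMIO",
        Region::Access::Read | Region::Access::Write);
    ASSERT(region);
    // user_accessible and cacheable both default to false for this overload,
    // so the mapping is kernel-only and its PTEs get the cache-disabled bit.
}
```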
Diffstat (limited to 'Kernel/VM')
-rw-r--r--   Kernel/VM/MemoryManager.cpp | 40
-rw-r--r--   Kernel/VM/MemoryManager.h   |  7
-rw-r--r--   Kernel/VM/PageDirectory.cpp | 13
-rw-r--r--   Kernel/VM/PageDirectory.h   |  2
-rw-r--r--   Kernel/VM/Region.cpp        | 43
-rw-r--r--   Kernel/VM/Region.h          | 19
6 files changed, 73 insertions, 51 deletions
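For context on the "cache disabled" wording: on x86, each page-table entry carries a PCD (Page Cache Disable) bit, and the new m_cacheable flag drives it via pte.set_cache_disabled(!m_cacheable) in the Region.cpp hunk below. A minimal standalone sketch of that bit, assuming a simplified 32-bit PTE layout rather than the kernel's real PageTableEntry class:

```cpp
#include <cstdint>

// Simplified 32-bit x86 page-table entry; bit 4 is PCD (Page Cache Disable).
// This is an illustrative stand-in, not SerenityOS's actual PageTableEntry.
struct SimplePageTableEntry {
    uint32_t raw { 0 };
    static constexpr uint32_t PCD = 1u << 4;
    void set_cache_disabled(bool disable)
    {
        raw = disable ? (raw | PCD) : (raw & ~PCD);
    }
};
// With this change, mapping a cacheable Region leaves PCD clear so the CPU
// may cache accesses; a non-cacheable Region sets PCD, as MMIO mappings need.
```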
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 3cde372f64..c764a4c133 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -321,7 +321,7 @@ void MemoryManager::create_identity_mapping(PageDirectory& page_directory, Virtu
         pte.set_user_allowed(false);
         pte.set_present(true);
         pte.set_writable(true);
-        page_directory.flush(pte_address);
+        flush_tlb(pte_address);
     }
 }
 
@@ -394,7 +394,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     return region->handle_fault(fault);
 }
 
-OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, bool should_commit)
+OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, bool should_commit, bool cacheable)
 {
     InterruptDisabler disabler;
     ASSERT(!(size % PAGE_SIZE));
@@ -402,28 +402,47 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringVi
     ASSERT(range.is_valid());
     OwnPtr<Region> region;
     if (user_accessible)
-        region = Region::create_user_accessible(range, name, access);
+        region = Region::create_user_accessible(range, name, access, cacheable);
     else
-        region = Region::create_kernel_only(range, name, access);
-    region->map(kernel_page_directory());
+        region = Region::create_kernel_only(range, name, access, cacheable);
+    region->set_page_directory(kernel_page_directory());
     // FIXME: It would be cool if these could zero-fill on demand instead.
     if (should_commit)
         region->commit();
     return region;
 }
 
-OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access)
+OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
 {
-    return allocate_kernel_region(size, name, access, true);
+    InterruptDisabler disabler;
+    ASSERT(!(size % PAGE_SIZE));
+    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
+    ASSERT(range.is_valid());
+    OwnPtr<Region> region;
+    if (user_accessible)
+        region = Region::create_user_accessible(range, AnonymousVMObject::create_for_physical_range(paddr, size), 0, name, access, cacheable);
+    else
+        region = Region::create_kernel_only(range, AnonymousVMObject::create_for_physical_range(paddr, size), 0, name, access, cacheable);
+    region->map(kernel_page_directory());
+    return region;
 }
 
-OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, const StringView& name, u8 access)
+OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access, bool cacheable)
+{
+    return allocate_kernel_region(size, name, access, true, true, cacheable);
+}
+
+OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
 {
     InterruptDisabler disabler;
     ASSERT(!(size % PAGE_SIZE));
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     ASSERT(range.is_valid());
-    auto region = make<Region>(range, vmobject, 0, name, access);
+    OwnPtr<Region> region;
+    if (user_accessible)
+        region = Region::create_user_accessible(range, vmobject, 0, name, access, cacheable);
+    else
+        region = Region::create_kernel_only(range, vmobject, 0, name, access, cacheable);
     region->map(kernel_page_directory());
     return region;
 }
@@ -573,6 +592,9 @@ void MemoryManager::flush_entire_tlb()
 
 void MemoryManager::flush_tlb(VirtualAddress vaddr)
 {
+#ifdef MM_DEBUG
+    dbgprintf("MM: Flush page V%p\n", vaddr.get());
+#endif
     asm volatile("invlpg %0"
         :
         : "m"(*(char*)vaddr.get())
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 282bfd13ae..d3af2325af 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -62,9 +62,10 @@ public:
 
     void map_for_kernel(VirtualAddress, PhysicalAddress, bool cache_disabled = false);
 
-    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true);
-    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access);
-    OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access);
+    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true, bool cacheable = true);
+    OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = false);
+    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = false);
+    OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access, bool cacheable = false);
 
     unsigned user_physical_pages() const { return m_user_physical_pages; }
     unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }
diff --git a/Kernel/VM/PageDirectory.cpp b/Kernel/VM/PageDirectory.cpp
index 729cf4c9be..de230e8c35 100644
--- a/Kernel/VM/PageDirectory.cpp
+++ b/Kernel/VM/PageDirectory.cpp
@@ -74,15 +74,4 @@ PageDirectory::~PageDirectory()
 #endif
     InterruptDisabler disabler;
     cr3_map().remove(cr3());
-}
-
-void PageDirectory::flush(VirtualAddress vaddr)
-{
-#ifdef MM_DEBUG
-    dbgprintf("MM: Flush page V%p\n", vaddr.get());
-#endif
-    if (!current)
-        return;
-    if (this == &MM.kernel_page_directory() || &current->process().page_directory() == this)
-        MM.flush_tlb(vaddr);
-}
+}
\ No newline at end of file
diff --git a/Kernel/VM/PageDirectory.h b/Kernel/VM/PageDirectory.h
index 2d70eb0fe1..53b4d5d410 100644
--- a/Kernel/VM/PageDirectory.h
+++ b/Kernel/VM/PageDirectory.h
@@ -24,8 +24,6 @@ public:
     u32 cr3() const { return m_directory_table->paddr().get(); }
     PageDirectoryPointerTable& table() { return *reinterpret_cast<PageDirectoryPointerTable*>(cr3()); }
 
-    void flush(VirtualAddress);
-
     RangeAllocator& range_allocator() { return m_range_allocator; }
 
     Process* process() { return m_process; }
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index 244103a4f2..f189089cb2 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -9,30 +9,33 @@
 //#define MM_DEBUG
 //#define PAGE_FAULT_DEBUG
 
-Region::Region(const Range& range, const String& name, u8 access)
+Region::Region(const Range& range, const String& name, u8 access, bool cacheable)
     : m_range(range)
     , m_vmobject(AnonymousVMObject::create_with_size(size()))
     , m_name(name)
     , m_access(access)
+    , m_cacheable(cacheable)
 {
     MM.register_region(*this);
 }
 
-Region::Region(const Range& range, NonnullRefPtr<Inode> inode, const String& name, u8 access)
+Region::Region(const Range& range, NonnullRefPtr<Inode> inode, const String& name, u8 access, bool cacheable)
     : m_range(range)
     , m_vmobject(InodeVMObject::create_with_inode(*inode))
     , m_name(name)
     , m_access(access)
+    , m_cacheable(cacheable)
 {
     MM.register_region(*this);
 }
 
-Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, u8 access)
+Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, u8 access, bool cacheable)
     : m_range(range)
     , m_offset_in_vmobject(offset_in_vmobject)
     , m_vmobject(move(vmobject))
     , m_name(name)
     , m_access(access)
+    , m_cacheable(cacheable)
 {
     MM.register_region(*this);
 }
@@ -164,37 +167,37 @@ size_t Region::amount_shared() const
     return bytes;
 }
 
-NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, const StringView& name, u8 access)
+NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, const StringView& name, u8 access, bool cacheable)
 {
-    auto region = make<Region>(range, name, access);
+    auto region = make<Region>(range, name, access, cacheable);
     region->m_user_accessible = true;
     return region;
 }
 
-NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access)
+NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable)
 {
-    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access);
+    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable);
     region->m_user_accessible = true;
     return region;
 }
 
-NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, NonnullRefPtr<Inode> inode, const StringView& name, u8 access)
+NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, NonnullRefPtr<Inode> inode, const StringView& name, u8 access, bool cacheable)
 {
-    auto region = make<Region>(range, move(inode), name, access);
+    auto region = make<Region>(range, move(inode), name, access, cacheable);
     region->m_user_accessible = true;
     return region;
 }
 
-NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, const StringView& name, u8 access)
+NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable)
 {
-    auto region = make<Region>(range, name, access);
+    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable);
     region->m_user_accessible = false;
     return region;
 }
 
-NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access)
+NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, const StringView& name, u8 access, bool cacheable)
 {
-    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access);
+    auto region = make<Region>(range, name, access, cacheable);
     region->m_user_accessible = false;
     return region;
 }
@@ -228,6 +231,7 @@ void Region::map_individual_page_impl(size_t page_index)
         pte.set_physical_page_base(0);
         pte.set_present(false);
     } else {
+        pte.set_cache_disabled(!m_cacheable);
         pte.set_physical_page_base(physical_page->paddr().get());
         pte.set_present(is_readable());
         if (should_cow(page_index))
@@ -237,11 +241,11 @@ void Region::map_individual_page_impl(size_t page_index)
         if (g_cpu_supports_nx)
             pte.set_execute_disabled(!is_executable());
         pte.set_user_allowed(is_user_accessible());
-    }
-    m_page_directory->flush(page_vaddr);
 #ifdef MM_DEBUG
         dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << physical_page->paddr() << " (@" << physical_page.ptr() << ")";
 #endif
+    }
+    MM.flush_tlb(page_vaddr);
 }
 
 void Region::remap_page(size_t page_index)
@@ -263,7 +267,7 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
         pte.set_present(false);
         pte.set_writable(false);
         pte.set_user_allowed(false);
-        m_page_directory->flush(vaddr);
+        MM.flush_tlb(vaddr);
 #ifdef MM_DEBUG
         auto& physical_page = vmobject().physical_pages()[first_page_index() + i];
         dbgprintf("MM: >> Unmapped V%p => P%p <<\n", vaddr.get(), physical_page ? physical_page->paddr().get() : 0);
@@ -274,11 +278,16 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
     m_page_directory = nullptr;
 }
 
-void Region::map(PageDirectory& page_directory)
+void Region::set_page_directory(PageDirectory& page_directory)
 {
     ASSERT(!m_page_directory || m_page_directory == &page_directory);
     InterruptDisabler disabler;
     m_page_directory = page_directory;
+}
+void Region::map(PageDirectory& page_directory)
+{
+    set_page_directory(page_directory);
+    InterruptDisabler disabler;
 #ifdef MM_DEBUG
     dbgprintf("MM: Region::map() will map VMO pages %u - %u (VMO page count: %u)\n", first_page_index(), last_page_index(), vmobject().page_count());
 #endif
diff --git a/Kernel/VM/Region.h b/Kernel/VM/Region.h
index 3af580af79..377344d348 100644
--- a/Kernel/VM/Region.h
+++ b/Kernel/VM/Region.h
@@ -26,11 +26,11 @@ public:
         Execute = 4,
     };
 
-    static NonnullOwnPtr<Region> create_user_accessible(const Range&, const StringView& name, u8 access);
-    static NonnullOwnPtr<Region> create_user_accessible(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access);
-    static NonnullOwnPtr<Region> create_user_accessible(const Range&, NonnullRefPtr<Inode>, const StringView& name, u8 access);
-    static NonnullOwnPtr<Region> create_kernel_only(const Range&, const StringView& name, u8 access);
-    static NonnullOwnPtr<Region> create_kernel_only(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access);
+    static NonnullOwnPtr<Region> create_user_accessible(const Range&, const StringView& name, u8 access, bool cacheable = true);
+    static NonnullOwnPtr<Region> create_user_accessible(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable = true);
+    static NonnullOwnPtr<Region> create_user_accessible(const Range&, NonnullRefPtr<Inode>, const StringView& name, u8 access, bool cacheable = true);
+    static NonnullOwnPtr<Region> create_kernel_only(const Range&, const StringView& name, u8 access, bool cacheable = true);
+    static NonnullOwnPtr<Region> create_kernel_only(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable = true);
 
     ~Region();
 
@@ -40,6 +40,7 @@ public:
     bool is_readable() const { return m_access & Access::Read; }
     bool is_writable() const { return m_access & Access::Write; }
     bool is_executable() const { return m_access & Access::Execute; }
+    bool is_cacheable() const { return m_cacheable; }
 
     const String& name() const { return m_name; }
     unsigned access() const { return m_access; }
@@ -115,6 +116,7 @@ public:
     void set_writable(bool b) { set_access_bit(Access::Write, b); }
     void set_executable(bool b) { set_access_bit(Access::Execute, b); }
 
+    void set_page_directory(PageDirectory&);
     void map(PageDirectory&);
 
     enum class ShouldDeallocateVirtualMemoryRange {
         No,
@@ -130,9 +132,9 @@ public:
     Region* m_prev { nullptr };
 
     // NOTE: These are public so we can make<> them.
-    Region(const Range&, const String&, u8 access);
-    Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String&, u8 access);
-    Region(const Range&, NonnullRefPtr<Inode>, const String&, u8 access);
+    Region(const Range&, const String&, u8 access, bool cacheable);
+    Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String&, u8 access, bool cacheable);
+    Region(const Range&, NonnullRefPtr<Inode>, const String&, u8 access, bool cacheable);
 
 private:
     Bitmap& ensure_cow_map() const;
@@ -159,6 +161,7 @@ private:
     u8 m_access { 0 };
     bool m_shared { false };
     bool m_user_accessible { false };
+    bool m_cacheable { false };
     bool m_stack { false };
    bool m_mmap { false };
     mutable OwnPtr<Bitmap> m_cow_map;
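One behavioral detail worth noting from the hunks above: the anonymous allocate_kernel_region() overload now calls set_page_directory() instead of map(), so page-table entries are only written later, when commit() populates the region page by page. A condensed, paraphrased sketch of that flow, assuming MemoryManager member context (not verbatim from the diff):

```cpp
// Paraphrased from the MemoryManager.cpp hunk above; member context of
// MemoryManager is assumed, and error handling is omitted.
OwnPtr<Region> allocate_kernel_region_sketch(size_t size, const StringView& name, u8 access, bool cacheable)
{
    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
    auto region = Region::create_kernel_only(range, name, access, cacheable);
    region->set_page_directory(kernel_page_directory()); // record the PD; no PTEs written yet
    region->commit(); // physical pages are allocated and mapped here, with
                      // per-page TLB flushes now going through MM.flush_tlb()
                      // rather than the removed PageDirectory::flush()
    return region;
}
```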