author | Andreas Kling <kling@serenityos.org> | 2021-01-11 22:08:50 +0100
---|---|---
committer | Andreas Kling <kling@serenityos.org> | 2021-01-11 22:09:40 +0100
commit | f7435dd95f019941edc70c0ec3205489fbc708ca (patch) |
tree | bcd89ff903a22702d5380bedbdcf818b65e7c77d /Kernel/VM |
parent | 7c4ddecacbdb3d8279dbe346e7f7ba17fcdbc109 (diff) |
Kernel: Remove MM_DEBUG debug spam code
This was too spammy to ever actually be used anyway.
Diffstat (limited to 'Kernel/VM')
-rw-r--r-- | Kernel/VM/MemoryManager.cpp | 43
-rw-r--r-- | Kernel/VM/PageDirectory.cpp | 3
-rw-r--r-- | Kernel/VM/PhysicalPage.cpp | 4
-rw-r--r-- | Kernel/VM/Region.cpp | 23

4 files changed, 1 insertion(+), 72 deletions(-)
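Every hunk below removes the same construct: a `dbg()` or `klog()` statement wrapped in `#ifdef MM_DEBUG`, where the `#define MM_DEBUG` at the top of the file was permanently commented out. As a rough standalone sketch of that pattern (with a hypothetical `dbg()` stand-in for the kernel's debug stream, and a plain heap allocation standing in for the page allocator), this is what the preprocessor gate looks like, and why the guarded lines cost nothing at runtime but also can never fire without editing the source and rebuilding:

```cpp
#include <iostream>

// The kernel kept this line commented out, so every #ifdef MM_DEBUG
// block below it compiled to nothing.
//#define MM_DEBUG

// Hypothetical stand-in for the kernel's dbg() stream, which writes
// to the serial debug console rather than stderr.
static std::ostream& dbg() { return std::cerr; }

static void* allocate_page()
{
    void* page = ::operator new(4096);
#ifdef MM_DEBUG
    // Stripped by the preprocessor unless MM_DEBUG is defined at
    // compile time; toggling it requires editing the file and
    // rebuilding the kernel. When enabled, this fires on every
    // allocation -- hence "too spammy to ever actually be used".
    dbg() << "MM: allocate_page vending " << page << '\n';
#endif
    return page;
}

int main()
{
    void* page = allocate_page();
    ::operator delete(page);
    return 0;
}
```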
```diff
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 742a267c1b..a21f854c00 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -41,7 +41,6 @@
 #include <Kernel/VM/PhysicalRegion.h>
 #include <Kernel/VM/SharedInodeVMObject.h>
 
-//#define MM_DEBUG
 //#define PAGE_FAULT_DEBUG
 
 extern u8* start_of_kernel_image;
@@ -159,10 +158,6 @@ void MemoryManager::parse_memory_map()
             continue;
         }
 
-#ifdef MM_DEBUG
-        klog() << "MM: considering memory at " << String::format("%p", (void*)mmap->addr) << " - " << String::format("%p", (void*)(mmap->addr + mmap->len));
-#endif
-
         for (size_t page_base = mmap->addr; page_base <= (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
             auto addr = PhysicalAddress(page_base);
@@ -237,9 +232,6 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
     auto* pd = quickmap_pd(page_directory, page_directory_table_index);
     PageDirectoryEntry& pde = pd[page_directory_index];
     if (!pde.is_present()) {
-#ifdef MM_DEBUG
-        dbg() << "MM: PDE " << page_directory_index << " not present (requested for " << vaddr << "), allocating";
-#endif
         bool did_purge = false;
         auto page_table = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge);
         if (!page_table) {
@@ -255,9 +247,6 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
             ASSERT(!pde.is_present()); // Should have not changed
         }
 
-#ifdef MM_DEBUG
-        dbg() << "MM: PD K" << &page_directory << " (" << (&page_directory == m_kernel_page_directory ? "Kernel" : "User") << ") at " << PhysicalAddress(page_directory.cr3()) << " allocated page table #" << page_directory_index << " (for " << vaddr << ") at " << page_table->paddr();
-#endif
         pde.set_page_table_base(page_table->paddr().get());
         pde.set_user_allowed(true);
         pde.set_present(true);
@@ -303,9 +292,6 @@ void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress va
                 auto result = page_directory.m_page_tables.remove(vaddr.get() & ~0x1fffff);
                 ASSERT(result);
-#ifdef MM_DEBUG
-                dbg() << "MM: Released page table for " << VirtualAddress(vaddr.get() & ~0x1fffff);
-#endif
             }
         }
     }
@@ -314,9 +300,6 @@ void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress va
 void MemoryManager::initialize(u32 cpu)
 {
     auto mm_data = new MemoryManagerData;
-#ifdef MM_DEBUG
-    dbg() << "MM: Processor #" << cpu << " specific data at " << VirtualAddress(mm_data);
-#endif
     Processor::current().set_mm_data(*mm_data);
 
     if (cpu == 0) {
@@ -343,9 +326,6 @@ Region* MemoryManager::user_region_from_vaddr(Process& process, VirtualAddress v
         if (region.contains(vaddr))
            return &region;
     }
-#ifdef MM_DEBUG
-    dbg() << process << " Couldn't find user region for " << vaddr;
-#endif
     return nullptr;
 }
@@ -588,10 +568,6 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
         }
     }
 
-#ifdef MM_DEBUG
-    dbg() << "MM: allocate_user_physical_page vending " << page->paddr();
-#endif
-
     if (should_zero_fill == ShouldZeroFill::Yes) {
         auto* ptr = quickmap_page(*page);
         memset(ptr, 0, PAGE_SIZE);
@@ -671,10 +647,6 @@ RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
         return {};
     }
 
-#ifdef MM_DEBUG
-    dbg() << "MM: allocate_supervisor_physical_page vending " << page->paddr();
-#endif
-
     fast_u32_fill((u32*)page->paddr().offset(0xc0000000).as_ptr(), 0, PAGE_SIZE / sizeof(u32));
     ++m_super_physical_pages_used;
     return page;
@@ -692,17 +664,11 @@ void MemoryManager::enter_process_paging_scope(Process& process)
 void MemoryManager::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
 {
-#ifdef MM_DEBUG
-    dbg() << "MM: Flush " << page_count << " pages at " << vaddr << " on CPU#" << Processor::current().id();
-#endif
     Processor::flush_tlb_local(vaddr, page_count);
 }
 
 void MemoryManager::flush_tlb(const PageDirectory* page_directory, VirtualAddress vaddr, size_t page_count)
 {
-#ifdef MM_DEBUG
-    dbg() << "MM: Flush " << page_count << " pages at " << vaddr;
-#endif
     Processor::flush_tlb(page_directory, vaddr, page_count);
 }
@@ -715,9 +681,6 @@ PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t
     auto& pte = boot_pd3_pt1023[4];
     auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
     if (pte.physical_page_base() != pd_paddr.as_ptr()) {
-#ifdef MM_DEBUG
-        dbg() << "quickmap_pd: Mapping P" << (void*)directory.m_directory_pages[pdpt_index]->paddr().as_ptr() << " at 0xffe04000 in pte @ " << &pte;
-#endif
         pte.set_physical_page_base(pd_paddr.get());
         pte.set_present(true);
         pte.set_writable(true);
@@ -743,9 +706,6 @@ PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
     auto& mm_data = get_data();
     auto& pte = boot_pd3_pt1023[0];
     if (pte.physical_page_base() != pt_paddr.as_ptr()) {
-#ifdef MM_DEBUG
-        dbg() << "quickmap_pt: Mapping P" << (void*)pt_paddr.as_ptr() << " at 0xffe00000 in pte @ " << &pte;
-#endif
         pte.set_physical_page_base(pt_paddr.get());
         pte.set_present(true);
         pte.set_writable(true);
@@ -777,9 +737,6 @@ u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
 
     auto& pte = boot_pd3_pt1023[pte_idx];
     if (pte.physical_page_base() != physical_page.paddr().as_ptr()) {
-#ifdef MM_DEBUG
-        dbg() << "quickmap_page: Mapping P" << (void*)physical_page.paddr().as_ptr() << " at 0xffe08000 in pte @ " << &pte;
-#endif
         pte.set_physical_page_base(physical_page.paddr().get());
         pte.set_present(true);
         pte.set_writable(true);
diff --git a/Kernel/VM/PageDirectory.cpp b/Kernel/VM/PageDirectory.cpp
index b75d42261f..939853810f 100644
--- a/Kernel/VM/PageDirectory.cpp
+++ b/Kernel/VM/PageDirectory.cpp
@@ -150,9 +150,6 @@ PageDirectory::PageDirectory(Process& process, const RangeAllocator* parent_rang
 
 PageDirectory::~PageDirectory()
 {
-#ifdef MM_DEBUG
-    dbg() << "MM: ~PageDirectory K" << this;
-#endif
     ScopedSpinLock lock(s_mm_lock);
     if (m_process)
         cr3_map().remove(cr3());
diff --git a/Kernel/VM/PhysicalPage.cpp b/Kernel/VM/PhysicalPage.cpp
index 50cbda1669..957ab9007d 100644
--- a/Kernel/VM/PhysicalPage.cpp
+++ b/Kernel/VM/PhysicalPage.cpp
@@ -50,10 +50,6 @@ void PhysicalPage::return_to_freelist() const
         MM.deallocate_supervisor_physical_page(*this);
     else
         MM.deallocate_user_physical_page(*this);
-
-#ifdef MM_DEBUG
-    dbgln("MM: {} released to freelist", m_paddr);
-#endif
 }
 
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index 215b3c05d4..007c207717 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -35,7 +35,6 @@
 #include <Kernel/VM/Region.h>
 #include <Kernel/VM/SharedInodeVMObject.h>
 
-//#define MM_DEBUG
 //#define PAGE_FAULT_DEBUG
 
 namespace Kernel {
@@ -110,9 +109,6 @@ OwnPtr<Region> Region::clone(Process& new_owner)
     if (m_shared) {
         ASSERT(!m_stack);
-#ifdef MM_DEBUG
-        dbg() << "Region::clone(): Sharing " << name() << " (" << vaddr() << ")";
-#endif
         if (vmobject().is_inode())
             ASSERT(vmobject().is_shared_inode());
@@ -132,9 +128,6 @@ OwnPtr<Region> Region::clone(Process& new_owner)
     if (!vmobject_clone)
         return {};
 
-#ifdef MM_DEBUG
-    dbg() << "Region::clone(): CoWing " << name() << " (" << vaddr() << ")";
-#endif
     // Set up a COW region. The parent (this) region becomes COW as well!
     remap();
     auto clone_region = Region::create_user_accessible(&new_owner, m_range, vmobject_clone.release_nonnull(), m_offset_in_vmobject, m_name, m_access);
@@ -276,12 +269,8 @@ bool Region::map_individual_page_impl(size_t page_index)
     ASSERT(m_page_directory->get_lock().own_lock());
     auto page_vaddr = vaddr_from_page_index(page_index);
     auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
-    if (!pte) {
-#ifdef MM_DEBUG
-        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << " " << name() << " cannot create PTE for " << page_vaddr;
-#endif
+    if (!pte)
         return false;
-    }
     auto* page = physical_page(page_index);
     if (!page || (!is_readable() && !is_writable())) {
         pte->clear();
@@ -296,9 +285,6 @@ bool Region::map_individual_page_impl(size_t page_index)
         if (Processor::current().has_feature(CPUFeature::NX))
             pte->set_execute_disabled(!is_executable());
         pte->set_user_allowed(is_user_accessible());
-#ifdef MM_DEBUG
-        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte->raw() << "{" << pte << "}) " << name() << " " << page_vaddr << " => " << page->paddr() << " (@" << page << ")";
-#endif
     }
     return true;
 }
@@ -405,9 +391,6 @@ bool Region::map(PageDirectory& page_directory)
     ScopedSpinLock lock(s_mm_lock);
     ScopedSpinLock page_lock(page_directory.get_lock());
     set_page_directory(page_directory);
-#ifdef MM_DEBUG
-    dbg() << "MM: Region::map() will map VMO pages " << first_page_index() << " - " << last_page_index() << " (VMO page count: " << vmobject().page_count() << ")";
-#endif
     size_t page_index = 0;
     while (page_index < page_count()) {
         if (!map_individual_page_impl(page_index))
@@ -576,10 +559,6 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     if (current_thread)
         current_thread->did_inode_fault();
 
-#ifdef MM_DEBUG
-    dbgln("MM: page_in_from_inode ready to read from inode");
-#endif
-
     u8 page_buffer[PAGE_SIZE];
     auto& inode = inode_vmobject.inode();
     auto buffer = UserOrKernelBuffer::for_kernel_buffer(page_buffer);
```
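Nothing replaces these statements in this commit; the logging simply goes away. For comparison, a later style of compile-time debug gate (broadly the direction SerenityOS's `dbgln_if` machinery eventually took) keeps the disabled statement visible to the compiler so it cannot silently rot, while still compiling to nothing when the flag is off. A minimal sketch with hypothetical names, not code from this repository:

```cpp
#include <iostream>
#include <utility>

// Hypothetical compile-time flag; flip to true and rebuild to enable.
static constexpr bool mm_debug = false;

// Unlike an #ifdef block, the disabled branch is still parsed, so it
// keeps compiling against the surrounding code instead of decaying
// into stale debug spam that no longer builds.
template<typename... Args>
static void dbgln_if_mm(Args&&... args)
{
    if constexpr (mm_debug)
        (std::cerr << ... << std::forward<Args>(args)) << '\n';
}

int main()
{
    int page_count = 4;
    dbgln_if_mm("MM: Flush ", page_count, " pages"); // no-op while mm_debug is false
    return 0;
}
```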