summaryrefslogtreecommitdiff
path: root/Kernel/Coredump.cpp
diff options
context:
space:
mode:
authorAndreas Kling <kling@serenityos.org>2022-08-23 12:28:04 +0200
committerAndreas Kling <kling@serenityos.org>2022-08-24 14:57:51 +0200
commitdc9d2c1b10999a177f27b0f0f8c3ee6df3a61dad (patch)
tree6b7d5a02a206c1f9e4d40d09c641110fa20b5db3 /Kernel/Coredump.cpp
parent352d6545a99955771c14736fda99d0ba6b124b60 (diff)
downloadserenity-dc9d2c1b10999a177f27b0f0f8c3ee6df3a61dad.zip
Kernel: Wrap RegionTree objects in SpinlockProtected
This makes locking them much more straightforward, and we can remove a bunch of confusing use of AddressSpace::m_lock. That lock will also be converted to use of SpinlockProtected in a subsequent patch.
Diffstat (limited to 'Kernel/Coredump.cpp')
-rw-r--r--Kernel/Coredump.cpp152
1 file changed, 80 insertions, 72 deletions
diff --git a/Kernel/Coredump.cpp b/Kernel/Coredump.cpp
index 1348f232f8..1c7f3add12 100644
--- a/Kernel/Coredump.cpp
+++ b/Kernel/Coredump.cpp
@@ -46,16 +46,18 @@ Coredump::Coredump(NonnullLockRefPtr<Process> process, NonnullLockRefPtr<OpenFil
, m_description(move(description))
{
m_num_program_headers = 0;
- for ([[maybe_unused]] auto& region : m_process->address_space().regions()) {
+ m_process->address_space().region_tree().with([&](auto& region_tree) {
+ for (auto& region : region_tree.regions()) {
#if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
- if (looks_like_userspace_heap_region(region))
- continue;
+ if (looks_like_userspace_heap_region(region))
+ continue;
#endif
- if (region.access() == Memory::Region::Access::None)
- continue;
- ++m_num_program_headers;
- }
+ if (region.access() == Memory::Region::Access::None)
+ continue;
+ ++m_num_program_headers;
+ }
+ });
++m_num_program_headers; // +1 for NOTE segment
}
@@ -133,37 +135,39 @@ ErrorOr<void> Coredump::write_elf_header()
ErrorOr<void> Coredump::write_program_headers(size_t notes_size)
{
size_t offset = sizeof(ElfW(Ehdr)) + m_num_program_headers * sizeof(ElfW(Phdr));
- for (auto& region : m_process->address_space().regions()) {
+ m_process->address_space().region_tree().with([&](auto& region_tree) {
+ for (auto& region : region_tree.regions()) {
#if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
- if (looks_like_userspace_heap_region(region))
- continue;
+ if (looks_like_userspace_heap_region(region))
+ continue;
#endif
- if (region.access() == Memory::Region::Access::None)
- continue;
+ if (region.access() == Memory::Region::Access::None)
+ continue;
- ElfW(Phdr) phdr {};
+ ElfW(Phdr) phdr {};
- phdr.p_type = PT_LOAD;
- phdr.p_offset = offset;
- phdr.p_vaddr = region.vaddr().get();
- phdr.p_paddr = 0;
+ phdr.p_type = PT_LOAD;
+ phdr.p_offset = offset;
+ phdr.p_vaddr = region.vaddr().get();
+ phdr.p_paddr = 0;
- phdr.p_filesz = region.page_count() * PAGE_SIZE;
- phdr.p_memsz = region.page_count() * PAGE_SIZE;
- phdr.p_align = 0;
+ phdr.p_filesz = region.page_count() * PAGE_SIZE;
+ phdr.p_memsz = region.page_count() * PAGE_SIZE;
+ phdr.p_align = 0;
- phdr.p_flags = region.is_readable() ? PF_R : 0;
- if (region.is_writable())
- phdr.p_flags |= PF_W;
- if (region.is_executable())
- phdr.p_flags |= PF_X;
+ phdr.p_flags = region.is_readable() ? PF_R : 0;
+ if (region.is_writable())
+ phdr.p_flags |= PF_W;
+ if (region.is_executable())
+ phdr.p_flags |= PF_X;
- offset += phdr.p_filesz;
+ offset += phdr.p_filesz;
- [[maybe_unused]] auto rc = m_description->write(UserOrKernelBuffer::for_kernel_buffer(reinterpret_cast<uint8_t*>(&phdr)), sizeof(ElfW(Phdr)));
- }
+ [[maybe_unused]] auto rc = m_description->write(UserOrKernelBuffer::for_kernel_buffer(reinterpret_cast<uint8_t*>(&phdr)), sizeof(ElfW(Phdr)));
+ }
+ });
ElfW(Phdr) notes_pheader {};
notes_pheader.p_type = PT_NOTE;
@@ -184,36 +188,38 @@ ErrorOr<void> Coredump::write_regions()
{
u8 zero_buffer[PAGE_SIZE] = {};
- for (auto& region : m_process->address_space().regions()) {
- VERIFY(!region.is_kernel());
+ return m_process->address_space().region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
+ for (auto& region : region_tree.regions()) {
+ VERIFY(!region.is_kernel());
#if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
- if (looks_like_userspace_heap_region(region))
- continue;
+ if (looks_like_userspace_heap_region(region))
+ continue;
#endif
- if (region.access() == Memory::Region::Access::None)
- continue;
-
- // If we crashed in the middle of mapping in Regions, they do not have a page directory yet, and will crash on a remap() call
- if (!region.is_mapped())
- continue;
-
- region.set_readable(true);
- region.remap();
-
- for (size_t i = 0; i < region.page_count(); i++) {
- auto page = region.physical_page(i);
- auto src_buffer = [&]() -> ErrorOr<UserOrKernelBuffer> {
- if (page)
- return UserOrKernelBuffer::for_user_buffer(reinterpret_cast<uint8_t*>((region.vaddr().as_ptr() + (i * PAGE_SIZE))), PAGE_SIZE);
- // If the current page is not backed by a physical page, we zero it in the coredump file.
- return UserOrKernelBuffer::for_kernel_buffer(zero_buffer);
- }();
- TRY(m_description->write(src_buffer.value(), PAGE_SIZE));
+ if (region.access() == Memory::Region::Access::None)
+ continue;
+
+ // If we crashed in the middle of mapping in Regions, they do not have a page directory yet, and will crash on a remap() call
+ if (!region.is_mapped())
+ continue;
+
+ region.set_readable(true);
+ region.remap();
+
+ for (size_t i = 0; i < region.page_count(); i++) {
+ auto page = region.physical_page(i);
+ auto src_buffer = [&]() -> ErrorOr<UserOrKernelBuffer> {
+ if (page)
+ return UserOrKernelBuffer::for_user_buffer(reinterpret_cast<uint8_t*>((region.vaddr().as_ptr() + (i * PAGE_SIZE))), PAGE_SIZE);
+ // If the current page is not backed by a physical page, we zero it in the coredump file.
+ return UserOrKernelBuffer::for_kernel_buffer(zero_buffer);
+ }();
+ TRY(m_description->write(src_buffer.value(), PAGE_SIZE));
+ }
}
- }
- return {};
+ return {};
+ });
}
ErrorOr<void> Coredump::write_notes_segment(ReadonlyBytes notes_segment)
@@ -273,33 +279,35 @@ ErrorOr<void> Coredump::create_notes_threads_data(auto& builder) const
ErrorOr<void> Coredump::create_notes_regions_data(auto& builder) const
{
size_t region_index = 0;
- for (auto const& region : m_process->address_space().regions()) {
+ return m_process->address_space().region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
+ for (auto const& region : region_tree.regions()) {
#if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
- if (looks_like_userspace_heap_region(region))
- continue;
+ if (looks_like_userspace_heap_region(region))
+ continue;
#endif
- if (region.access() == Memory::Region::Access::None)
- continue;
+ if (region.access() == Memory::Region::Access::None)
+ continue;
- ELF::Core::MemoryRegionInfo info {};
- info.header.type = ELF::Core::NotesEntryHeader::Type::MemoryRegionInfo;
+ ELF::Core::MemoryRegionInfo info {};
+ info.header.type = ELF::Core::NotesEntryHeader::Type::MemoryRegionInfo;
- info.region_start = region.vaddr().get();
- info.region_end = region.vaddr().offset(region.size()).get();
- info.program_header_index = region_index++;
+ info.region_start = region.vaddr().get();
+ info.region_end = region.vaddr().offset(region.size()).get();
+ info.program_header_index = region_index++;
- TRY(builder.append_bytes(ReadonlyBytes { (void*)&info, sizeof(info) }));
+ TRY(builder.append_bytes(ReadonlyBytes { (void*)&info, sizeof(info) }));
- // NOTE: The region name *is* null-terminated, so the following is ok:
- auto name = region.name();
- if (name.is_empty())
- TRY(builder.append('\0'));
- else
- TRY(builder.append(name.characters_without_null_termination(), name.length() + 1));
- }
- return {};
+ // NOTE: The region name *is* null-terminated, so the following is ok:
+ auto name = region.name();
+ if (name.is_empty())
+ TRY(builder.append('\0'));
+ else
+ TRY(builder.append(name.characters_without_null_termination(), name.length() + 1));
+ }
+ return {};
+ });
}
ErrorOr<void> Coredump::create_notes_metadata_data(auto& builder) const