summary refs log tree commit diff
path: root/Kernel/Coredump.cpp
diff options
context:
space:
mode:
author Andreas Kling <kling@serenityos.org> 2022-08-23 20:30:12 +0200
committer Andreas Kling <kling@serenityos.org> 2022-08-24 14:57:51 +0200
commit da24a937f5a0d647777a5161880625c34fe37234 (patch)
tree 939d709f81ca58b452dcf6b9391b44b08006da72 /Kernel/Coredump.cpp
parent d3e8eb591877f7402a9deac627d57dd92b109e5e (diff)
download serenity-da24a937f5a0d647777a5161880625c34fe37234.zip
Kernel: Don't wrap AddressSpace's RegionTree in SpinlockProtected
Now that AddressSpace itself is always SpinlockProtected, we don't need to also wrap the RegionTree. Whoever has the AddressSpace locked is free to poke around its tree.
Diffstat (limited to 'Kernel/Coredump.cpp')
-rw-r--r-- Kernel/Coredump.cpp 156
1 files changed, 74 insertions, 82 deletions
diff --git a/Kernel/Coredump.cpp b/Kernel/Coredump.cpp
index a57ac038c0..cbcb57b43b 100644
--- a/Kernel/Coredump.cpp
+++ b/Kernel/Coredump.cpp
@@ -47,18 +47,16 @@ Coredump::Coredump(NonnullLockRefPtr<Process> process, NonnullLockRefPtr<OpenFil
{
m_num_program_headers = 0;
m_process->address_space().with([&](auto& space) {
- space->region_tree().with([&](auto& region_tree) {
- for (auto& region : region_tree.regions()) {
+ for (auto& region : space->region_tree().regions()) {
#if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
- if (looks_like_userspace_heap_region(region))
- continue;
+ if (looks_like_userspace_heap_region(region))
+ continue;
#endif
- if (region.access() == Memory::Region::Access::None)
- continue;
- ++m_num_program_headers;
- }
- });
+ if (region.access() == Memory::Region::Access::None)
+ continue;
+ ++m_num_program_headers;
+ }
});
++m_num_program_headers; // +1 for NOTE segment
}
@@ -138,39 +136,37 @@ ErrorOr<void> Coredump::write_program_headers(size_t notes_size)
{
size_t offset = sizeof(ElfW(Ehdr)) + m_num_program_headers * sizeof(ElfW(Phdr));
m_process->address_space().with([&](auto& space) {
- space->region_tree().with([&](auto& region_tree) {
- for (auto& region : region_tree.regions()) {
+ for (auto& region : space->region_tree().regions()) {
#if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
- if (looks_like_userspace_heap_region(region))
- continue;
+ if (looks_like_userspace_heap_region(region))
+ continue;
#endif
- if (region.access() == Memory::Region::Access::None)
- continue;
+ if (region.access() == Memory::Region::Access::None)
+ continue;
- ElfW(Phdr) phdr {};
+ ElfW(Phdr) phdr {};
- phdr.p_type = PT_LOAD;
- phdr.p_offset = offset;
- phdr.p_vaddr = region.vaddr().get();
- phdr.p_paddr = 0;
+ phdr.p_type = PT_LOAD;
+ phdr.p_offset = offset;
+ phdr.p_vaddr = region.vaddr().get();
+ phdr.p_paddr = 0;
- phdr.p_filesz = region.page_count() * PAGE_SIZE;
- phdr.p_memsz = region.page_count() * PAGE_SIZE;
- phdr.p_align = 0;
+ phdr.p_filesz = region.page_count() * PAGE_SIZE;
+ phdr.p_memsz = region.page_count() * PAGE_SIZE;
+ phdr.p_align = 0;
- phdr.p_flags = region.is_readable() ? PF_R : 0;
- if (region.is_writable())
- phdr.p_flags |= PF_W;
- if (region.is_executable())
- phdr.p_flags |= PF_X;
+ phdr.p_flags = region.is_readable() ? PF_R : 0;
+ if (region.is_writable())
+ phdr.p_flags |= PF_W;
+ if (region.is_executable())
+ phdr.p_flags |= PF_X;
- offset += phdr.p_filesz;
+ offset += phdr.p_filesz;
- [[maybe_unused]] auto rc = m_description->write(UserOrKernelBuffer::for_kernel_buffer(reinterpret_cast<uint8_t*>(&phdr)), sizeof(ElfW(Phdr)));
- }
- });
+ [[maybe_unused]] auto rc = m_description->write(UserOrKernelBuffer::for_kernel_buffer(reinterpret_cast<uint8_t*>(&phdr)), sizeof(ElfW(Phdr)));
+ }
});
ElfW(Phdr) notes_pheader {};
@@ -192,39 +188,37 @@ ErrorOr<void> Coredump::write_regions()
{
u8 zero_buffer[PAGE_SIZE] = {};
- return m_process->address_space().with([&](auto& space) {
- return space->region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
- for (auto& region : region_tree.regions()) {
- VERIFY(!region.is_kernel());
+ return m_process->address_space().with([&](auto& space) -> ErrorOr<void> {
+ for (auto& region : space->region_tree().regions()) {
+ VERIFY(!region.is_kernel());
#if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
- if (looks_like_userspace_heap_region(region))
- continue;
+ if (looks_like_userspace_heap_region(region))
+ continue;
#endif
- if (region.access() == Memory::Region::Access::None)
- continue;
-
- // If we crashed in the middle of mapping in Regions, they do not have a page directory yet, and will crash on a remap() call
- if (!region.is_mapped())
- continue;
-
- region.set_readable(true);
- region.remap();
-
- for (size_t i = 0; i < region.page_count(); i++) {
- auto page = region.physical_page(i);
- auto src_buffer = [&]() -> ErrorOr<UserOrKernelBuffer> {
- if (page)
- return UserOrKernelBuffer::for_user_buffer(reinterpret_cast<uint8_t*>((region.vaddr().as_ptr() + (i * PAGE_SIZE))), PAGE_SIZE);
- // If the current page is not backed by a physical page, we zero it in the coredump file.
- return UserOrKernelBuffer::for_kernel_buffer(zero_buffer);
- }();
- TRY(m_description->write(src_buffer.value(), PAGE_SIZE));
- }
+ if (region.access() == Memory::Region::Access::None)
+ continue;
+
+ // If we crashed in the middle of mapping in Regions, they do not have a page directory yet, and will crash on a remap() call
+ if (!region.is_mapped())
+ continue;
+
+ region.set_readable(true);
+ region.remap();
+
+ for (size_t i = 0; i < region.page_count(); i++) {
+ auto page = region.physical_page(i);
+ auto src_buffer = [&]() -> ErrorOr<UserOrKernelBuffer> {
+ if (page)
+ return UserOrKernelBuffer::for_user_buffer(reinterpret_cast<uint8_t*>((region.vaddr().as_ptr() + (i * PAGE_SIZE))), PAGE_SIZE);
+ // If the current page is not backed by a physical page, we zero it in the coredump file.
+ return UserOrKernelBuffer::for_kernel_buffer(zero_buffer);
+ }();
+ TRY(m_description->write(src_buffer.value(), PAGE_SIZE));
}
- return {};
- });
+ }
+ return {};
});
}
@@ -285,36 +279,34 @@ ErrorOr<void> Coredump::create_notes_threads_data(auto& builder) const
ErrorOr<void> Coredump::create_notes_regions_data(auto& builder) const
{
size_t region_index = 0;
- return m_process->address_space().with([&](auto& space) {
- return space->region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
- for (auto const& region : region_tree.regions()) {
+ return m_process->address_space().with([&](auto& space) -> ErrorOr<void> {
+ for (auto const& region : space->region_tree().regions()) {
#if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
- if (looks_like_userspace_heap_region(region))
- continue;
+ if (looks_like_userspace_heap_region(region))
+ continue;
#endif
- if (region.access() == Memory::Region::Access::None)
- continue;
+ if (region.access() == Memory::Region::Access::None)
+ continue;
- ELF::Core::MemoryRegionInfo info {};
- info.header.type = ELF::Core::NotesEntryHeader::Type::MemoryRegionInfo;
+ ELF::Core::MemoryRegionInfo info {};
+ info.header.type = ELF::Core::NotesEntryHeader::Type::MemoryRegionInfo;
- info.region_start = region.vaddr().get();
- info.region_end = region.vaddr().offset(region.size()).get();
- info.program_header_index = region_index++;
+ info.region_start = region.vaddr().get();
+ info.region_end = region.vaddr().offset(region.size()).get();
+ info.program_header_index = region_index++;
- TRY(builder.append_bytes(ReadonlyBytes { (void*)&info, sizeof(info) }));
+ TRY(builder.append_bytes(ReadonlyBytes { (void*)&info, sizeof(info) }));
- // NOTE: The region name *is* null-terminated, so the following is ok:
- auto name = region.name();
- if (name.is_empty())
- TRY(builder.append('\0'));
- else
- TRY(builder.append(name.characters_without_null_termination(), name.length() + 1));
- }
- return {};
- });
+ // NOTE: The region name *is* null-terminated, so the following is ok:
+ auto name = region.name();
+ if (name.is_empty())
+ TRY(builder.append('\0'));
+ else
+ TRY(builder.append(name.characters_without_null_termination(), name.length() + 1));
+ }
+ return {};
});
}