author    Andreas Kling <kling@serenityos.org>  2021-02-03 23:42:18 +0100
committer Andreas Kling <kling@serenityos.org>  2021-02-03 23:42:18 +0100
commit    3a3270eb682185abcda46128a862a8145fcb7073 (patch)
tree      b44d8ebe99c66f80375fde812a463914aa7e4405 /Userland/Libraries/LibELF/DynamicLoader.cpp
parent    c9cd5ff6bb8e4a8c8492435191c9379dc94a6f4c (diff)
download  serenity-3a3270eb682185abcda46128a862a8145fcb7073.zip
LibELF: Make a dummy VM reservation before mapping dynamic objects
Using the text segment for the VM reservation ran into trouble when there was a discrepancy between the p_filesz and p_memsz. Simplify this mechanism and avoid trouble by making the reservation as a MAP_PRIVATE | MAP_NORESERVE throwaway mapping instead. Fixes #5225.
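Below is a minimal sketch of the reserve/unmap/remap pattern this commit switches to, for readers skimming the diff. It is not the loader's code: reserve_and_map(), text_size, data_size, text_offset and fd are illustrative placeholders, and plain POSIX mmap()/munmap() stands in for SerenityOS's mmap_with_name().

    // Sketch only: reserve a contiguous VM range, throw it away, then place
    // the text and data mappings into the freed range.
    #include <cstdio>
    #include <cstdlib>
    #include <sys/mman.h>
    #include <sys/types.h>

    void* reserve_and_map(int fd, size_t text_size, size_t data_size, off_t text_offset)
    {
        size_t total_size = text_size + data_size;

        // 1. Reserve enough contiguous VM for text+data with a throwaway
        //    anonymous mapping; MAP_NORESERVE keeps it from committing memory.
        void* reservation = mmap(nullptr, total_size, PROT_NONE,
                                 MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
        if (reservation == MAP_FAILED) {
            perror("mmap reservation");
            abort();
        }

        // 2. Throw the reservation away again; only its address is kept.
        if (munmap(reservation, total_size) < 0) {
            perror("munmap reservation");
            abort();
        }

        // 3. Map the text segment from the file at the reserved address. Like
        //    the loader, this passes the address as a hint (no MAP_FIXED) and
        //    assumes the kernel places the mapping there.
        void* text = mmap(reservation, text_size, PROT_READ,
                          MAP_PRIVATE, fd, text_offset);
        if (text == MAP_FAILED) {
            perror("mmap text");
            abort();
        }

        // 4. Map an anonymous, writable data segment directly after the text
        //    segment; its contents are then copied in from the file.
        void* data = mmap((char*)text + text_size, data_size, PROT_READ | PROT_WRITE,
                          MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        if (data == MAP_FAILED) {
            perror("mmap data");
            abort();
        }

        (void)data;
        return text;
    }

Unmapping the reservation and immediately remapping into the freed range is safe here because the dynamic loader runs single-threaded at this point; on a system where another thread could grab the range, remapping over the reservation with MAP_FIXED instead of unmapping first would be the safer variant.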
Diffstat (limited to 'Userland/Libraries/LibELF/DynamicLoader.cpp')
-rw-r--r--  Userland/Libraries/LibELF/DynamicLoader.cpp | 37
1 file changed, 22 insertions(+), 15 deletions(-)
diff --git a/Userland/Libraries/LibELF/DynamicLoader.cpp b/Userland/Libraries/LibELF/DynamicLoader.cpp
index 9aa46600b1..18d6b31780 100644
--- a/Userland/Libraries/LibELF/DynamicLoader.cpp
+++ b/Userland/Libraries/LibELF/DynamicLoader.cpp
@@ -286,25 +286,38 @@ void DynamicLoader::load_program_headers()
ASSERT(!text_region.value().is_writable());
- // First, we map the text *and* data segments, in order to allocate enough VM
- // to hold both contiguously in the address space.
+ // First, we make a dummy reservation mapping, in order to allocate enough VM
+ // to hold both text+data contiguously in the address space.
Checked<size_t> total_mapping_size;
total_mapping_size = text_region.value().required_load_size();
total_mapping_size += data_region.value().required_load_size();
ASSERT(!total_mapping_size.has_overflow());
+ auto* reservation = mmap(requested_load_address, total_mapping_size.value(), PROT_NONE, MAP_ANON | MAP_PRIVATE | MAP_NORESERVE, 0, 0);
+ if (reservation == MAP_FAILED) {
+ perror("mmap reservation");
+ ASSERT_NOT_REACHED();
+ }
+
+ // Then we unmap the reservation.
+ if (munmap(reservation, total_mapping_size.value()) < 0) {
+ perror("munmap reservation");
+ ASSERT_NOT_REACHED();
+ }
+
+ // Now we can map the text segment at the reserved address.
auto* text_segment_begin = (u8*)mmap_with_name(
- requested_load_address,
- total_mapping_size.value(),
+ reservation,
+ text_region.value().required_load_size(),
PROT_READ,
text_mmap_flags,
m_image_fd,
text_region.value().offset(),
String::formatted("{}: .text", m_filename).characters());
- if (MAP_FAILED == text_segment_begin) {
- perror("mmap text / initial segment");
+ if (text_segment_begin == MAP_FAILED) {
+ perror("mmap text");
ASSERT_NOT_REACHED();
}
@@ -312,19 +325,13 @@ void DynamicLoader::load_program_headers()
m_text_segment_size = text_region.value().required_load_size();
m_text_segment_load_address = VirtualAddress { (FlatPtr)text_segment_begin };
- // Then, we unmap the data segment part of the above combined VM allocation.
- auto* data_segment_address = (u8*)text_segment_begin + text_region.value().required_load_size();
- if (munmap(data_segment_address, data_region.value().required_load_size()) < 0) {
- perror("munmap");
- ASSERT_NOT_REACHED();
- }
-
if (m_elf_image.is_dynamic())
m_dynamic_section_address = dynamic_region_desired_vaddr.offset(m_text_segment_load_address.get());
else
m_dynamic_section_address = dynamic_region_desired_vaddr;
- // Finally, we remap the data segment, this time privately.
+ // Finally, we make an anonymous mapping for the data segment. Contents are then copied from the file.
+ auto* data_segment_address = (u8*)text_segment_begin + text_region.value().required_load_size();
auto* data_segment = (u8*)mmap_with_name(
data_segment_address,
data_region.value().required_load_size(),
@@ -335,7 +342,7 @@ void DynamicLoader::load_program_headers()
String::formatted("{}: .data", m_filename).characters());
if (MAP_FAILED == data_segment) {
- perror("mmap data segment");
+ perror("mmap data");
ASSERT_NOT_REACHED();
}