author      Idan Horowitz <idan.horowitz@gmail.com>    2022-02-10 19:55:10 +0200
committer   Idan Horowitz <idan.horowitz@gmail.com>    2022-02-11 17:49:46 +0200
commit      57bce8ab97f52c01698fe6ce9e2289be704f3cc5 (patch)
tree        5606b1f76744a07c5386e069699df735dc4d9668
parent      d9d33627223f4b713b0ca17ccd99b6a15c89745e (diff)
download    serenity-57bce8ab97f52c01698fe6ce9e2289be704f3cc5.zip
Kernel: Set up Regions before adding them to a Process's AddressSpace
This reduces the window of time during which not-yet-fully-initialized Regions are present in an AddressSpace's region tree.
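The idea is simply to reorder operations so that a Region is fully configured before it becomes reachable through the shared region tree. A toy, self-contained sketch of that ordering (the types below are simplified stand-ins, not the real kernel classes; std::vector stands in for the region tree):

    #include <memory>
    #include <vector>

    struct Region {
        bool mmap { false };
        bool stack { false };
    };

    struct AddressSpace {
        std::vector<std::unique_ptr<Region>> regions; // stand-in for the region tree

        Region* add_region(std::unique_ptr<Region> region)
        {
            regions.push_back(std::move(region));
            return regions.back().get();
        }
    };

    Region* allocate(AddressSpace& space)
    {
        auto region = std::make_unique<Region>();
        // Configure the Region while it is still exclusively owned here...
        region->mmap = true;
        // ...and only hand it to the address space as the very last step, so the
        // tree never contains a partially initialized Region.
        return space.add_region(std::move(region));
    }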
-rw-r--r--  Kernel/Memory/AddressSpace.cpp  | 20
-rw-r--r--  Kernel/Syscalls/fork.cpp        |  2
2 files changed, 10 insertions(+), 12 deletions(-)
diff --git a/Kernel/Memory/AddressSpace.cpp b/Kernel/Memory/AddressSpace.cpp
index abd33bfa4d..f976bb6a8b 100644
--- a/Kernel/Memory/AddressSpace.cpp
+++ b/Kernel/Memory/AddressSpace.cpp
@@ -146,16 +146,15 @@ ErrorOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_re
auto new_region = TRY(Region::try_create_user_accessible(
range, source_region.vmobject(), offset_in_vmobject, move(region_name), source_region.access(), source_region.is_cacheable() ? Region::Cacheable::Yes : Region::Cacheable::No, source_region.is_shared()));
- auto* region = TRY(add_region(move(new_region)));
- region->set_syscall_region(source_region.is_syscall_region());
- region->set_mmap(source_region.is_mmap());
- region->set_stack(source_region.is_stack());
+ new_region->set_syscall_region(source_region.is_syscall_region());
+ new_region->set_mmap(source_region.is_mmap());
+ new_region->set_stack(source_region.is_stack());
size_t page_offset_in_source_region = (offset_in_vmobject - source_region.offset_in_vmobject()) / PAGE_SIZE;
- for (size_t i = 0; i < region->page_count(); ++i) {
+ for (size_t i = 0; i < new_region->page_count(); ++i) {
if (source_region.should_cow(page_offset_in_source_region + i))
- TRY(region->set_should_cow(i, true));
+ TRY(new_region->set_should_cow(i, true));
}
- return region;
+ return add_region(move(new_region));
}
ErrorOr<Region*> AddressSpace::allocate_region(VirtualRange const& range, StringView name, int prot, AllocationStrategy strategy)
@@ -191,16 +190,15 @@ ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange const&
if (!name.is_null())
region_name = TRY(KString::try_create(name));
auto region = TRY(Region::try_create_user_accessible(range, move(vmobject), offset_in_vmobject, move(region_name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, shared));
- auto* added_region = TRY(add_region(move(region)));
if (prot == PROT_NONE) {
// For PROT_NONE mappings, we don't have to set up any page table mappings.
// We do still need to attach the region to the page_directory though.
SpinlockLocker mm_locker(s_mm_lock);
- added_region->set_page_directory(page_directory());
+ region->set_page_directory(page_directory());
} else {
- TRY(added_region->map(page_directory(), ShouldFlushTLB::No));
+ TRY(region->map(page_directory(), ShouldFlushTLB::No));
}
- return added_region;
+ return add_region(move(region));
}
void AddressSpace::deallocate_region(Region& region)
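For readability, this is roughly how the tail of allocate_region_with_vmobject reads once the patch is applied (only the lines visible in the hunk above are reconstructed; everything before region_name is assumed unchanged):

    auto region = TRY(Region::try_create_user_accessible(range, move(vmobject), offset_in_vmobject,
        move(region_name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, shared));
    if (prot == PROT_NONE) {
        // For PROT_NONE mappings, we don't have to set up any page table mappings.
        // We do still need to attach the region to the page_directory though.
        SpinlockLocker mm_locker(s_mm_lock);
        region->set_page_directory(page_directory());
    } else {
        TRY(region->map(page_directory(), ShouldFlushTLB::No));
    }
    // The fully set-up Region only becomes visible in the AddressSpace here.
    return add_region(move(region));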
diff --git a/Kernel/Syscalls/fork.cpp b/Kernel/Syscalls/fork.cpp
index 7b18227f36..678e792f08 100644
--- a/Kernel/Syscalls/fork.cpp
+++ b/Kernel/Syscalls/fork.cpp
@@ -106,8 +106,8 @@ ErrorOr<FlatPtr> Process::sys$fork(RegisterState& regs)
for (auto& region : address_space().regions()) {
dbgln_if(FORK_DEBUG, "fork: cloning Region({}) '{}' @ {}", region, region->name(), region->vaddr());
auto region_clone = TRY(region->try_clone());
+ TRY(region_clone->map(child->address_space().page_directory(), Memory::ShouldFlushTLB::No));
auto* child_region = TRY(child->address_space().add_region(move(region_clone)));
- TRY(child_region->map(child->address_space().page_directory(), Memory::ShouldFlushTLB::No));
if (region == m_master_tls_region.unsafe_ptr())
child->m_master_tls_region = child_region;
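Correspondingly, the clone loop in sys$fork now maps each cloned Region into the child's page directory before handing it to the child's AddressSpace; reconstructed from the hunk above (the loop's closing brace and anything outside the hunk are assumed):

    for (auto& region : address_space().regions()) {
        dbgln_if(FORK_DEBUG, "fork: cloning Region({}) '{}' @ {}", region, region->name(), region->vaddr());
        auto region_clone = TRY(region->try_clone());
        // Map the clone first, while it is still exclusively owned...
        TRY(region_clone->map(child->address_space().page_directory(), Memory::ShouldFlushTLB::No));
        // ...then publish it into the child's region tree.
        auto* child_region = TRY(child->address_space().add_region(move(region_clone)));
        if (region == m_master_tls_region.unsafe_ptr())
            child->m_master_tls_region = child_region;
    }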