author    Andreas Kling <kling@serenityos.org>  2020-01-19 13:18:27 +0100
committer Andreas Kling <kling@serenityos.org>  2020-01-19 13:29:59 +0100
commit    ad3f9317072697895436af93de71eb13f08c176e (patch)
tree      9009a1135fd838fbe001b08cd67262b47d0bb51e /Kernel/VM/RangeAllocator.cpp
parent    502626eecb5bbd658f3dc516d2d46ad1aed55a90 (diff)
download  serenity-ad3f9317072697895436af93de71eb13f08c176e.zip
Kernel: Optimize VM range deallocation a bit
Previously, when deallocating a range of VM, we would sort and merge
the range list. This was quite slow for large processes.

This patch optimizes VM deallocation in the following ways:

- Use binary search instead of linear scan to find the place to insert
  the deallocated range.
- Insert at the right place immediately, removing the need to sort.
- Merge the inserted range with any adjacent range(s) in-line instead
  of doing a separate merge pass into a list copy.
- Add Traits<Range> to inform Vector that Range objects are trivial
  and can be moved using memmove().

I've also added an assertion that deallocated ranges are actually part
of the RangeAllocator's initial address range.

I've benchmarked this using g++ to compile Kernel/Process.cpp. With
these changes, compilation goes from ~41 sec to ~35 sec.
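In outline, the new deallocate() does three things: binary-search for the
neighborhood of the freed range, merge into the left-adjacent free range if
one abuts it (otherwise insert at the sorted position), then try to coalesce
with the right neighbor. Below is a minimal standalone sketch of that
strategy, using std::vector and std::lower_bound in place of the kernel's
AK::Vector and AK::binary_search, with an illustrative Range stand-in rather
than SerenityOS's actual type:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <vector>

// Illustrative stand-in for the kernel's Range type.
struct Range {
    uintptr_t base { 0 };
    size_t size { 0 };
    uintptr_t end() const { return base + size; }
};

void deallocate(std::vector<Range>& free_list, Range range)
{
    // Binary search for the first free range at or after the freed one;
    // its predecessor is the only candidate for a merge on the left.
    auto it = std::lower_bound(free_list.begin(), free_list.end(), range,
        [](const Range& a, const Range& b) { return a.base < b.base; });

    if (it != free_list.begin() && std::prev(it)->end() == range.base) {
        // Left neighbor ends exactly where we begin: grow it in place.
        it = std::prev(it);
        it->size += range.size;
    } else {
        // Otherwise insert at the sorted position; no sort pass needed.
        it = free_list.insert(it, range);
    }

    // Finally, try to coalesce with the right neighbor.
    auto next = std::next(it);
    if (next != free_list.end() && it->end() == next->base) {
        it->size += next->size;
        free_list.erase(next);
    }
}

int main()
{
    // Three frees that coalesce back into one contiguous range.
    std::vector<Range> free_list { { 0x1000, 0x1000 } };
    deallocate(free_list, { 0x3000, 0x1000 }); // not adjacent: new entry
    deallocate(free_list, { 0x2000, 0x1000 }); // bridges the gap: merges all
    assert(free_list.size() == 1);
    assert(free_list[0].base == 0x1000 && free_list[0].size == 0x3000);
}

One deliberate difference: the kernel's comparator locates the left-adjacent
range directly, while this sketch finds the sorted insertion point first and
then checks the predecessor; both reach the same result without the old
sort-and-merge pass.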
Diffstat (limited to 'Kernel/VM/RangeAllocator.cpp')
-rw-r--r--   Kernel/VM/RangeAllocator.cpp   56
1 file changed, 29 insertions(+), 27 deletions(-)
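The Traits<Range> change mentioned in the commit message lands outside this
file (the diffstat above is limited to RangeAllocator.cpp), so it is not
visible in the diff below. As a rough, hypothetical sketch of the idea, with
stand-in types and a spelling that may differ from the real AK::Traits, a
container can consult such a trait to relocate elements with memmove():

#include <cstddef>
#include <cstring>

// Illustrative stand-in: Range is plain data with no owning pointers.
struct Range {
    unsigned long base { 0 };
    unsigned long size { 0 };
};

template<typename T>
struct Traits {
    static constexpr bool is_trivial = false; // conservative default
};

// With this specialization, a Vector may shift Range elements with a single
// memmove() during insert()/remove() instead of move-constructing and
// destroying each one.
template<>
struct Traits<Range> {
    static constexpr bool is_trivial = true;
};

template<typename T>
void shift_right_by_one(T* data, size_t count)
{
    if constexpr (Traits<T>::is_trivial) {
        std::memmove(data + 1, data, count * sizeof(T)); // one bulk move
    } else {
        // Fall back to per-element move-construct + destroy (omitted).
    }
}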
diff --git a/Kernel/VM/RangeAllocator.cpp b/Kernel/VM/RangeAllocator.cpp
index 8dd0d7d6d8..7adfe080c4 100644
--- a/Kernel/VM/RangeAllocator.cpp
+++ b/Kernel/VM/RangeAllocator.cpp
@@ -24,10 +24,12 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <AK/BinarySearch.h>
#include <AK/QuickSort.h>
#include <Kernel/Random.h>
#include <Kernel/VM/RangeAllocator.h>
#include <Kernel/kstdio.h>
+#include <Kernel/Thread.h>
//#define VRA_DEBUG
#define VM_GUARD_PAGES
@@ -38,6 +40,7 @@ RangeAllocator::RangeAllocator()
void RangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
{
+ m_total_range = { base, size };
m_available_ranges.append({ base, size });
#ifdef VRA_DEBUG
dump();
@@ -46,6 +49,7 @@ void RangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
void RangeAllocator::initialize_from_parent(const RangeAllocator& parent_allocator)
{
+ m_total_range = parent_allocator.m_total_range;
m_available_ranges = parent_allocator.m_available_ranges;
}
@@ -146,42 +150,40 @@ Range RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
void RangeAllocator::deallocate(Range range)
{
+ ASSERT(m_total_range.contains(range));
+
#ifdef VRA_DEBUG
dbgprintf("VRA: Deallocate: %x(%u)\n", range.base().get(), range.size());
dump();
#endif
- for (auto& available_range : m_available_ranges) {
- if (available_range.end() == range.base()) {
- available_range.m_size += range.size();
- goto sort_and_merge;
- }
+ ASSERT(!m_available_ranges.is_empty());
+
+ int nearby_index = 0;
+ auto* existing_range = binary_search(m_available_ranges.data(), m_available_ranges.size(), range, [](auto& a, auto& b) {
+ return a.base().get() - b.end().get();
+ }, &nearby_index);
+
+ int inserted_index = 0;
+ if (existing_range) {
+ existing_range->m_size += range.size();
+ inserted_index = nearby_index;
+ } else {
+ m_available_ranges.insert_before_matching(Range(range), [&](auto& entry) {
+ return entry.base() < range.end();
+ }, nearby_index, &inserted_index);
}
- m_available_ranges.append(range);
-
-sort_and_merge:
- // FIXME: We don't have to sort if we insert at the right position immediately.
- quick_sort(m_available_ranges.begin(), m_available_ranges.end(), [](auto& a, auto& b) {
- return a.base() < b.base();
- });
- Vector<Range> merged_ranges;
- merged_ranges.ensure_capacity(m_available_ranges.size());
-
- for (auto& range : m_available_ranges) {
- if (merged_ranges.is_empty()) {
- merged_ranges.append(range);
- continue;
- }
- if (range.base() == merged_ranges.last().end()) {
- merged_ranges.last().m_size += range.size();
- continue;
+ if (inserted_index < (m_available_ranges.size() - 1)) {
+ // We already merged with previous. Try to merge with next.
+ auto& inserted_range = m_available_ranges[inserted_index];
+ auto& next_range = m_available_ranges[inserted_index + 1];
+ if (inserted_range.end() == next_range.base()) {
+ inserted_range.m_size += next_range.size();
+ m_available_ranges.remove(inserted_index + 1);
+ return;
}
- merged_ranges.append(range);
}
-
- m_available_ranges = move(merged_ranges);
-
#ifdef VRA_DEBUG
dbgprintf("VRA: After deallocate\n");
dump();
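A note on the comparator in the hunk above: AK::binary_search signals a match
with a zero comparator result, so `a.base().get() - b.end().get()` matches
exactly the free range whose end abuts the deallocated range's base, i.e. the
merge-left candidate, while nearby_index tells the caller where to insert when
no such range exists. A simplified, hypothetical stand-in modeled on the call
in the diff (the real AK signature may differ):

#include <cstddef>

// Illustrative Range stand-in (the kernel's Range wraps VirtualAddress).
struct Range {
    unsigned long base { 0 };
    unsigned long size { 0 };
    unsigned long end() const { return base + size; }
};

// A zero comparator result is a match; nearby_index reports where the search
// narrowed to, so the caller knows where to insert on a miss.
template<typename Compare>
Range* binary_search(Range* haystack, size_t count, const Range& needle, Compare compare, int* nearby_index)
{
    int low = 0;
    int high = (int)count - 1;
    while (low <= high) {
        int middle = low + (high - low) / 2;
        long result = compare(needle, haystack[middle]);
        if (nearby_index)
            *nearby_index = middle;
        if (result == 0)
            return &haystack[middle];
        if (result > 0)
            low = middle + 1; // needle begins past this entry's end
        else
            high = middle - 1;
    }
    return nullptr; // no abutting entry; caller inserts near *nearby_index
}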