summaryrefslogtreecommitdiff
path: root/Kernel/VM/RangeAllocator.h
diff options
context:
space:
mode:
authorAndreas Kling <awesomekling@gmail.com>2019-05-17 03:40:15 +0200
committerAndreas Kling <awesomekling@gmail.com>2019-05-17 03:40:15 +0200
commitc414e65498770f148a9447f7efd728d72c35ab9a (patch)
treeaab8599a7983121d9cb0a07e225f08f6435e8a70 /Kernel/VM/RangeAllocator.h
parentc56e3ebee1fdbd242a974f7d95cbcb98272ac3b7 (diff)
downloadserenity-c414e65498770f148a9447f7efd728d72c35ab9a.zip
Kernel: Implement a simple virtual address range allocator.
This replaces the previous virtual address allocator, which was basically just "m_next_address += size;". With this in place, virtual addresses can get reused, which cuts down on the number of page tables created. When we implement ASLR some day, we'll probably have to do page table deallocation, but for now page tables are only deallocated once the process dies.
Diffstat (limited to 'Kernel/VM/RangeAllocator.h')
-rw-r--r--Kernel/VM/RangeAllocator.h59
1 files changed, 59 insertions, 0 deletions
diff --git a/Kernel/VM/RangeAllocator.h b/Kernel/VM/RangeAllocator.h
new file mode 100644
index 0000000000..0c2a7759cb
--- /dev/null
+++ b/Kernel/VM/RangeAllocator.h
@@ -0,0 +1,59 @@
+#pragma once
+
+#include <Kernel/LinearAddress.h>
+#include <AK/Vector.h>
+
+class Range {
+ friend class RangeAllocator;
+public:
+ Range() { }
+ Range(LinearAddress base, size_t size)
+ : m_base(base)
+ , m_size(size)
+ {
+ }
+
+ LinearAddress base() const { return m_base; }
+ size_t size() const { return m_size; }
+ bool is_valid() const { return m_base.is_null(); }
+
+ LinearAddress end() const { return m_base.offset(m_size); }
+
+ bool operator==(const Range& other) const
+ {
+ return m_base == other.m_base && m_size == other.m_size;
+ }
+
+ bool contains(LinearAddress base, size_t size) const
+ {
+ return base >= m_base && base.offset(size) <= end();
+ }
+
+ bool contains(const Range& other) const
+ {
+ return contains(other.base(), other.size());
+ }
+
+ Vector<Range, 2> carve(const Range&);
+
+private:
+ LinearAddress m_base;
+ size_t m_size { 0 };
+};
+
// Allocates virtual address ranges out of a fixed region of address space,
// handing out Ranges and accepting them back for reuse. Backed by a list of
// currently-available (free) ranges; all method bodies live in
// RangeAllocator.cpp, so the exact allocation policy is not visible here.
class RangeAllocator {
public:
    // Constructs an allocator managing the region [base, base + size).
    RangeAllocator(LinearAddress, size_t);
    ~RangeAllocator();

    // Allocates a range of the given size at any free address.
    // NOTE(review): presumably returns an invalid (null-base) Range on
    // exhaustion -- confirm against RangeAllocator.cpp.
    Range allocate_anywhere(size_t);
    // Allocates a range at the exact requested base address, if free.
    Range allocate_specific(LinearAddress, size_t);
    // Returns a previously allocated range to the free list.
    void deallocate(Range);

    // Debug helper: prints the allocator's state.
    void dump() const;

private:
    // Carves the given sub-range out of m_available_ranges[index].
    void carve_at_index(int, const Range&);

    // The set of address ranges currently available for allocation.
    Vector<Range> m_available_ranges;
};