path: root/AK/BumpAllocator.h
/*
 * Copyright (c) 2021, Ali Mohammad Pur <mpfard@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Atomic.h>
#include <AK/StdLibExtras.h>
#include <AK/Types.h>
#include <AK/kmalloc.h>
#include <sys/mman.h>

namespace AK {
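
// BumpAllocator is an arena: allocate() bumps a cursor through big chunks of
// memory, individual allocations are never freed, and deallocate_all() (or the
// destructor) releases everything at once.
//
// Chunks form a singly-linked list threaded through their headers:
//
//     [ChunkHeader | allocations...] -> [ChunkHeader | allocations...] -> 0
//      ^ m_head_chunk                    ^ m_current_chunk
//
// Chunks are backed by mmap() when use_mmap is true, otherwise by kmalloc().
// One spare chunk per template instantiation is parked in
// s_unused_allocation_cache so repeated fill/clear cycles can skip the system
// allocator.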

template<bool use_mmap = false, size_t chunk_size = use_mmap ? 4 * MiB : 4 * KiB>
class BumpAllocator {
public:
    BumpAllocator()
    {
        // mmap-backed chunks use the requested size verbatim; heap-backed
        // chunks are rounded up to a size class kmalloc can serve without slack.
        if constexpr (use_mmap)
            m_chunk_size = chunk_size;
        else
            m_chunk_size = kmalloc_good_size(chunk_size);
    }

    ~BumpAllocator()
    {
        deallocate_all();
    }

    void* allocate(size_t size, size_t align)
    {
        // The allocation (plus header) must fit into a single chunk; alignment
        // padding is assumed to be small relative to the chunk size.
        VERIFY(size < m_chunk_size - sizeof(ChunkHeader));
        if (!m_current_chunk) {
            if (!allocate_a_chunk())
                return nullptr;
        }

    allocate_again:;
        VERIFY(m_current_chunk != 0);

        // Bump the cursor: round the current position up to 'align', then
        // claim 'size' bytes. If that runs past the end of the chunk, link in
        // a fresh chunk and try again.
        auto aligned_ptr = align_up_to(m_byte_offset_into_current_chunk + m_current_chunk, align);
        auto next_offset = aligned_ptr + size - m_current_chunk;
        if (next_offset > m_chunk_size) {
            if (!allocate_a_chunk())
                return nullptr;
            goto allocate_again;
        }
        m_byte_offset_into_current_chunk = next_offset;
        return (void*)aligned_ptr;
    }

    void deallocate_all()
    {
        if (!m_head_chunk)
            return;
        // Note that 'cache_filled' is just an educated guess, and we don't rely on it.
        // If we determine 'cache_filled=true' and the cache becomes empty in the meantime,
        // then we haven't lost much; it was a close call anyway.
        // If we determine 'cache_filled=false' and the cache becomes full in the meantime,
        // then we'll end up with a different chunk to munmap(), no big difference.
        bool cache_filled = s_unused_allocation_cache.load(MemoryOrder::memory_order_relaxed);
        for_each_chunk([&](auto chunk) {
            if (!cache_filled) {
                cache_filled = true;
                ((ChunkHeader*)chunk)->next_chunk = 0;
                chunk = s_unused_allocation_cache.exchange(chunk);
                if (!chunk)
                    return;
                // The cache got filled in the meantime. Oh well, we have to call munmap() anyway.
            }

            if constexpr (use_mmap) {
                munmap((void*)chunk, m_chunk_size);
            } else {
                kfree_sized((void*)chunk, m_chunk_size);
            }
        });
        // Reset the chain so the allocator can be reused; otherwise a later
        // allocate() would bump into memory we just freed or cached.
        m_head_chunk = 0;
        m_current_chunk = 0;
        m_byte_offset_into_current_chunk = 0;
    }

protected:
    template<typename TFn>
    void for_each_chunk(TFn&& fn)
    {
        auto head_chunk = m_head_chunk;
        while (head_chunk) {
            auto& chunk_header = *(ChunkHeader const*)head_chunk;
            VERIFY(chunk_header.magic == chunk_magic);
            if (head_chunk == m_current_chunk)
                VERIFY(chunk_header.next_chunk == 0);
            // Read the next pointer before invoking fn: fn may free or recycle
            // the chunk out from under us.
            auto next_chunk = chunk_header.next_chunk;
            fn(head_chunk);
            head_chunk = next_chunk;
        }
    }

    bool allocate_a_chunk()
    {
        // dbgln("Allocated {} entries in previous chunk and have {} unusable bytes", m_allocations_in_previous_chunk, m_chunk_size - m_byte_offset_into_current_chunk);
        // m_allocations_in_previous_chunk = 0;
        // Prefer the cached spare chunk if one is available; otherwise ask the system.
        void* new_chunk = (void*)s_unused_allocation_cache.exchange(0);
        if (!new_chunk) {
            if constexpr (use_mmap) {
#ifdef __serenity__
                new_chunk = serenity_mmap(nullptr, m_chunk_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_RANDOMIZED | MAP_PRIVATE, 0, 0, m_chunk_size, "BumpAllocator Chunk");
#else
                // Anonymous mappings portably require fd == -1 (the BSDs reject other fds).
                new_chunk = mmap(nullptr, m_chunk_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#endif
                if (new_chunk == MAP_FAILED)
                    return false;
            } else {
                new_chunk = kmalloc(m_chunk_size);
                if (!new_chunk)
                    return false;
            }
        }

        auto& new_header = *(ChunkHeader*)new_chunk;
        new_header.magic = chunk_magic;
        new_header.next_chunk = 0;
        m_byte_offset_into_current_chunk = sizeof(ChunkHeader);

        if (!m_head_chunk) {
            VERIFY(!m_current_chunk);
            m_head_chunk = (FlatPtr)new_chunk;
            m_current_chunk = (FlatPtr)new_chunk;
            return true;
        }

        VERIFY(m_current_chunk);
        auto& old_header = *(ChunkHeader*)m_current_chunk;
        VERIFY(old_header.magic == chunk_magic);
        VERIFY(old_header.next_chunk == 0);
        old_header.next_chunk = (FlatPtr)new_chunk;
        m_current_chunk = (FlatPtr)new_chunk;
        return true;
    }

    // Every chunk starts with this header; 'magic' guards against walking a
    // corrupted chain, and 'next_chunk' threads the chunks into a list.
    constexpr static FlatPtr chunk_magic = explode_byte(0xdf);
    struct ChunkHeader {
        FlatPtr magic;
        FlatPtr next_chunk;
    };
    FlatPtr m_head_chunk { 0 };
    FlatPtr m_current_chunk { 0 };
    size_t m_byte_offset_into_current_chunk { 0 };
    size_t m_chunk_size { 0 };
    // One spare chunk shared by all allocators of this instantiation, so a
    // deallocate_all()/allocate() cycle can avoid the system allocator.
    static Atomic<FlatPtr> s_unused_allocation_cache;
};
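
// A minimal usage sketch (illustrative, not part of the header): raw
// variable-sized allocations carved out of a BumpAllocator. Nothing is freed
// until deallocate_all().
//
//     BumpAllocator<> allocator;
//     auto* a = allocator.allocate(128, 16); // 128 bytes, 16-byte aligned
//     auto* b = allocator.allocate(24, 8);   // bumped out right after 'a'
//     if (a && b) {
//         // ... use the memory; there is no per-allocation free() ...
//     }
//     allocator.deallocate_all(); // releases (or caches) every chunk at once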

template<typename T, bool use_mmap = false, size_t chunk_size = use_mmap ? 4 * MiB : 4 * KiB>
class UniformBumpAllocator : protected BumpAllocator<use_mmap, chunk_size> {
    using Allocator = BumpAllocator<use_mmap, chunk_size>;

public:
    UniformBumpAllocator() = default;
    ~UniformBumpAllocator()
    {
        destroy_all();
    }

    template<typename... Args>
    T* allocate(Args&&... args)
    {
        auto ptr = (T*)Allocator::allocate(sizeof(T), alignof(T));
        if (!ptr)
            return nullptr;
        // Placement-construct the T; destroy_all() runs the destructors later.
        return new (ptr) T { forward<Args>(args)... };
    }

    void deallocate_all()
    {
        destroy_all();
        Allocator::deallocate_all();
    }

    void destroy_all()
    {
        this->for_each_chunk([&](auto chunk) {
            auto base_ptr = align_up_to(chunk + sizeof(typename Allocator::ChunkHeader), alignof(T));
            // Compute the offset of the first byte *after* the last object that fits
            // in this chunk; the end of the chunk need not coincide with the end of
            // an object, so round the usable space down to a whole number of objects.
            FlatPtr base_offset = base_ptr - chunk;
            FlatPtr end_offset = base_offset + ((this->m_chunk_size - base_offset) / sizeof(T)) * sizeof(T);
            // The current chunk is only filled up to the bump cursor.
            if (chunk == this->m_current_chunk)
                end_offset = this->m_byte_offset_into_current_chunk;
            for (; base_ptr - chunk < end_offset; base_ptr += sizeof(T))
                reinterpret_cast<T*>(base_ptr)->~T();
        });
    }
};
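
// A minimal usage sketch (illustrative, not part of the header): a
// UniformBumpAllocator placement-constructs objects of one type and runs all
// of their destructors in bulk. 'Node' here is a hypothetical payload type.
//
//     struct Node {
//         explicit Node(int v) : value(v) {}
//         int value { 0 };
//     };
//
//     UniformBumpAllocator<Node> allocator;
//     auto* node = allocator.allocate(42); // constructs Node { 42 } in the current chunk
//     if (node) {
//         // ... build structures out of Nodes without individual deletes ...
//     }
//     allocator.deallocate_all(); // runs ~Node() for every object, then frees the chunks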

template<bool use_mmap, size_t size>
inline Atomic<FlatPtr> BumpAllocator<use_mmap, size>::s_unused_allocation_cache { 0 };

}

using AK::BumpAllocator;
using AK::UniformBumpAllocator;