/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/FixedArray.h>
#include <AK/HashTable.h>
#include <AK/IntrusiveList.h>
#include <AK/RefCounted.h>
#include <AK/RefPtr.h>
#include <AK/Vector.h>
#include <AK/Weakable.h>
#include <Kernel/Forward.h>
#include <Kernel/Mutex.h>
#include <Kernel/VM/Region.h>

namespace Kernel {

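// Implemented by clients that want to know when a VMObject is destroyed,
// e.g. so they can drop any cached pointers to it.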
class VMObjectDeletedHandler {
public:
    virtual ~VMObjectDeletedHandler() = default;
    virtual void vmobject_deleted(VMObject&) = 0;
};

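// A VMObject represents a set of physical pages that can be mapped into
// address spaces by one or more Regions.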
class VMObject : public RefCounted<VMObject>
    , public Weakable<VMObject> {
    friend class MemoryManager;
    friend class Region;

public:
    virtual ~VMObject();

    virtual RefPtr<VMObject> try_clone() = 0;

    virtual bool is_anonymous() const { return false; }
    virtual bool is_inode() const { return false; }
    virtual bool is_shared_inode() const { return false; }
    virtual bool is_private_inode() const { return false; }
    virtual bool is_contiguous() const { return false; }

    size_t page_count() const { return m_physical_pages.size(); }
    Span<RefPtr<PhysicalPage> const> physical_pages() const { return m_physical_pages.span(); }
    Span<RefPtr<PhysicalPage>> physical_pages() { return m_physical_pages.span(); }

    size_t size() const { return m_physical_pages.size() * PAGE_SIZE; }

    virtual StringView class_name() const = 0;

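    // Called by Region to (un)register itself with this VMObject, so that
    // for_each_region() can visit every mapping of these pages.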
    ALWAYS_INLINE void add_region(Region& region)
    {
        ScopedSpinLock locker(m_lock);
        m_regions.append(region);
    }

    ALWAYS_INLINE void remove_region(Region& region)
    {
        ScopedSpinLock locker(m_lock);
        m_regions.remove(region);
    }

    void register_on_deleted_handler(VMObjectDeletedHandler& handler)
    {
        ScopedSpinLock locker(m_on_deleted_lock);
        m_on_deleted.set(&handler);
    }
    void unregister_on_deleted_handler(VMObjectDeletedHandler& handler)
    {
        ScopedSpinLock locker(m_on_deleted_lock);
        m_on_deleted.remove(&handler);
    }

protected:
    explicit VMObject(size_t);
    explicit VMObject(VMObject const&);

    template<typename Callback>
    void for_each_region(Callback);

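    // Hook for placing this VMObject on an IntrusiveList (see the List alias below).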
    IntrusiveListNode<VMObject> m_list_node;
    FixedArray<RefPtr<PhysicalPage>> m_physical_pages;

    mutable RecursiveSpinLock m_lock;

private:
    VMObject& operator=(VMObject const&) = delete;
    VMObject& operator=(VMObject&&) = delete;
    VMObject(VMObject&&) = delete;

    HashTable<VMObjectDeletedHandler*> m_on_deleted;
    SpinLock<u8> m_on_deleted_lock;

    Region::ListInVMObject m_regions;

public:
    using List = IntrusiveList<VMObject, RawPtr<VMObject>, &VMObject::m_list_node>;
};

template<typename Callback>
inline void VMObject::for_each_region(Callback callback)
{
    ScopedSpinLock lock(m_lock);
    for (auto& region : m_regions) {
        callback(region);
    }
}
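
// A minimal usage sketch (hypothetical; not part of this header): a subclass
// whose physical pages have changed can remap every Region mapping them:
//
//     for_each_region([](Region& region) {
//         region.remap();
//     });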

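// These Region accessors are defined here, rather than in Region.h,
// because they require the complete VMObject definition.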
inline PhysicalPage const* Region::physical_page(size_t index) const
{
    VERIFY(index < page_count());
    return vmobject().physical_pages()[first_page_index() + index];
}

inline RefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
{
    VERIFY(index < page_count());
    return vmobject().physical_pages()[first_page_index() + index];
}

}