/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/NonnullRefPtr.h>
#include <Kernel/Arch/x86/CPU.h>
#include <Kernel/Assertions.h>
#include <Kernel/Heap/SlabAllocator.h>
#include <Kernel/PhysicalAddress.h>

namespace Kernel {
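
// A PhysicalPage is a reference-counted handle to a single page of physical
// memory. Owners such as VMObject hold NonnullRefPtr<PhysicalPage>s; when the
// last reference is dropped, the page can be handed back to the physical page
// freelist before the handle itself is destroyed.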
class PhysicalPage {
    friend class MemoryManager;
    friend class PageDirectory;
    friend class VMObject;

    MAKE_SLAB_ALLOCATED(PhysicalPage);
    AK_MAKE_NONMOVABLE(PhysicalPage);

public:
    PhysicalAddress paddr() const { return m_paddr; }
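
    // Ref-counting is implemented by hand (rather than via AK::RefCounted)
    // so that the final unref() can return the page to the freelist before
    // destroying the handle. The acq_rel ordering makes the last unref()
    // synchronize with concurrent ref()/unref() calls on other processors.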
    void ref()
    {
        m_ref_count.fetch_add(1, AK::memory_order_acq_rel);
    }

    void unref()
    {
        if (m_ref_count.fetch_sub(1, AK::memory_order_acq_rel) == 1) {
            if (m_may_return_to_freelist)
                return_to_freelist();
            delete this;
        }
    }
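
    // Factory used by MemoryManager when allocating physical pages:
    // `supervisor` selects a kernel-only page, and `may_return_to_freelist`
    // controls whether the page is recycled on the last unref().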
    static NonnullRefPtr<PhysicalPage> create(PhysicalAddress, bool supervisor, bool may_return_to_freelist = true);

    u32 ref_count() const { return m_ref_count.load(AK::memory_order_consume); }
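
    // The shared zero page and the lazy-committed page are singleton pages
    // owned by MemoryManager for zero-fill and commit-on-write mappings;
    // callers compare against them to decide whether a private copy is needed.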
    bool is_shared_zero_page() const;
    bool is_lazy_committed_page() const;

private:
    PhysicalPage(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist = true);
    ~PhysicalPage() = default;

    void return_to_freelist() const;
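
    // The count starts at 1: create() transfers that initial reference to
    // the returned NonnullRefPtr.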
    Atomic<u32> m_ref_count { 1 };
    bool m_may_return_to_freelist { true };
    bool m_supervisor { false };
    PhysicalAddress m_paddr;
};

}
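
// Usage sketch (illustrative; assumes a MemoryManager allocation API along
// the lines of SerenityOS's MM.allocate_user_physical_page()):
//
//     RefPtr<PhysicalPage> page = MM.allocate_user_physical_page();
//     if (!page)
//         return ENOMEM;
//     // RefPtr drives ref()/unref(); once the last owner lets go, unref()
//     // returns the page to the freelist and deletes the handle.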