#include <Kernel/FileSystem/FileSystem.h>
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/VMObject.h>
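
// Returns the VMObject already associated with this inode, if any; otherwise creates a
// new inode-backed VMObject and registers it on the inode so later callers share it.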
NonnullRefPtr<VMObject> VMObject::create_file_backed(RefPtr<Inode>&& inode)
{
    InterruptDisabler disabler;
    if (inode->vmo())
        return *inode->vmo();
    auto vmo = adopt(*new VMObject(move(inode)));
    vmo->inode()->set_vmo(*vmo);
    return vmo;
}
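
// Creates an anonymous VMObject, rounding the requested size up to a whole number of
// pages; the page slots start out null and are left for the page fault handler to populate.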
NonnullRefPtr<VMObject> VMObject::create_anonymous(size_t size)
{
    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
    return adopt(*new VMObject(size));
}
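
// Wraps an existing physical address range (typically device memory) in a VMObject and
// disables CPU caching for it.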
NonnullRefPtr<VMObject> VMObject::create_for_physical_range(PhysicalAddress paddr, size_t size)
{
    size = ceil_div(size, PAGE_SIZE) * PAGE_SIZE;
    auto vmo = adopt(*new VMObject(paddr, size));
    vmo->m_allow_cpu_caching = false;
    return vmo;
}
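
// clone() copy-constructs this VMObject; the copy shares the inode reference and the
// current set of physical pages. Any copy-on-write bookkeeping is presumably handled by
// the regions that map the clone rather than here.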
NonnullRefPtr<VMObject> VMObject::clone()
{
    return adopt(*new VMObject(*this));
}

VMObject::VMObject(VMObject& other)
    : m_name(other.m_name)
    , m_inode_offset(other.m_inode_offset)
    , m_size(other.m_size)
    , m_inode(other.m_inode)
    , m_physical_pages(other.m_physical_pages)
{
    MM.register_vmo(*this);
}

VMObject::VMObject(size_t size)
    : m_size(size)
{
    MM.register_vmo(*this);
    m_physical_pages.resize(page_count());
}

VMObject::VMObject(PhysicalAddress paddr, size_t size)
    : m_size(size)
{
    MM.register_vmo(*this);
    for (size_t i = 0; i < size; i += PAGE_SIZE) {
        m_physical_pages.append(PhysicalPage::create(paddr.offset(i), false, false));
    }
    ASSERT(m_physical_pages.size() == page_count());
}
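
// Inode-backed VMObject: size is the inode's size rounded up to a page multiple, and
// every page slot starts out null until the fault handler pages it in.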
VMObject::VMObject(RefPtr<Inode>&& inode)
    : m_inode(move(inode))
{
    ASSERT(m_inode);
    m_size = ceil_div(m_inode->size(), PAGE_SIZE) * PAGE_SIZE;
    m_physical_pages.resize(page_count());
    MM.register_vmo(*this);
}

VMObject::~VMObject()
{
    if (m_inode)
        ASSERT(m_inode->vmo() == this);
    MM.unregister_vmo(*this);
}

template<typename Callback>
void VMObject::for_each_region(Callback callback)
{
    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
    //        Perhaps VMObject could have a Vector<Region*> with all of its mappers?
    for (auto* region : MM.m_user_regions) {
        if (&region->vmo() == this)
            callback(*region);
    }
    for (auto* region : MM.m_kernel_regions) {
        if (&region->vmo() == this)
            callback(*region);
    }
}
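
// Called by the inode when its size changes: grow the page list with null entries (to be
// paged in on fault) or prune trailing pages, then remap every region mapping this VMObject.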
void VMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
{
    dbgprintf("VMObject::inode_size_changed: {%u:%u} %u -> %u\n",
        m_inode->fsid(), m_inode->index(),
        old_size, new_size);
    InterruptDisabler disabler;
    auto old_page_count = page_count();
    m_size = new_size;
    if (page_count() > old_page_count) {
        // Add null pages and let the fault handler page these in when that day comes.
        for (auto i = old_page_count; i < page_count(); ++i)
            m_physical_pages.append(nullptr);
    } else {
        // Prune the no-longer valid pages. I'm not sure this is actually correct behavior.
        for (auto i = page_count(); i < old_page_count; ++i)
            m_physical_pages.take_last();
    }
    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
    for_each_region([](Region& region) {
        ASSERT(region.page_directory());
        MM.remap_region(*region.page_directory(), region);
    });
}
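
// Called by the inode when its contents change. For now, simply drop every cached physical
// page so the next fault re-reads from the inode, then remap all mapping regions; the
// disabled block below sketches a partial update that would only touch the affected pages.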
void VMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const u8* data)
{
    (void)size;
    (void)data;
    InterruptDisabler disabler;
    ASSERT(offset >= 0);

    // FIXME: Only invalidate the parts that actually changed.
    for (auto& physical_page : m_physical_pages)
        physical_page = nullptr;

#if 0
    size_t current_offset = offset;
    size_t remaining_bytes = size;
    const u8* data_ptr = data;

    auto to_page_index = [](size_t offset) -> size_t {
        return offset / PAGE_SIZE;
    };

    if (current_offset & PAGE_MASK) {
        size_t page_index = to_page_index(current_offset);
        size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
        if (m_physical_pages[page_index]) {
            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
            memcpy(ptr, data_ptr, bytes_to_copy);
            MM.unquickmap_page();
        }
        current_offset += bytes_to_copy;
        data_ptr += bytes_to_copy;
        remaining_bytes -= bytes_to_copy;
    }

    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
        size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
        if (m_physical_pages[page_index]) {
            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
            memcpy(ptr, data_ptr, bytes_to_copy);
            MM.unquickmap_page();
        }
        current_offset += bytes_to_copy;
        data_ptr += bytes_to_copy;
    }
#endif

    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
    for_each_region([](Region& region) {
        ASSERT(region.page_directory());
        MM.remap_region(*region.page_directory(), region);
    });
}