path: root/Kernel/VM/InodeVMObject.cpp
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/VM/InodeVMObject.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/Region.h>

NonnullRefPtr<InodeVMObject> InodeVMObject::create_with_inode(Inode& inode)
{
    InterruptDisabler disabler;
    if (inode.vmobject())
        return *inode.vmobject();
    auto vmo = adopt(*new InodeVMObject(inode));
    vmo->inode().set_vmo(*vmo);
    return vmo;
}
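
// Usage sketch (hypothetical caller, for illustration only; the region
// allocation helper named here is assumed, not an API defined in this file):
//
//     auto vmobject = InodeVMObject::create_with_inode(inode);
//     auto* region = allocate_region_with_vmobject(range, move(vmobject), "mmap");
//
// Every mapping of the same inode ends up sharing one InodeVMObject, and
// therefore one set of cached physical pages.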

NonnullRefPtr<VMObject> InodeVMObject::clone()
{
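    // NOTE: The VMObject copy constructor copies the physical page list, so the
    //       clone starts out sharing this object's cached pages.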
    return adopt(*new InodeVMObject(*this));
}

InodeVMObject::InodeVMObject(Inode& inode)
    : VMObject(inode.size())
    , m_inode(inode)
{
}

InodeVMObject::InodeVMObject(const InodeVMObject& other)
    : VMObject(other)
    , m_inode(other.m_inode)
{
}

InodeVMObject::~InodeVMObject()
{
    ASSERT(inode().vmobject() == this);
}

void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
{
    dbgprintf("VMObject::inode_size_changed: {%u:%u} %u -> %u\n",
        m_inode->fsid(), m_inode->index(),
        old_size, new_size);

    InterruptDisabler disabler;

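    // Grow or shrink the page list to cover the new inode size, rounded up to
    // whole pages. For example, with 4 KiB pages, new_size = 5000 gives
    // PAGE_ROUND_UP(5000) = 8192, i.e. a 2-page list.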
    auto new_page_count = PAGE_ROUND_UP(new_size) / PAGE_SIZE;
    m_physical_pages.resize(new_page_count);

    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
    for_each_region([](Region& region) {
        ASSERT(region.page_directory());
        MM.remap_region(*region.page_directory(), region);
    });
}

void InodeVMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const u8* data)
{
    (void)size;
    (void)data;
    InterruptDisabler disabler;
    ASSERT(offset >= 0);

    // FIXME: Only invalidate the parts that actually changed.
    for (auto& physical_page : m_physical_pages)
        physical_page = nullptr;

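    // The disabled block below sketches a finer-grained alternative: copy the
    // changed byte range directly into any already-cached physical pages via
    // the quickmap window, instead of throwing away the whole cache.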
#if 0
    size_t current_offset = offset;
    size_t remaining_bytes = size;
    const u8* data_ptr = data;

    auto to_page_index = [] (size_t offset) -> size_t {
        return offset / PAGE_SIZE;
    };

    // Handle a leading partial page if the write doesn't start on a page boundary.
    if (current_offset % PAGE_SIZE) {
        size_t page_index = to_page_index(current_offset);
        size_t offset_in_page = current_offset % PAGE_SIZE;
        size_t bytes_to_copy = min(remaining_bytes, PAGE_SIZE - offset_in_page);
        if (m_physical_pages[page_index]) {
            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
            memcpy(ptr + offset_in_page, data_ptr, bytes_to_copy);
            MM.unquickmap_page();
        }
        current_offset += bytes_to_copy;
        data_ptr += bytes_to_copy;
        remaining_bytes -= bytes_to_copy;
    }

    // Copy the remaining whole (and possibly one trailing partial) pages.
    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size() && remaining_bytes > 0; ++page_index) {
        size_t bytes_to_copy = min(remaining_bytes, (size_t)PAGE_SIZE);
        if (m_physical_pages[page_index]) {
            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
            memcpy(ptr, data_ptr, bytes_to_copy);
            MM.unquickmap_page();
        }
        current_offset += bytes_to_copy;
        data_ptr += bytes_to_copy;
        remaining_bytes -= bytes_to_copy;
    }
#endif

    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
    for_each_region([](Region& region) {
        ASSERT(region.page_directory());
        MM.remap_region(*region.page_directory(), region);
    });
}

template<typename Callback>
void VMObject::for_each_region(Callback callback)
{
    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
    //        Perhaps VMObject could have a Vector<Region*> with all of its mappers?
    for (auto& region : MM.m_user_regions) {
        if (&region.vmobject() == this)
            callback(region);
    }
    for (auto& region : MM.m_kernel_regions) {
        if (&region.vmobject() == this)
            callback(region);
    }
}
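
// A sketch of what the FIXME above suggests (hypothetical, not part of the
// current code): VMObject could track its mappers directly, with Regions
// registering and unregistering themselves as they are created and destroyed,
// e.g.:
//
//     Vector<Region*> m_regions;
//     void add_region(Region& region) { m_regions.append(&region); }
//     void remove_region(Region& region) { m_regions.remove_first_matching([&](auto* entry) { return entry == &region; }); }
//
// Then for_each_region() would only visit the Regions that actually map this
// VMObject, instead of walking every region in the system.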