author    Andreas Kling <awesomekling@gmail.com>    2019-09-30 10:31:06 +0200
committer Andreas Kling <awesomekling@gmail.com>    2019-09-30 10:34:50 +0200
commit    a61f6ccc2757b3b7711df5fd7374ae0fc0ac55d6 (patch)
tree      880a29153726a2b9c546d9f3bc770a61a9a3b070 /Kernel/FileSystem
parent    8f45a259fc9f8180b366cbaccac1af6d368e3b3a (diff)
Kernel: Implement a simpler, bigger cache for DiskBackedFS
The hashmap cache was ridiculously slow and hurt us more than it helped us. This patch replaces it with a flat memory cache that keeps up to 10'000 blocks in cache with a simple dirty bit. The syncd task will wake up periodically and call flush_writes() on all file systems, which now causes us to traverse the cache and write all dirty blocks to disk. There's a ton of room for improvement here, but this itself is already drastically better when doing repeated GCC invocations.
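The core idea is small enough to sketch in isolation. Below is a minimal, user-space approximation of the flat cache described above (a fixed pool of entries backed by one allocation, lookup by linear scan, eviction of the oldest clean entry); it is not the kernel code from the diff further down, and it substitutes std::vector and std::time() for the kernel's KBuffer and kgettimeofday(). All names here are illustrative, not the actual kernel API.

// Illustrative sketch only: a fixed pool of cache entries backed by one flat
// allocation, looked up by linear scan and evicted oldest-clean-first.
#include <cassert>
#include <cstdint>
#include <ctime>
#include <vector>

struct CacheEntry {
    uint32_t timestamp { 0 };
    uint32_t block_index { 0 };
    uint8_t* data { nullptr }; // points into the flat backing buffer
    bool has_data { false };
    bool is_dirty { false };
};

class FlatBlockCache {
public:
    FlatBlockCache(size_t entry_count, size_t block_size)
        : m_backing(entry_count * block_size)
        , m_entries(entry_count)
    {
        // Every entry gets a fixed slice of the single backing allocation.
        for (size_t i = 0; i < entry_count; ++i)
            m_entries[i].data = m_backing.data() + i * block_size;
    }

    // Return the entry for block_index if cached; otherwise repurpose the
    // oldest clean entry (dirty entries are never evicted until flushed).
    CacheEntry& get(uint32_t block_index)
    {
        auto now = static_cast<uint32_t>(std::time(nullptr));
        CacheEntry* oldest_clean = nullptr;
        for (auto& entry : m_entries) {
            if (entry.has_data && entry.block_index == block_index) {
                entry.timestamp = now;
                return entry;
            }
            if (!entry.is_dirty && (!oldest_clean || entry.timestamp < oldest_clean->timestamp))
                oldest_clean = &entry;
        }
        assert(oldest_clean); // an all-dirty cache would need a flush first
        oldest_clean->timestamp = now;
        oldest_clean->block_index = block_index;
        oldest_clean->has_data = false;
        oldest_clean->is_dirty = false;
        return *oldest_clean;
    }

private:
    std::vector<uint8_t> m_backing; // entry_count * block_size bytes
    std::vector<CacheEntry> m_entries;
};

The kernel version in the diff below additionally tracks a cache-wide dirty flag so flush_writes() can return early when nothing has changed.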
Diffstat (limited to 'Kernel/FileSystem')
-rw-r--r--  Kernel/FileSystem/DiskBackedFileSystem.cpp | 161
-rw-r--r--  Kernel/FileSystem/DiskBackedFileSystem.h   |   6
2 files changed, 95 insertions(+), 72 deletions(-)
diff --git a/Kernel/FileSystem/DiskBackedFileSystem.cpp b/Kernel/FileSystem/DiskBackedFileSystem.cpp
index 59d51ff4b4..85b791adb2 100644
--- a/Kernel/FileSystem/DiskBackedFileSystem.cpp
+++ b/Kernel/FileSystem/DiskBackedFileSystem.cpp
@@ -1,50 +1,77 @@
-#include "DiskBackedFileSystem.h"
-#include <AK/InlineLRUCache.h>
#include <Kernel/Arch/i386/CPU.h>
+#include <Kernel/FileSystem/DiskBackedFileSystem.h>
+#include <Kernel/KBuffer.h>
#include <Kernel/Process.h>
//#define DBFS_DEBUG
-struct BlockIdentifier {
- unsigned fsid { 0 };
- unsigned index { 0 };
-
- bool operator==(const BlockIdentifier& other) const { return fsid == other.fsid && index == other.index; }
+struct CacheEntry {
+ u32 timestamp { 0 };
+ u32 block_index { 0 };
+ u8* data { nullptr };
+ bool has_data { false };
+ bool is_dirty { false };
};
-namespace AK {
+class DiskCache {
+public:
+ explicit DiskCache(size_t block_size)
+ : m_cached_block_data(KBuffer::create_with_size(m_entry_count * block_size))
+ {
+ m_entries = (CacheEntry*)kmalloc_eternal(m_entry_count * sizeof(CacheEntry));
+ for (size_t i = 0; i < m_entry_count; ++i) {
+ m_entries[i].data = m_cached_block_data.data() + i * block_size;
+ }
+ }
-template<>
-struct Traits<BlockIdentifier> : public GenericTraits<BlockIdentifier> {
- static unsigned hash(const BlockIdentifier& block_id) { return pair_int_hash(block_id.fsid, block_id.index); }
- static void dump(const BlockIdentifier& block_id) { kprintf("[block %02u:%08u]", block_id.fsid, block_id.index); }
-};
+ ~DiskCache() {}
-}
+ bool is_dirty() const { return m_dirty; }
+ void set_dirty(bool b) { m_dirty = b; }
-class CachedBlock : public InlineLinkedListNode<CachedBlock> {
-public:
- CachedBlock(const BlockIdentifier& block_id, const ByteBuffer& buffer)
- : m_key(block_id)
- , m_buffer(buffer)
+ CacheEntry& get(u32 block_index) const
{
+ auto now = kgettimeofday().tv_sec;
+
+ CacheEntry* oldest_clean_entry = nullptr;
+ for (size_t i = 0; i < m_entry_count; ++i) {
+ auto& entry = m_entries[i];
+ if (entry.block_index == block_index) {
+ entry.timestamp = now;
+ return entry;
+ }
+ if (!entry.is_dirty) {
+ if (!oldest_clean_entry)
+ oldest_clean_entry = &entry;
+ else if (entry.timestamp < oldest_clean_entry->timestamp)
+ oldest_clean_entry = &entry;
+ }
+ }
+ // FIXME: What if every single entry was dirty though :(
+ ASSERT(oldest_clean_entry);
+
+ // Replace the oldest clean entry.
+ auto& new_entry = *oldest_clean_entry;
+ new_entry.timestamp = now;
+ new_entry.block_index = block_index;
+ new_entry.has_data = false;
+ new_entry.is_dirty = false;
+ return new_entry;
}
- BlockIdentifier m_key;
- CachedBlock* m_next { nullptr };
- CachedBlock* m_prev { nullptr };
+ template<typename Callback>
+ void for_each_entry(Callback callback)
+ {
+ for (size_t i = 0; i < m_entry_count; ++i)
+ callback(m_entries[i]);
+ }
- ByteBuffer m_buffer;
+ size_t m_entry_count { 10000 };
+ KBuffer m_cached_block_data;
+ CacheEntry* m_entries { nullptr };
+ bool m_dirty { false };
};
-Lockable<InlineLRUCache<BlockIdentifier, CachedBlock>>& block_cache()
-{
- static Lockable<InlineLRUCache<BlockIdentifier, CachedBlock>>* s_cache;
- if (!s_cache)
- s_cache = new Lockable<InlineLRUCache<BlockIdentifier, CachedBlock>>;
- return *s_cache;
-}
-
DiskBackedFS::DiskBackedFS(NonnullRefPtr<DiskDevice>&& device)
: m_device(move(device))
{
@@ -61,18 +88,12 @@ bool DiskBackedFS::write_block(unsigned index, const ByteBuffer& data)
#endif
ASSERT(data.size() == block_size());
- {
- LOCKER(block_cache().lock());
- if (auto* cached_block = block_cache().resource().get({ fsid(), index }))
- cached_block->m_buffer = data;
- }
-
- LOCKER(m_lock);
- m_write_cache.set(index, data.isolated_copy());
-
- if (m_write_cache.size() >= 32)
- flush_writes();
+ auto& entry = cache().get(index);
+ memcpy(entry.data, data.data(), data.size());
+ entry.is_dirty = true;
+ entry.has_data = true;
+ cache().set_dirty(true);
return true;
}
@@ -92,31 +113,14 @@ ByteBuffer DiskBackedFS::read_block(unsigned index) const
kprintf("DiskBackedFileSystem::read_block %u\n", index);
#endif
- {
- LOCKER(m_lock);
- if (auto it = m_write_cache.find(index); it != m_write_cache.end())
- return it->value;
- }
-
- {
- LOCKER(block_cache().lock());
- if (auto* cached_block = block_cache().resource().get({ fsid(), index }))
- return cached_block->m_buffer;
- }
-
- auto buffer = ByteBuffer::create_uninitialized(block_size());
- //kprintf("created block buffer with size %u\n", block_size());
- DiskOffset base_offset = static_cast<DiskOffset>(index) * static_cast<DiskOffset>(block_size());
- auto* buffer_pointer = buffer.data();
- bool success = device().read(base_offset, block_size(), buffer_pointer);
- ASSERT(success);
- ASSERT(buffer.size() == block_size());
-
- {
- LOCKER(block_cache().lock());
- block_cache().resource().put({ fsid(), index }, CachedBlock({ fsid(), index }, buffer));
+ auto& entry = cache().get(index);
+ if (!entry.has_data) {
+ DiskOffset base_offset = static_cast<DiskOffset>(index) * static_cast<DiskOffset>(block_size());
+ bool success = device().read(base_offset, block_size(), entry.data);
+ entry.has_data = true;
+ ASSERT(success);
}
- return buffer;
+ return ByteBuffer::copy(entry.data, block_size());
}
ByteBuffer DiskBackedFS::read_blocks(unsigned index, unsigned count) const
@@ -142,9 +146,24 @@ ByteBuffer DiskBackedFS::read_blocks(unsigned index, unsigned count) const
void DiskBackedFS::flush_writes()
{
LOCKER(m_lock);
- for (auto& it : m_write_cache) {
- DiskOffset base_offset = static_cast<DiskOffset>(it.key) * static_cast<DiskOffset>(block_size());
- device().write(base_offset, block_size(), it.value.data());
- }
- m_write_cache.clear();
+ if (!cache().is_dirty())
+ return;
+ u32 count = 0;
+ cache().for_each_entry([&](CacheEntry& entry) {
+ if (!entry.is_dirty)
+ return;
+ DiskOffset base_offset = static_cast<DiskOffset>(entry.block_index) * static_cast<DiskOffset>(block_size());
+ device().write(base_offset, block_size(), entry.data);
+ ++count;
+ entry.is_dirty = false;
+ });
+ cache().set_dirty(false);
+ dbg() << class_name() << ": " << "Flushed " << count << " blocks to disk";
+}
+
+DiskCache& DiskBackedFS::cache() const
+{
+ if (!m_cache)
+ m_cache = make<DiskCache>(block_size());
+ return *m_cache;
}
diff --git a/Kernel/FileSystem/DiskBackedFileSystem.h b/Kernel/FileSystem/DiskBackedFileSystem.h
index 1b6ee8189c..fff5b11945 100644
--- a/Kernel/FileSystem/DiskBackedFileSystem.h
+++ b/Kernel/FileSystem/DiskBackedFileSystem.h
@@ -3,6 +3,8 @@
#include "FileSystem.h"
#include <AK/ByteBuffer.h>
+class DiskCache;
+
class DiskBackedFS : public FS {
public:
virtual ~DiskBackedFS() override;
@@ -24,6 +26,8 @@ protected:
bool write_blocks(unsigned index, unsigned count, const ByteBuffer&);
private:
+ DiskCache& cache() const;
+
NonnullRefPtr<DiskDevice> m_device;
- HashMap<unsigned, ByteBuffer> m_write_cache;
+ mutable OwnPtr<DiskCache> m_cache;
};
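
For completeness, here is a similarly hedged, self-contained sketch of the flush pass that the periodic syncd wake-up triggers: walk every cache entry, write back only the dirty ones, then clear their dirty bits. The "device" is just an in-memory byte vector and every name is made up for illustration; the real code path is DiskBackedFS::flush_writes() in the diff above.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

struct CacheEntry {
    uint32_t block_index { 0 };
    std::vector<uint8_t> data;
    bool is_dirty { false };
};

// Write one cached block back to its byte offset on the simulated device.
static void write_block_to_device(std::vector<uint8_t>& device, const CacheEntry& entry, size_t block_size)
{
    size_t base_offset = static_cast<size_t>(entry.block_index) * block_size;
    std::memcpy(device.data() + base_offset, entry.data.data(), block_size);
}

// The flush pass: dirty entries are written out and marked clean, everything
// else is skipped, mirroring what flush_writes() does in the diff above.
static size_t flush_writes(std::vector<uint8_t>& device, std::vector<CacheEntry>& cache, size_t block_size)
{
    size_t flushed = 0;
    for (auto& entry : cache) {
        if (!entry.is_dirty)
            continue;
        write_block_to_device(device, entry, block_size);
        entry.is_dirty = false;
        ++flushed;
    }
    return flushed;
}

int main()
{
    constexpr size_t block_size = 512;
    std::vector<uint8_t> device(16 * block_size, 0);

    // Two cached blocks; only block 3 has pending writes.
    std::vector<CacheEntry> cache(2);
    cache[0].block_index = 3;
    cache[0].data.assign(block_size, 0xAA);
    cache[0].is_dirty = true;
    cache[1].block_index = 7;
    cache[1].data.assign(block_size, 0xBB);
    cache[1].is_dirty = false;

    size_t flushed = flush_writes(device, cache, block_size);
    std::printf("flushed %zu block(s); device byte at block 3 = 0x%02X\n", flushed, device[3 * block_size]);
    return 0;
}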