author     Andreas Kling <awesomekling@gmail.com>    2019-11-03 00:10:24 +0100
committer  Andreas Kling <awesomekling@gmail.com>    2019-11-03 00:10:24 +0100
commit     1e36d899f12a3dbac485a6eda1185035e2a4d5a2 (patch)
tree       3f47faa5e4952de7e7c7dcd18112ff6c25d85222 /Kernel/FileSystem
parent     ddd83320156e7bff5703a00b63d11d812c2ed891 (diff)
download   serenity-1e36d899f12a3dbac485a6eda1185035e2a4d5a2.zip
Ext2FS: Use KBuffers for the cached bitmap blocks
Also cache the block group descriptor table in a KBuffer on file system initialization, instead of on first access. This reduces pressure on the kmalloc heap somewhat.
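In code terms, the lazy ByteBuffer fill that used to live in group_descriptor() becomes one eager KBuffer allocation in initialize(). A minimal sketch of the new path, condensed from the hunks below (the member names and helpers are the ones visible in the diff; error handling around the disk reads is elided, and the point about KBuffer living outside the kmalloc heap follows from the commit message rather than from code shown here):

    // Done once during Ext2FS::initialize(): size the block group descriptor
    // table (BGDT), allocate a KBuffer for it, and read it in from disk.
    unsigned blocks_to_read = ceil_div(m_block_group_count * (unsigned)sizeof(ext2_group_desc), block_size());
    BlockIndex first_block_of_bgdt = block_size() == 1024 ? 2 : 1;
    m_cached_group_descriptor_table = KBuffer::create_with_size(block_size() * blocks_to_read);
    read_blocks(first_block_of_bgdt, blocks_to_read, m_cached_group_descriptor_table.value().data());

    // After that, group_descriptor() is a plain array lookup into the cached
    // table (group indices are 1-based on disk, hence the -1):
    // return block_group_descriptors()[group_index - 1];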
Diffstat (limited to 'Kernel/FileSystem')
-rw-r--r--   Kernel/FileSystem/Ext2FileSystem.cpp   24
-rw-r--r--   Kernel/FileSystem/Ext2FileSystem.h     13
2 files changed, 15 insertions, 22 deletions
diff --git a/Kernel/FileSystem/Ext2FileSystem.cpp b/Kernel/FileSystem/Ext2FileSystem.cpp
index c1e8892ed7..a1b4593480 100644
--- a/Kernel/FileSystem/Ext2FileSystem.cpp
+++ b/Kernel/FileSystem/Ext2FileSystem.cpp
@@ -63,19 +63,7 @@ const ext2_group_desc& Ext2FS::group_descriptor(GroupIndex group_index) const
{
// FIXME: Should this fail gracefully somehow?
ASSERT(group_index <= m_block_group_count);
-
- if (!m_cached_group_descriptor_table) {
- LOCKER(m_lock);
- unsigned blocks_to_read = ceil_div(m_block_group_count * (unsigned)sizeof(ext2_group_desc), block_size());
- unsigned first_block_of_bgdt = block_size() == 1024 ? 2 : 1;
-#ifdef EXT2_DEBUG
- kprintf("ext2fs: block group count: %u, blocks-to-read: %u\n", m_block_group_count, blocks_to_read);
- kprintf("ext2fs: first block of BGDT: %u\n", first_block_of_bgdt);
-#endif
- m_cached_group_descriptor_table = ByteBuffer::create_uninitialized(block_size() * blocks_to_read);
- read_blocks(first_block_of_bgdt, blocks_to_read, m_cached_group_descriptor_table.data());
- }
- return reinterpret_cast<ext2_group_desc*>(m_cached_group_descriptor_table.data())[group_index - 1];
+ return block_group_descriptors()[group_index - 1];
}

bool Ext2FS::initialize()
@@ -113,8 +101,10 @@ bool Ext2FS::initialize()
return false;
}
- // Preheat the BGD cache.
- group_descriptor(0);
+ unsigned blocks_to_read = ceil_div(m_block_group_count * (unsigned)sizeof(ext2_group_desc), block_size());
+ BlockIndex first_block_of_bgdt = block_size() == 1024 ? 2 : 1;
+ m_cached_group_descriptor_table = KBuffer::create_with_size(block_size() * blocks_to_read);
+ read_blocks(first_block_of_bgdt, blocks_to_read, m_cached_group_descriptor_table.value().data());
#ifdef EXT2_DEBUG
for (unsigned i = 1; i <= m_block_group_count; ++i) {
@@ -489,7 +479,7 @@ void Ext2FS::flush_block_group_descriptor_table()
LOCKER(m_lock);
unsigned blocks_to_write = ceil_div(m_block_group_count * (unsigned)sizeof(ext2_group_desc), block_size());
unsigned first_block_of_bgdt = block_size() == 1024 ? 2 : 1;
- write_blocks(first_block_of_bgdt, blocks_to_write, m_cached_group_descriptor_table.data());
+ write_blocks(first_block_of_bgdt, blocks_to_write, (const u8*)block_group_descriptors());
}

void Ext2FS::flush_writes()
@@ -1222,7 +1212,7 @@ Ext2FS::CachedBitmap& Ext2FS::get_bitmap_block(BlockIndex bitmap_block_index)
return *cached_bitmap;
}
- auto block = ByteBuffer::create_uninitialized(block_size());
+ auto block = KBuffer::create_with_size(block_size());
bool success = read_block(bitmap_block_index, block.data());
ASSERT(success);
m_cached_bitmaps.append(make<CachedBitmap>(bitmap_block_index, move(block)));
diff --git a/Kernel/FileSystem/Ext2FileSystem.h b/Kernel/FileSystem/Ext2FileSystem.h
index 6bdb27590c..5b7cd41aa8 100644
--- a/Kernel/FileSystem/Ext2FileSystem.h
+++ b/Kernel/FileSystem/Ext2FileSystem.h
@@ -1,6 +1,7 @@
#pragma once

#include <AK/Bitmap.h>
+#include <Kernel/KBuffer.h>
#include <Kernel/FileSystem/DiskBackedFileSystem.h>
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/FileSystem/ext2_fs.h>
@@ -80,7 +81,9 @@ private:
explicit Ext2FS(NonnullRefPtr<DiskDevice>&&);
const ext2_super_block& super_block() const { return m_super_block; }
- const ext2_group_desc& group_descriptor(unsigned groupIndex) const;
+ const ext2_group_desc& group_descriptor(GroupIndex) const;
+ ext2_group_desc* block_group_descriptors() { return (ext2_group_desc*)m_cached_group_descriptor_table.value().data(); }
+ const ext2_group_desc* block_group_descriptors() const { return (const ext2_group_desc*)m_cached_group_descriptor_table.value().data(); }
void flush_block_group_descriptor_table();
unsigned first_block_of_group(unsigned groupIndex) const;
unsigned inodes_per_block() const;
@@ -130,7 +133,7 @@ private:
unsigned m_block_group_count { 0 };
mutable ext2_super_block m_super_block;
- mutable ByteBuffer m_cached_group_descriptor_table;
+ mutable Optional<KBuffer> m_cached_group_descriptor_table;
mutable HashMap<BlockIndex, RefPtr<Ext2FSInode>> m_inode_cache;
@@ -138,13 +141,13 @@ private:
bool m_block_group_descriptors_dirty { false };
struct CachedBitmap {
- CachedBitmap(BlockIndex bi, ByteBuffer&& buf)
+ CachedBitmap(BlockIndex bi, KBuffer&& buf)
: bitmap_block_index(bi)
- , buffer(buf)
+ , buffer(move(buf))
{}
BlockIndex bitmap_block_index { 0 };
bool dirty { false };
- ByteBuffer buffer;
+ KBuffer buffer;
Bitmap bitmap(u32 blocks_per_group) { return Bitmap::wrap(buffer.data(), blocks_per_group); }
};
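As a usage note on the new CachedBitmap: the bitmap(u32) accessor at the end of the struct is how callers get at the cached bits, wrapping the KBuffer's bytes in place via Bitmap::wrap() rather than copying them. A hedged sketch of a call site follows (get_bitmap_block(), the dirty flag, and the accessor come from the hunks above; the EXT2_BLOCKS_PER_GROUP argument and the bit manipulation are illustrative assumptions):

    // Hypothetical caller: get_bitmap_block() has already read and cached
    // this bitmap block in a KBuffer.
    auto& cached = get_bitmap_block(bitmap_block_index);
    // Build a Bitmap view over the cached bytes; nothing is copied, so
    // flipping bits below modifies the cached block directly.
    auto bits = cached.bitmap(EXT2_BLOCKS_PER_GROUP(&m_super_block));
    bool in_use = bits.get(bit_index);   // query one allocation bit
    bits.set(bit_index, true);           // mark the block as allocated
    cached.dirty = true;                 // remember to flush this bitmap block later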