author     Andreas Kling <kling@serenityos.org>   2021-09-06 01:36:14 +0200
committer  Andreas Kling <kling@serenityos.org>   2021-09-06 01:55:27 +0200
commit     75564b4a5f2b5452163571116ee9efaf5c3c65af (patch)
tree       ec672e92fcf3e7dca6ed51bce1c287319a9fc77d /Kernel/KBuffer.h
parent     cb71a7370836c3b70038e2f718ee87f6fc286229 (diff)
Kernel: Make kernel region allocators return KResultOr<NonnullOwnPtr<Region>>
This expands the reach of error propagation greatly throughout the kernel. Sadly, it also exposes the fact that we're allocating (and doing other fallible things) in constructors all over the place. This patch doesn't attempt to address that of course. That's work for our future selves.
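For context, the direction this moves in can be sketched roughly as follows. This is a minimal illustration, not code from the commit: it assumes MM.allocate_kernel_region() now returns KResultOr<NonnullOwnPtr<Memory::Region>> as the diff below suggests, and the helper name, region name, and include paths are hypothetical.

#include <Kernel/KResult.h>
#include <Kernel/Memory/MemoryManager.h>

// Minimal sketch (not part of this patch): a caller that itself returns KResultOr
// can forward the allocator's error upward instead of collapsing it to a null pointer.
// The helper name and the "Scratch" region name are made up for illustration.
static KResultOr<NonnullOwnPtr<Memory::Region>> allocate_scratch_region(size_t size, Memory::Region::Access access)
{
    auto region_or_error = MM.allocate_kernel_region(Memory::page_round_up(size), "Scratch", access, AllocationStrategy::Reserve);
    if (region_or_error.is_error())
        return region_or_error.error();     // propagate the KResult to our caller
    return region_or_error.release_value(); // NonnullOwnPtr<Memory::Region>
}

KBufferImpl's factory functions still return RefPtr and map allocation failure to nullptr, as the diff below shows, so only the allocation step itself gains the richer error information here.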
Diffstat (limited to 'Kernel/KBuffer.h')
-rw-r--r--   Kernel/KBuffer.h   20
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/Kernel/KBuffer.h b/Kernel/KBuffer.h
index 1d74ca963b..e3e005db1d 100644
--- a/Kernel/KBuffer.h
+++ b/Kernel/KBuffer.h
@@ -29,20 +29,21 @@ class KBufferImpl : public RefCounted<KBufferImpl> {
 public:
     static RefPtr<KBufferImpl> try_create_with_size(size_t size, Memory::Region::Access access, StringView name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
     {
-        auto region = MM.allocate_kernel_region(Memory::page_round_up(size), name, access, strategy);
-        if (!region)
+        auto region_or_error = MM.allocate_kernel_region(Memory::page_round_up(size), name, access, strategy);
+        if (region_or_error.is_error())
             return nullptr;
-        return adopt_ref_if_nonnull(new (nothrow) KBufferImpl(region.release_nonnull(), size, strategy));
+        return adopt_ref_if_nonnull(new (nothrow) KBufferImpl(region_or_error.release_value(), size, strategy));
     }
     static RefPtr<KBufferImpl> try_create_with_bytes(ReadonlyBytes bytes, Memory::Region::Access access, StringView name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
     {
-        auto region = MM.allocate_kernel_region(Memory::page_round_up(bytes.size()), name, access, strategy);
-        if (!region)
+        auto region_or_error = MM.allocate_kernel_region(Memory::page_round_up(bytes.size()), name, access, strategy);
+        if (region_or_error.is_error())
             return nullptr;
+        auto region = region_or_error.release_value();
         memcpy(region->vaddr().as_ptr(), bytes.data(), bytes.size());
-        return adopt_ref_if_nonnull(new (nothrow) KBufferImpl(region.release_nonnull(), bytes.size(), strategy));
+        return adopt_ref_if_nonnull(new (nothrow) KBufferImpl(move(region), bytes.size(), strategy));
     }
     static RefPtr<KBufferImpl> create_with_size(size_t size, Memory::Region::Access access, StringView name, AllocationStrategy strategy = AllocationStrategy::Reserve)
@@ -61,12 +62,13 @@ public:
     [[nodiscard]] bool expand(size_t new_capacity)
     {
-        auto new_region = MM.allocate_kernel_region(Memory::page_round_up(new_capacity), m_region->name(), m_region->access(), m_allocation_strategy);
-        if (!new_region)
+        auto new_region_or_error = MM.allocate_kernel_region(Memory::page_round_up(new_capacity), m_region->name(), m_region->access(), m_allocation_strategy);
+        if (new_region_or_error.is_error())
             return false;
+        auto new_region = new_region_or_error.release_value();
         if (m_size > 0)
             memcpy(new_region->vaddr().as_ptr(), data(), min(m_region->size(), m_size));
-        m_region = new_region.release_nonnull();
+        m_region = move(new_region);
         return true;
     }