author     sin-ack <sin-ack@users.noreply.github.com>    2022-09-14 19:45:36 +0000
committer  Andreas Kling <kling@serenityos.org>          2022-09-15 12:01:16 +0200
commit     a19209559417a68d5eba330a54b3c82452251620
tree       1977bade6b77dda5a90ff745a0671556393724f2
parent     d3979b0bbdbe2c16fe5952dbe5969c7d3d65132b
LibCore: Rewrite Core::Stream::read_all_impl
The previous version relied on manually setting the amount of data to read for the next chunk and was unclear overall. The new version uses the Bytes API to vastly improve readability, and fixes a bug where, when reading from files, a single read that was not the same size as the block size would cause the byte buffer to be resized incorrectly, corrupting the output.
-rw-r--r--  Userland/Libraries/LibCore/Stream.cpp | 25
1 file changed, 10 insertions(+), 15 deletions(-)
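To make the new control flow easier to follow outside of the diff, here is a minimal sketch of the same pattern using standard C++ types (std::vector and std::span) in place of AK::ByteBuffer and AK::Bytes. The names read_block, is_eof, and read_all_sketch are hypothetical stand-ins for illustration, not the actual LibCore API:

// Minimal sketch of the rewritten read loop, using standard C++ types
// (std::vector / std::span) instead of AK::ByteBuffer / AK::Bytes.
// read_block and is_eof are hypothetical stand-ins for Stream::read()
// and Stream::is_eof(); this is an illustration, not LibCore code.
#include <cstddef>
#include <functional>
#include <span>
#include <vector>

std::vector<unsigned char> read_all_sketch(
    std::function<std::size_t(std::span<unsigned char>)> read_block,
    std::function<bool()> is_eof,
    std::size_t block_size = 4096)
{
    std::vector<unsigned char> data;
    std::size_t total_read = 0;
    std::span<unsigned char> buffer; // unwritten tail of the current block

    while (!is_eof()) {
        if (buffer.empty()) {
            // Grow the backing storage by one block and point `buffer` at the
            // freshly added, not-yet-written region (this mirrors what
            // ByteBuffer::get_bytes_for_writing() does in the real code).
            data.resize(data.size() + block_size);
            buffer = std::span(data).subspan(data.size() - block_size);
        }
        std::size_t nread = read_block(buffer); // may fill only part of `buffer`
        total_read += nread;
        buffer = buffer.subspan(nread); // keep only the unwritten remainder
    }

    // Trim exactly once, after the loop; a short read in the middle of a file
    // can no longer shrink the buffer while it is still being filled.
    data.resize(total_read);
    return data;
}

The key difference from the old loop is that the final trim happens once after EOF, rather than on every read that comes back shorter than the block size.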
diff --git a/Userland/Libraries/LibCore/Stream.cpp b/Userland/Libraries/LibCore/Stream.cpp
index cfa6da77b4..22affc4bfa 100644
--- a/Userland/Libraries/LibCore/Stream.cpp
+++ b/Userland/Libraries/LibCore/Stream.cpp
@@ -55,26 +55,21 @@ ErrorOr<ByteBuffer> Stream::read_all(size_t block_size)
 ErrorOr<ByteBuffer> Stream::read_all_impl(size_t block_size, size_t expected_file_size)
 {
     ByteBuffer data;
-    data.ensure_capacity(file_size);
+    data.ensure_capacity(expected_file_size);
-    size_t total_read {};
-    size_t next_reading_size { block_size };
-    for (Span<u8> chunk; !is_eof();) {
-        if (next_reading_size == block_size)
-            chunk = TRY(data.get_bytes_for_writing(next_reading_size));
-        auto const nread = TRY(read(chunk)).size();
-
-        next_reading_size -= nread;
-
-        if (next_reading_size == 0)
-            next_reading_size = block_size;
+    size_t total_read = 0;
+    Bytes buffer;
+    while (!is_eof()) {
+        if (buffer.is_empty()) {
+            buffer = TRY(data.get_bytes_for_writing(block_size));
+        }
+        auto nread = TRY(read(buffer)).size();
         total_read += nread;
-
-        if (nread < block_size)
-            data.resize(total_read);
+        buffer = buffer.slice(nread);
     }
+    data.resize(total_read);
     return data;
 }
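For context, here is a hedged example of how this code path is typically reached from a caller. The Core::Stream::File names below reflect the LibCore API roughly as it existed around this commit; they are an assumption for illustration and are not part of this change:

// Assumed caller-side usage; Core::Stream::File::open() and OpenMode::Read
// are believed to match the LibCore API of this era, but are not taken from
// this commit and should be treated as an approximation.
#include <AK/ByteBuffer.h>
#include <AK/Error.h>
#include <AK/StringView.h>
#include <LibCore/Stream.h>

ErrorOr<ByteBuffer> slurp(StringView path)
{
    auto file = TRY(Core::Stream::File::open(path, Core::Stream::OpenMode::Read));
    // read_all() drives read_all_impl(); with this fix the returned buffer is
    // trimmed to exactly the number of bytes read, even when the final read()
    // is shorter than the block size.
    return file->read_all();
}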