Diffstat (limited to 'Userland/Libraries/LibGfx/ImageFormats')
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/BMPLoader.cpp               1588
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/BMPLoader.h                   47
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/BMPWriter.cpp                177
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/BMPWriter.h                   51
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/DDSLoader.cpp                710
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/DDSLoader.h                  269
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/GIFLoader.cpp                673
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/GIFLoader.h                   39
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/ICOLoader.cpp                281
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/ICOLoader.h                   39
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/ImageDecoder.cpp             101
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/ImageDecoder.h                71
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/JPEGLoader.cpp              1625
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/JPEGLoader.h                  40
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/PBMLoader.cpp                 67
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/PBMLoader.h                   26
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/PGMLoader.cpp                 68
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/PGMLoader.h                   27
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/PNGLoader.cpp               1111
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/PNGLoader.h                   38
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/PNGShared.h                   59
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/PNGWriter.cpp                284
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/PNGWriter.h                   44
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/PPMLoader.cpp                 72
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/PPMLoader.h                   27
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/PortableFormatWriter.cpp      54
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/PortableFormatWriter.h        38
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/PortableImageLoaderCommon.h  272
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/PortableImageMapLoader.h     194
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/QOILoader.cpp                266
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/QOILoader.h                   70
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/QOIWriter.cpp                225
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/QOIWriter.h                   40
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/TGALoader.cpp                370
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/TGALoader.h                   38
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/WebPLoader.cpp               612
-rw-r--r--  Userland/Libraries/LibGfx/ImageFormats/WebPLoader.h                  38
37 files changed, 9751 insertions, 0 deletions
diff --git a/Userland/Libraries/LibGfx/ImageFormats/BMPLoader.cpp b/Userland/Libraries/LibGfx/ImageFormats/BMPLoader.cpp
new file mode 100644
index 0000000000..76c9e25d88
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/BMPLoader.cpp
@@ -0,0 +1,1588 @@
+/*
+ * Copyright (c) 2020, Matthew Olsson <mattco@serenityos.org>
+ * Copyright (c) 2022, Bruno Conde <brunompconde@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/BuiltinWrappers.h>
+#include <AK/Debug.h>
+#include <AK/DeprecatedString.h>
+#include <AK/Error.h>
+#include <AK/Function.h>
+#include <AK/Try.h>
+#include <AK/Vector.h>
+#include <LibGfx/ImageFormats/BMPLoader.h>
+
+namespace Gfx {
+
+const u8 bmp_header_size = 14;
+const u32 color_palette_limit = 1024;
+
+// Compression flags
+// https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-wmf/4e588f70-bd92-4a6f-b77f-35d0feaf7a57
+struct Compression {
+ enum : u32 {
+ RGB = 0,
+ RLE8,
+ RLE4,
+ BITFIELDS,
+ RLE24, // doubles as JPEG for V4+, but that is unsupported
+ PNG,
+ ALPHABITFIELDS,
+ CMYK = 11,
+ CMYKRLE8,
+ CMYKRLE4,
+ };
+};
+
+struct DIBCore {
+    // u16 for BITMAPCOREHEADER, but i32 for everything else. If the DIB type is
+    // BITMAPCOREHEADER, this is range checked.
+ i32 width;
+ i32 height;
+ u16 bpp;
+};
+
+struct DIBInfo {
+ u32 compression { Compression::RGB };
+ u32 image_size { 0 };
+ i32 horizontal_resolution { 0 };
+ i32 vertical_resolution { 0 };
+ u32 number_of_palette_colors { 0 };
+ u32 number_of_important_palette_colors { number_of_palette_colors };
+
+    // Introduced in the BITMAPV2INFOHEADER and would ideally be stored in the
+    // DIBV2 struct; however, with a compression value of BI_BITFIELDS or
+    // BI_ALPHABITFIELDS, these can also be specified alongside the Info header.
+ Vector<u32> masks;
+ Vector<i8> mask_shifts;
+ Vector<u8> mask_sizes;
+};
+
+struct DIBOSV2 {
+ u16 recording;
+ u16 halftoning;
+ u16 size1;
+ u16 size2;
+};
+
+template<typename T>
+struct Endpoint {
+ T x;
+ T y;
+ T z;
+};
+
+}
+
+namespace AK {
+
+template<typename T>
+struct Formatter<Gfx::Endpoint<T>> : Formatter<StringView> {
+ ErrorOr<void> format(FormatBuilder& builder, Gfx::Endpoint<T> const& value)
+ {
+ return Formatter<StringView>::format(builder, DeprecatedString::formatted("({}, {}, {})", value.x, value.y, value.z));
+ }
+};
+
+}
+
+namespace Gfx {
+
+// CALIBRATED_RGB, sRGB, WINDOWS_COLOR_SPACE values are from
+// https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-wmf/eb4bbd50-b3ce-4917-895c-be31f214797f
+// PROFILE_LINKED, PROFILE_EMBEDDED values are from
+// https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-wmf/3c289fe1-c42e-42f6-b125-4b5fc49a2b20
+struct ColorSpace {
+ enum : u32 {
+ // "This value implies that endpoints and gamma values are given in the appropriate fields" in DIBV4.
+ // The only valid value in v4 bmps.
+ CALIBRATED_RGB = 0,
+
+ // "Specifies that the bitmap is in sRGB color space."
+ sRGB = 0x73524742, // 'sRGB'
+
+ // "This value indicates that the bitmap is in the system default color space, sRGB."
+ WINDOWS_COLOR_SPACE = 0x57696E20, // 'Win '
+
+ // "This value indicates that bV5ProfileData points to the file name of the profile to use
+ // (gamma and endpoints values are ignored)."
+ LINKED = 0x4C494E4B, // 'LINK'
+
+ // "This value indicates that bV5ProfileData points to a memory buffer that contains the profile to be used
+ // (gamma and endpoints values are ignored)."
+ EMBEDDED = 0x4D424544, // 'MBED'
+ };
+};
+
+// https://learn.microsoft.com/en-us/windows/win32/api/wingdi/ns-wingdi-bitmapv4header
+struct DIBV4 {
+ u32 color_space { 0 };
+ Endpoint<i32> red_endpoint { 0, 0, 0 };
+ Endpoint<i32> green_endpoint { 0, 0, 0 };
+ Endpoint<i32> blue_endpoint { 0, 0, 0 };
+ Endpoint<u32> gamma_endpoint { 0, 0, 0 };
+};
+
+// https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-wmf/9fec0834-607d-427d-abd5-ab240fb0db38
+struct GamutMappingIntent {
+ enum : u32 {
+ // "Specifies that the white point SHOULD be maintained.
+ // Typically used when logical colors MUST be matched to their nearest physical color in the destination color gamut.
+ //
+ // Intent: Match
+ //
+ // ICC name: Absolute Colorimetric"
+ ABS_COLORIMETRIC = 8,
+
+ // "Specifies that saturation SHOULD be maintained.
+ // Typically used for business charts and other situations in which dithering is not required.
+ //
+ // Intent: Graphic
+ //
+ // ICC name: Saturation"
+ BUSINESS = 1,
+
+ // "Specifies that a colorimetric match SHOULD be maintained.
+ // Typically used for graphic designs and named colors.
+ //
+ // Intent: Proof
+ //
+ // ICC name: Relative Colorimetric"
+ GRAPHICS = 2,
+
+ // "Specifies that contrast SHOULD be maintained.
+ // Typically used for photographs and natural images.
+ //
+ // Intent: Picture
+ //
+ // ICC name: Perceptual"
+ IMAGES = 4,
+ };
+};
+
+// https://learn.microsoft.com/en-us/windows/win32/api/wingdi/ns-wingdi-bitmapv5header
+struct DIBV5 {
+ u32 intent { 0 };
+ u32 profile_data { 0 };
+ u32 profile_size { 0 };
+};
+
+struct DIB {
+ DIBCore core;
+ DIBInfo info;
+ DIBOSV2 osv2;
+ DIBV4 v4;
+ DIBV5 v5;
+};
+
+enum class DIBType {
+ Core = 0,
+ OSV2Short,
+ OSV2,
+ Info,
+ V2,
+ V3,
+ V4,
+ V5,
+};
+
+struct BMPLoadingContext {
+ enum class State {
+ NotDecoded = 0,
+ HeaderDecoded,
+ DIBDecoded,
+ ColorTableDecoded,
+ PixelDataDecoded,
+ Error,
+ };
+ State state { State::NotDecoded };
+
+ u8 const* file_bytes { nullptr };
+ size_t file_size { 0 };
+ u32 data_offset { 0 };
+
+ bool is_included_in_ico { false };
+
+ DIB dib;
+ DIBType dib_type;
+
+ Vector<u32> color_table;
+ RefPtr<Gfx::Bitmap> bitmap;
+
+ u32 dib_size() const
+ {
+ switch (dib_type) {
+ case DIBType::Core:
+ return 12;
+ case DIBType::OSV2Short:
+ return 16;
+ case DIBType::OSV2:
+ return 64;
+ case DIBType::Info:
+ return 40;
+ case DIBType::V2:
+ return 52;
+ case DIBType::V3:
+ return 56;
+ case DIBType::V4:
+ return 108;
+ case DIBType::V5:
+ return 124;
+ }
+
+ VERIFY_NOT_REACHED();
+ }
+};
+
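+// Minimal little-endian reader over the raw BMP bytes. All multi-byte fields
+// in a BMP file are stored little-endian, so read_u16()/read_u32() below
+// assemble values starting from the low byte.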
+class InputStreamer {
+public:
+ InputStreamer(u8 const* data, size_t size)
+ : m_data_ptr(data)
+ , m_size_remaining(size)
+ {
+ }
+
+ u8 read_u8()
+ {
+ VERIFY(m_size_remaining >= 1);
+ m_size_remaining--;
+ return *(m_data_ptr++);
+ }
+
+ u16 read_u16()
+ {
+ return read_u8() | (read_u8() << 8);
+ }
+
+ u32 read_u24()
+ {
+ return read_u8() | (read_u8() << 8) | (read_u8() << 16);
+ }
+
+ i32 read_i32()
+ {
+ return static_cast<i32>(read_u16() | (read_u16() << 16));
+ }
+
+ u32 read_u32()
+ {
+ return read_u16() | (read_u16() << 16);
+ }
+
+ void drop_bytes(u8 num_bytes)
+ {
+ VERIFY(m_size_remaining >= num_bytes);
+ m_size_remaining -= num_bytes;
+ m_data_ptr += num_bytes;
+ }
+
+ bool at_end() const { return !m_size_remaining; }
+
+ bool has_u8() const { return m_size_remaining >= 1; }
+ bool has_u16() const { return m_size_remaining >= 2; }
+ bool has_u24() const { return m_size_remaining >= 3; }
+ bool has_u32() const { return m_size_remaining >= 4; }
+
+ size_t remaining() const { return m_size_remaining; }
+
+private:
+ u8 const* m_data_ptr { nullptr };
+ size_t m_size_remaining { 0 };
+};
+
+// Lookup table for distributing all possible 2-bit numbers evenly into 8-bit numbers
+static u8 scaling_factors_2bit[4] = {
+ 0x00,
+ 0x55,
+ 0xaa,
+ 0xff,
+};
+
+// Lookup table for distributing all possible 3-bit numbers evenly into 8-bit numbers
+static u8 scaling_factors_3bit[8] = {
+ 0x00,
+ 0x24,
+ 0x48,
+ 0x6d,
+ 0x91,
+ 0xb6,
+ 0xdb,
+ 0xff,
+};
+
+static u8 scale_masked_8bit_number(u8 number, u8 bits_set)
+{
+    // If there are 4 or more bits set, an easy way to scale the number is to
+    // just copy the most significant bits into the least significant bits.
+ if (bits_set >= 4)
+ return number | (number >> bits_set);
+ if (!bits_set)
+ return 0;
+ if (bits_set == 1)
+ return number ? 0xff : 0;
+ if (bits_set == 2)
+ return scaling_factors_2bit[number >> 6];
+ return scaling_factors_3bit[number >> 5];
+}
+
+static u8 get_scaled_color(u32 data, u8 mask_size, i8 mask_shift)
+{
+ // A negative mask_shift indicates we actually need to left shift
+ // the result in order to get out a valid 8-bit color (for example, the blue
+ // value in an RGB555 encoding is XXXBBBBB, which needs to be shifted to the
+ // left by 3, hence it would have a "mask_shift" value of -3).
+ if (mask_shift < 0)
+ return scale_masked_8bit_number(data << -mask_shift, mask_size);
+ return scale_masked_8bit_number(data >> mask_shift, mask_size);
+}
+
+// Scales an 8-bit number with "mask_size" bits set (and "8 - mask_size" bits
+// ignored). This function scales the number appropriately over the entire
+// 256 value color spectrum.
+// Note that a much simpler scaling can be done by simple bit shifting. If you
+// just ignore the bottom 8-mask_size bits, then you get *close*. However,
+// consider, as an example, a 5 bit number (so the bottom 3 bits are ignored).
+// The purest white you could get is 0xf8, which is 248 in RGB-land. We need
+// to scale the values in order to reach the proper value of 255.
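+// Continuing that example: scale_masked_8bit_number(0xf8, 5) takes the
+// "4 or more bits set" path and returns 0xf8 | (0xf8 >> 5) = 0xf8 | 0x07 = 0xff.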
+static u32 int_to_scaled_rgb(BMPLoadingContext& context, u32 data)
+{
+ dbgln_if(BMP_DEBUG, "DIB info sizes before access: #masks={}, #mask_sizes={}, #mask_shifts={}",
+ context.dib.info.masks.size(),
+ context.dib.info.mask_sizes.size(),
+ context.dib.info.mask_shifts.size());
+
+ u8 r = get_scaled_color(data & context.dib.info.masks[0], context.dib.info.mask_sizes[0], context.dib.info.mask_shifts[0]);
+ u8 g = get_scaled_color(data & context.dib.info.masks[1], context.dib.info.mask_sizes[1], context.dib.info.mask_shifts[1]);
+ u8 b = get_scaled_color(data & context.dib.info.masks[2], context.dib.info.mask_sizes[2], context.dib.info.mask_shifts[2]);
+ u32 color = (r << 16) | (g << 8) | b;
+
+ if (context.dib.info.masks.size() == 4) {
+ // The bitmap has an alpha mask
+ u8 a = get_scaled_color(data & context.dib.info.masks[3], context.dib.info.mask_sizes[3], context.dib.info.mask_shifts[3]);
+ color |= (a << 24);
+ } else {
+ color |= 0xff000000;
+ }
+
+ return color;
+}
+
+static void populate_dib_mask_info_if_needed(BMPLoadingContext& context)
+{
+ if (context.dib.info.masks.is_empty())
+ return;
+
+    // Mask shift is the number of right shifts needed to align the most
+    // significant set bit of the mask with bit 7 (the top bit of the least
+    // significant byte). Note that this can be a negative number.
+    // Mask size is the number of set bits in the mask. This is required for
+    // color scaling (for example, ensuring that a 4-bit color value spans the
+    // entire 256 value color spectrum).
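+    // For example, the RGB555 red mask 0x7c00 has 10 trailing zeros and 5 set
+    // bits, which yields mask_shift = 5 + 10 - 8 = 7 and mask_size = 5.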
+ auto& masks = context.dib.info.masks;
+ auto& mask_shifts = context.dib.info.mask_shifts;
+ auto& mask_sizes = context.dib.info.mask_sizes;
+
+ if (!mask_shifts.is_empty() && !mask_sizes.is_empty())
+ return;
+
+ VERIFY(mask_shifts.is_empty() && mask_sizes.is_empty());
+
+ mask_shifts.ensure_capacity(masks.size());
+ mask_sizes.ensure_capacity(masks.size());
+
+ for (size_t i = 0; i < masks.size(); ++i) {
+ u32 mask = masks[i];
+ if (!mask) {
+ mask_shifts.append(0);
+ mask_sizes.append(0);
+ continue;
+ }
+ int trailing_zeros = count_trailing_zeroes(mask);
+ // If mask is exactly `0xFFFFFFFF`, then we might try to count the trailing zeros of 0x00000000 here, so we need the safe version:
+ int size = count_trailing_zeroes_safe(~(mask >> trailing_zeros));
+ if (size > 8) {
+ // Drop lowest bits if mask is longer than 8 bits.
+ trailing_zeros += size - 8;
+ size = 8;
+ }
+ mask_shifts.append(size + trailing_zeros - 8);
+ mask_sizes.append(size);
+ }
+}
+
+static bool check_for_invalid_bitmask_combinations(BMPLoadingContext& context)
+{
+ auto& bpp = context.dib.core.bpp;
+ auto& compression = context.dib.info.compression;
+
+ if (compression == Compression::ALPHABITFIELDS && context.dib_type != DIBType::Info)
+ return false;
+
+ switch (context.dib_type) {
+ case DIBType::Core:
+ if (bpp == 2 || bpp == 16 || bpp == 32)
+ return false;
+ break;
+ case DIBType::Info:
+ switch (compression) {
+ case Compression::BITFIELDS:
+ case Compression::ALPHABITFIELDS:
+ if (bpp != 16 && bpp != 32)
+ return false;
+ break;
+ case Compression::RGB:
+ break;
+ case Compression::RLE8:
+ if (bpp > 8)
+ return false;
+ break;
+ case Compression::RLE4:
+ // TODO: This is a guess
+ if (bpp > 4)
+ return false;
+ break;
+ default:
+ // Other compressions are not officially supported.
+ // Technically, we could even drop ALPHABITFIELDS.
+ return false;
+ }
+ break;
+ case DIBType::OSV2Short:
+ case DIBType::OSV2:
+ case DIBType::V2:
+ case DIBType::V3:
+ case DIBType::V4:
+ case DIBType::V5:
+ if (compression == Compression::BITFIELDS && bpp != 16 && bpp != 32)
+ return false;
+ break;
+ }
+
+ return true;
+}
+
+static bool set_dib_bitmasks(BMPLoadingContext& context, InputStreamer& streamer)
+{
+ if (!check_for_invalid_bitmask_combinations(context))
+ return false;
+
+ auto& bpp = context.dib.core.bpp;
+ if (bpp <= 8 || bpp == 24)
+ return true;
+
+ auto& compression = context.dib.info.compression;
+ auto& type = context.dib_type;
+
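+    // A 16 bpp image with plain RGB compression implicitly uses the RGB555
+    // layout (5 bits per channel, top bit unused), so synthesize those masks.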
+ if (type > DIBType::OSV2 && bpp == 16 && compression == Compression::RGB) {
+ context.dib.info.masks.extend({ 0x7c00, 0x03e0, 0x001f });
+ context.dib.info.mask_shifts.extend({ 7, 2, -3 });
+ context.dib.info.mask_sizes.extend({ 5, 5, 5 });
+ } else if (type == DIBType::Info && (compression == Compression::BITFIELDS || compression == Compression::ALPHABITFIELDS)) {
+ // Consume the extra BITFIELDS bytes
+ auto number_of_mask_fields = compression == Compression::ALPHABITFIELDS ? 4 : 3;
+
+ for (auto i = 0; i < number_of_mask_fields; i++) {
+ if (!streamer.has_u32())
+ return false;
+ context.dib.info.masks.append(streamer.read_u32());
+ }
+ }
+
+ populate_dib_mask_info_if_needed(context);
+ return true;
+}
+
+static ErrorOr<void> decode_bmp_header(BMPLoadingContext& context)
+{
+ if (context.state == BMPLoadingContext::State::Error)
+ return Error::from_string_literal("Error before starting decode_bmp_header");
+
+ if (context.state >= BMPLoadingContext::State::HeaderDecoded)
+ return {};
+
+ if (!context.file_bytes || context.file_size < bmp_header_size) {
+ dbgln_if(BMP_DEBUG, "Missing BMP header");
+ context.state = BMPLoadingContext::State::Error;
+ return Error::from_string_literal("Missing BMP header");
+ }
+
+ InputStreamer streamer(context.file_bytes, bmp_header_size);
+
+ u16 header = streamer.read_u16();
+ if (header != 0x4d42) {
+ dbgln_if(BMP_DEBUG, "BMP has invalid magic header number: {:#04x}", header);
+ context.state = BMPLoadingContext::State::Error;
+ return Error::from_string_literal("BMP has invalid magic header number");
+ }
+
+    // The reported file size in the header is not actually needed for decoding.
+    // Some specifications even say this field should hold the size of the header
+    // instead, so we rely on the known file size rather than a reported value
+    // that may or may not be correct.
+ streamer.drop_bytes(4);
+
+ // Ignore reserved bytes
+ streamer.drop_bytes(4);
+ context.data_offset = streamer.read_u32();
+
+ if constexpr (BMP_DEBUG) {
+ dbgln("BMP file size: {}", context.file_size);
+ dbgln("BMP data offset: {}", context.data_offset);
+ }
+
+ if (context.data_offset >= context.file_size) {
+ dbgln_if(BMP_DEBUG, "BMP data offset is beyond file end?!");
+ return Error::from_string_literal("BMP data offset is beyond file end");
+ }
+
+ context.state = BMPLoadingContext::State::HeaderDecoded;
+ return {};
+}
+
+static bool decode_bmp_core_dib(BMPLoadingContext& context, InputStreamer& streamer)
+{
+ auto& core = context.dib.core;
+
+ // The width and height are u16 fields in the actual BITMAPCOREHEADER format.
+ if (context.dib_type == DIBType::Core) {
+ core.width = streamer.read_u16();
+ core.height = streamer.read_u16();
+ } else {
+ core.width = streamer.read_i32();
+ core.height = streamer.read_i32();
+ }
+
+ if (core.width < 0) {
+ dbgln("BMP has a negative width: {}", core.width);
+ return false;
+ }
+
+ if (static_cast<size_t>(core.width) > maximum_width_for_decoded_images || static_cast<size_t>(abs(core.height)) > maximum_height_for_decoded_images) {
+ dbgln("This BMP is too large for comfort: {}x{}", core.width, abs(core.height));
+ return false;
+ }
+
+ auto color_planes = streamer.read_u16();
+ if (color_planes != 1) {
+ dbgln("BMP has an invalid number of color planes: {}", color_planes);
+ return false;
+ }
+
+ core.bpp = streamer.read_u16();
+ switch (core.bpp) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ dbgln("BMP has an invalid bpp: {}", core.bpp);
+ context.state = BMPLoadingContext::State::Error;
+ return false;
+ }
+
+ if constexpr (BMP_DEBUG) {
+ dbgln("BMP width: {}", core.width);
+ dbgln("BMP height: {}", core.height);
+ dbgln("BMP bits_per_pixel: {}", core.bpp);
+ }
+
+ return true;
+}
+
+ALWAYS_INLINE static bool is_supported_compression_format(BMPLoadingContext& context, u32 compression)
+{
+ return compression == Compression::RGB || compression == Compression::BITFIELDS
+ || compression == Compression::ALPHABITFIELDS || compression == Compression::RLE8
+ || compression == Compression::RLE4 || (compression == Compression::RLE24 && context.dib_type <= DIBType::OSV2);
+}
+
+static bool decode_bmp_osv2_dib(BMPLoadingContext& context, InputStreamer& streamer, bool short_variant = false)
+{
+ auto& core = context.dib.core;
+
+ core.width = streamer.read_u32();
+ core.height = streamer.read_u32();
+
+ if (core.width < 0) {
+ dbgln("BMP has a negative width: {}", core.width);
+ return false;
+ }
+
+ auto color_planes = streamer.read_u16();
+ if (color_planes != 1) {
+ dbgln("BMP has an invalid number of color planes: {}", color_planes);
+ return false;
+ }
+
+ core.bpp = streamer.read_u16();
+ switch (core.bpp) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ case 24:
+ break;
+ default:
+ // OS/2 didn't expect 16- or 32-bpp to be popular.
+ dbgln("BMP has an invalid bpp: {}", core.bpp);
+ context.state = BMPLoadingContext::State::Error;
+ return false;
+ }
+
+ if constexpr (BMP_DEBUG) {
+ dbgln("BMP width: {}", core.width);
+ dbgln("BMP height: {}", core.height);
+ dbgln("BMP bits_per_pixel: {}", core.bpp);
+ }
+
+ if (short_variant)
+ return true;
+
+ auto& info = context.dib.info;
+ auto& osv2 = context.dib.osv2;
+
+ info.compression = streamer.read_u32();
+ info.image_size = streamer.read_u32();
+ info.horizontal_resolution = streamer.read_u32();
+ info.vertical_resolution = streamer.read_u32();
+ info.number_of_palette_colors = streamer.read_u32();
+ info.number_of_important_palette_colors = streamer.read_u32();
+
+ if (!is_supported_compression_format(context, info.compression)) {
+ dbgln("BMP has unsupported compression value: {}", info.compression);
+ return false;
+ }
+
+ if (info.number_of_palette_colors > color_palette_limit || info.number_of_important_palette_colors > color_palette_limit) {
+ dbgln("BMP header indicates too many palette colors: {}", info.number_of_palette_colors);
+ return false;
+ }
+
+ // Units (2) + reserved (2)
+ streamer.drop_bytes(4);
+
+ osv2.recording = streamer.read_u16();
+ osv2.halftoning = streamer.read_u16();
+ osv2.size1 = streamer.read_u32();
+ osv2.size2 = streamer.read_u32();
+
+ // ColorEncoding (4) + Identifier (4)
+ streamer.drop_bytes(8);
+
+ if constexpr (BMP_DEBUG) {
+ dbgln("BMP compression: {}", info.compression);
+ dbgln("BMP image size: {}", info.image_size);
+ dbgln("BMP horizontal res: {}", info.horizontal_resolution);
+ dbgln("BMP vertical res: {}", info.vertical_resolution);
+ dbgln("BMP colors: {}", info.number_of_palette_colors);
+ dbgln("BMP important colors: {}", info.number_of_important_palette_colors);
+ }
+
+ return true;
+}
+
+static bool decode_bmp_info_dib(BMPLoadingContext& context, InputStreamer& streamer)
+{
+ if (!decode_bmp_core_dib(context, streamer))
+ return false;
+
+ auto& info = context.dib.info;
+
+ auto compression = streamer.read_u32();
+ info.compression = compression;
+ if (!is_supported_compression_format(context, compression)) {
+ dbgln("BMP has unsupported compression value: {}", compression);
+ return false;
+ }
+
+ info.image_size = streamer.read_u32();
+ info.horizontal_resolution = streamer.read_i32();
+ info.vertical_resolution = streamer.read_i32();
+ info.number_of_palette_colors = streamer.read_u32();
+ info.number_of_important_palette_colors = streamer.read_u32();
+
+ if (info.number_of_palette_colors > color_palette_limit || info.number_of_important_palette_colors > color_palette_limit) {
+ dbgln("BMP header indicates too many palette colors: {}", info.number_of_palette_colors);
+ return false;
+ }
+
+ if (info.number_of_important_palette_colors == 0)
+ info.number_of_important_palette_colors = info.number_of_palette_colors;
+
+ if constexpr (BMP_DEBUG) {
+ dbgln("BMP compression: {}", info.compression);
+ dbgln("BMP image size: {}", info.image_size);
+ dbgln("BMP horizontal res: {}", info.horizontal_resolution);
+ dbgln("BMP vertical res: {}", info.vertical_resolution);
+ dbgln("BMP colors: {}", info.number_of_palette_colors);
+ dbgln("BMP important colors: {}", info.number_of_important_palette_colors);
+ }
+
+ return true;
+}
+
+static bool decode_bmp_v2_dib(BMPLoadingContext& context, InputStreamer& streamer)
+{
+ if (!decode_bmp_info_dib(context, streamer))
+ return false;
+
+ context.dib.info.masks.append(streamer.read_u32());
+ context.dib.info.masks.append(streamer.read_u32());
+ context.dib.info.masks.append(streamer.read_u32());
+
+ if constexpr (BMP_DEBUG) {
+ dbgln("BMP red mask: {:#08x}", context.dib.info.masks[0]);
+ dbgln("BMP green mask: {:#08x}", context.dib.info.masks[1]);
+ dbgln("BMP blue mask: {:#08x}", context.dib.info.masks[2]);
+ }
+
+ return true;
+}
+
+static bool decode_bmp_v3_dib(BMPLoadingContext& context, InputStreamer& streamer)
+{
+ if (!decode_bmp_v2_dib(context, streamer))
+ return false;
+
+    // There is very little documentation about when alpha masks actually get
+    // applied, and what exists is not even close to comprehensive. So this is
+    // in no way based on any spec; it is simply based on the results of the
+    // BMP test suite.
+ if (context.dib.info.compression == Compression::ALPHABITFIELDS) {
+ context.dib.info.masks.append(streamer.read_u32());
+ dbgln_if(BMP_DEBUG, "BMP alpha mask: {:#08x}", context.dib.info.masks[3]);
+ } else if (context.dib_size() >= 56 && context.dib.core.bpp >= 16) {
+ auto mask = streamer.read_u32();
+ if ((context.dib.core.bpp == 32 && mask != 0) || context.dib.core.bpp == 16) {
+ context.dib.info.masks.append(mask);
+ dbgln_if(BMP_DEBUG, "BMP alpha mask: {:#08x}", mask);
+ } else {
+ dbgln_if(BMP_DEBUG, "BMP alpha mask (ignored): {:#08x}", mask);
+ }
+ } else {
+ streamer.drop_bytes(4);
+ dbgln_if(BMP_DEBUG, "BMP alpha mask skipped");
+ }
+
+ return true;
+}
+
+static bool decode_bmp_v4_dib(BMPLoadingContext& context, InputStreamer& streamer)
+{
+ if (!decode_bmp_v3_dib(context, streamer))
+ return false;
+
+ auto& v4 = context.dib.v4;
+ v4.color_space = streamer.read_u32();
+ v4.red_endpoint = { streamer.read_i32(), streamer.read_i32(), streamer.read_i32() };
+ v4.green_endpoint = { streamer.read_i32(), streamer.read_i32(), streamer.read_i32() };
+ v4.blue_endpoint = { streamer.read_i32(), streamer.read_i32(), streamer.read_i32() };
+ v4.gamma_endpoint = { streamer.read_u32(), streamer.read_u32(), streamer.read_u32() };
+
+ if constexpr (BMP_DEBUG) {
+ dbgln("BMP color space: {}", v4.color_space);
+ dbgln("BMP red endpoint: {}", v4.red_endpoint);
+ dbgln("BMP green endpoint: {}", v4.green_endpoint);
+ dbgln("BMP blue endpoint: {}", v4.blue_endpoint);
+ dbgln("BMP gamma endpoint: {}", v4.gamma_endpoint);
+ }
+
+ return true;
+}
+
+static bool decode_bmp_v5_dib(BMPLoadingContext& context, InputStreamer& streamer)
+{
+ if (!decode_bmp_v4_dib(context, streamer))
+ return false;
+
+ auto& v5 = context.dib.v5;
+ v5.intent = streamer.read_u32();
+ v5.profile_data = streamer.read_u32();
+ v5.profile_size = streamer.read_u32();
+ streamer.drop_bytes(4); // Ignore reserved field.
+
+ if constexpr (BMP_DEBUG) {
+ dbgln("BMP intent: {}", v5.intent);
+ dbgln("BMP profile data: {}", v5.profile_data);
+ dbgln("BMP profile size: {}", v5.profile_size);
+ }
+
+ return true;
+}
+
+static ErrorOr<void> decode_bmp_dib(BMPLoadingContext& context)
+{
+ if (context.state == BMPLoadingContext::State::Error)
+ return Error::from_string_literal("Error before starting decode_bmp_dib");
+
+ if (context.state >= BMPLoadingContext::State::DIBDecoded)
+ return {};
+
+ if (!context.is_included_in_ico && context.state < BMPLoadingContext::State::HeaderDecoded)
+ TRY(decode_bmp_header(context));
+
+ u8 header_size = context.is_included_in_ico ? 0 : bmp_header_size;
+
+ if (context.file_size < (u8)(header_size + 4))
+ return Error::from_string_literal("File size too short");
+
+ InputStreamer streamer(context.file_bytes + header_size, 4);
+
+ u32 dib_size = streamer.read_u32();
+
+ if (context.file_size < header_size + dib_size)
+ return Error::from_string_literal("File size too short");
+
+ if (!context.is_included_in_ico && (context.data_offset < header_size + dib_size)) {
+ dbgln("Shenanigans! BMP pixel data and header usually don't overlap.");
+ return Error::from_string_literal("BMP pixel data and header usually don't overlap");
+ }
+
+    // NOTE: If this is a headless BMP (embedded in an ICO file), we can only infer
+    //       the data_offset after we know the size of the color table. We also
+    //       assume that no extra bit masks are present.
+ u32 dib_offset = context.is_included_in_ico ? dib_size : context.data_offset - header_size - 4;
+ streamer = InputStreamer(context.file_bytes + header_size + 4, dib_offset);
+
+ dbgln_if(BMP_DEBUG, "BMP dib size: {}", dib_size);
+
+ bool error = false;
+
+ if (dib_size == 12) {
+ context.dib_type = DIBType::Core;
+ if (!decode_bmp_core_dib(context, streamer))
+ error = true;
+ } else if (dib_size == 64) {
+ context.dib_type = DIBType::OSV2;
+ if (!decode_bmp_osv2_dib(context, streamer))
+ error = true;
+ } else if (dib_size == 16) {
+ context.dib_type = DIBType::OSV2Short;
+ if (!decode_bmp_osv2_dib(context, streamer, true))
+ error = true;
+ } else if (dib_size == 40) {
+ context.dib_type = DIBType::Info;
+ if (!decode_bmp_info_dib(context, streamer))
+ error = true;
+ } else if (dib_size == 52) {
+ context.dib_type = DIBType::V2;
+ if (!decode_bmp_v2_dib(context, streamer))
+ error = true;
+ } else if (dib_size == 56) {
+ context.dib_type = DIBType::V3;
+ if (!decode_bmp_v3_dib(context, streamer))
+ error = true;
+ } else if (dib_size == 108) {
+ context.dib_type = DIBType::V4;
+ if (!decode_bmp_v4_dib(context, streamer))
+ error = true;
+ } else if (dib_size == 124) {
+ context.dib_type = DIBType::V5;
+ if (!decode_bmp_v5_dib(context, streamer))
+ error = true;
+ } else {
+ dbgln("Unsupported BMP DIB size: {}", dib_size);
+ error = true;
+ }
+
+ switch (context.dib.info.compression) {
+ case Compression::RGB:
+ case Compression::RLE8:
+ case Compression::RLE4:
+ case Compression::BITFIELDS:
+ case Compression::RLE24:
+ case Compression::PNG:
+ case Compression::ALPHABITFIELDS:
+ case Compression::CMYK:
+ case Compression::CMYKRLE8:
+ case Compression::CMYKRLE4:
+ break;
+ default:
+ error = true;
+ }
+
+ if (!error && !set_dib_bitmasks(context, streamer))
+ error = true;
+
+ if (error) {
+ dbgln("BMP has an invalid DIB");
+ context.state = BMPLoadingContext::State::Error;
+ return Error::from_string_literal("BMP has an invalid DIB");
+ }
+
+    // NOTE: If this is a headless BMP (included in an ICO), the data_offset is set based on the number_of_palette_colors found in the DIB header
+ if (context.is_included_in_ico) {
+ if (context.dib.core.bpp > 8)
+ context.data_offset = dib_size;
+ else {
+ auto bytes_per_color = context.dib_type == DIBType::Core ? 3 : 4;
+ u32 max_colors = 1 << context.dib.core.bpp;
+ auto size_of_color_table = (context.dib.info.number_of_palette_colors > 0 ? context.dib.info.number_of_palette_colors : max_colors) * bytes_per_color;
+ context.data_offset = dib_size + size_of_color_table;
+ }
+ }
+
+ context.state = BMPLoadingContext::State::DIBDecoded;
+
+ return {};
+}
+
+static ErrorOr<void> decode_bmp_color_table(BMPLoadingContext& context)
+{
+ if (context.state == BMPLoadingContext::State::Error)
+ return Error::from_string_literal("Error before starting decode_bmp_color_table");
+
+ if (context.state < BMPLoadingContext::State::DIBDecoded)
+ TRY(decode_bmp_dib(context));
+
+ if (context.state >= BMPLoadingContext::State::ColorTableDecoded)
+ return {};
+
+ if (context.dib.core.bpp > 8) {
+ context.state = BMPLoadingContext::State::ColorTableDecoded;
+ return {};
+ }
+
+ auto bytes_per_color = context.dib_type == DIBType::Core ? 3 : 4;
+ u32 max_colors = 1 << context.dib.core.bpp;
+
+ u8 header_size = !context.is_included_in_ico ? bmp_header_size : 0;
+ VERIFY(context.data_offset >= header_size + context.dib_size());
+
+ u32 size_of_color_table;
+ if (!context.is_included_in_ico) {
+ size_of_color_table = context.data_offset - header_size - context.dib_size();
+ } else {
+ size_of_color_table = (context.dib.info.number_of_palette_colors > 0 ? context.dib.info.number_of_palette_colors : max_colors) * bytes_per_color;
+ }
+
+ if (context.dib_type <= DIBType::OSV2) {
+        // Partial color tables are not supported, so the space for the color
+        // table must be at least enough for the maximum number of colors.
+ if (size_of_color_table < 3 * max_colors) {
+            // This is against the spec, but most viewers process it anyway.
+ dbgln("BMP with CORE header does not have enough colors. Has: {}, expected: {}", size_of_color_table, (3 * max_colors));
+ }
+ }
+
+ InputStreamer streamer(context.file_bytes + header_size + context.dib_size(), size_of_color_table);
+ for (u32 i = 0; !streamer.at_end() && i < max_colors; ++i) {
+ if (bytes_per_color == 4) {
+ if (!streamer.has_u32())
+ return Error::from_string_literal("Cannot read 32 bits");
+ context.color_table.append(streamer.read_u32());
+ } else {
+ if (!streamer.has_u24())
+ return Error::from_string_literal("Cannot read 24 bits");
+ context.color_table.append(streamer.read_u24());
+ }
+ }
+
+ context.state = BMPLoadingContext::State::ColorTableDecoded;
+
+ return {};
+}
+
+struct RLEState {
+ enum : u8 {
+ PixelCount = 0,
+ PixelValue,
+ Meta, // Represents just consuming a null byte, which indicates something special
+ };
+};
+
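+// BMP RLE data alternates between a count byte and a pixel value: a non-zero
+// count repeats the following value, while a zero count escapes into a control
+// code (0 = end of line, 1 = end of bitmap, 2 = (x, y) delta) or, for any other
+// value n, a run of n literal pixels, optionally followed by a padding byte to
+// keep the stream aligned.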
+static ErrorOr<void> uncompress_bmp_rle_data(BMPLoadingContext& context, ByteBuffer& buffer)
+{
+ // RLE-compressed images cannot be stored top-down
+ if (context.dib.core.height < 0) {
+ dbgln_if(BMP_DEBUG, "BMP is top-down and RLE compressed");
+ context.state = BMPLoadingContext::State::Error;
+ return Error::from_string_literal("BMP is top-down and RLE compressed");
+ }
+
+ InputStreamer streamer(context.file_bytes + context.data_offset, context.file_size - context.data_offset);
+
+ auto compression = context.dib.info.compression;
+
+ u32 total_rows = static_cast<u32>(context.dib.core.height);
+ u32 total_columns = round_up_to_power_of_two(static_cast<u32>(context.dib.core.width), 4);
+ u32 column = 0;
+ u32 row = 0;
+ auto currently_consuming = RLEState::PixelCount;
+ i16 pixel_count = 0;
+
+ // ByteBuffer asserts that allocating the memory never fails.
+ // FIXME: ByteBuffer should return either RefPtr<> or Optional<>.
+ // Decoding the RLE data on-the-fly might actually be faster, and avoids this topic entirely.
+ u32 buffer_size;
+ if (compression == Compression::RLE24) {
+ buffer_size = total_rows * round_up_to_power_of_two(total_columns, 4) * 4;
+ } else {
+ buffer_size = total_rows * round_up_to_power_of_two(total_columns, 4);
+ }
+ if (buffer_size > 300 * MiB) {
+ dbgln("Suspiciously large amount of RLE data");
+ return Error::from_string_literal("Suspiciously large amount of RLE data");
+ }
+ auto buffer_result = ByteBuffer::create_zeroed(buffer_size);
+ if (buffer_result.is_error()) {
+ dbgln("Not enough memory for buffer allocation");
+ return buffer_result.release_error();
+ }
+ buffer = buffer_result.release_value();
+
+ // Avoid as many if statements as possible by pulling out
+ // compression-dependent actions into separate lambdas
+ Function<u32()> get_buffer_index;
+ Function<ErrorOr<void>(u32, bool)> set_byte;
+ Function<ErrorOr<u32>()> read_byte;
+
+ if (compression == Compression::RLE8) {
+ get_buffer_index = [&]() -> u32 { return row * total_columns + column; };
+ } else if (compression == Compression::RLE4) {
+ get_buffer_index = [&]() -> u32 { return (row * total_columns + column) / 2; };
+ } else {
+ get_buffer_index = [&]() -> u32 { return (row * total_columns + column) * 3; };
+ }
+
+ if (compression == Compression::RLE8) {
+ set_byte = [&](u32 color, bool) -> ErrorOr<void> {
+ if (column >= total_columns) {
+ column = 0;
+ row++;
+ }
+ auto index = get_buffer_index();
+ if (index >= buffer.size()) {
+ dbgln("BMP has badly-formatted RLE data");
+ return Error::from_string_literal("BMP has badly-formatted RLE data");
+ }
+ buffer[index] = color;
+ column++;
+ return {};
+ };
+ } else if (compression == Compression::RLE24) {
+ set_byte = [&](u32 color, bool) -> ErrorOr<void> {
+ if (column >= total_columns) {
+ column = 0;
+ row++;
+ }
+ auto index = get_buffer_index();
+ if (index + 3 >= buffer.size()) {
+ dbgln("BMP has badly-formatted RLE data");
+ return Error::from_string_literal("BMP has badly-formatted RLE data");
+ }
+ ((u32&)buffer[index]) = color;
+ column++;
+ return {};
+ };
+ } else {
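+        // RLE4: each output byte packs two 4-bit palette indices. Even columns
+        // start at the high nibble of a buffer byte, odd columns at the low
+        // nibble; rle4_set_second_nibble says whether the low nibble of the
+        // incoming byte should also be written out as a second pixel.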
+ set_byte = [&](u32 byte, bool rle4_set_second_nibble) -> ErrorOr<void> {
+ if (column >= total_columns) {
+ column = 0;
+ row++;
+ }
+
+ u32 index = get_buffer_index();
+ if (index >= buffer.size() || (rle4_set_second_nibble && index + 1 >= buffer.size())) {
+ dbgln("BMP has badly-formatted RLE data");
+ return Error::from_string_literal("BMP has badly-formatted RLE data");
+ }
+
+ if (column % 2) {
+ buffer[index] |= byte >> 4;
+ if (rle4_set_second_nibble) {
+ buffer[index + 1] |= byte << 4;
+ column++;
+ }
+ } else {
+ if (rle4_set_second_nibble) {
+ buffer[index] = byte;
+ column++;
+ } else {
+ buffer[index] |= byte & 0xf0;
+ }
+ }
+
+ column++;
+ return {};
+ };
+ }
+
+ if (compression == Compression::RLE24) {
+ read_byte = [&]() -> ErrorOr<u32> {
+ if (!streamer.has_u24()) {
+ dbgln("BMP has badly-formatted RLE data");
+ return Error::from_string_literal("BMP has badly-formatted RLE data");
+ }
+ return streamer.read_u24();
+ };
+ } else {
+ read_byte = [&]() -> ErrorOr<u32> {
+ if (!streamer.has_u8()) {
+ dbgln("BMP has badly-formatted RLE data");
+ return Error::from_string_literal("BMP has badly-formatted RLE data");
+ }
+ return streamer.read_u8();
+ };
+ }
+
+ while (true) {
+ u32 byte;
+
+ switch (currently_consuming) {
+ case RLEState::PixelCount:
+ if (!streamer.has_u8())
+ return Error::from_string_literal("Cannot read 8 bits");
+ byte = streamer.read_u8();
+ if (!byte) {
+ currently_consuming = RLEState::Meta;
+ } else {
+ pixel_count = byte;
+ currently_consuming = RLEState::PixelValue;
+ }
+ break;
+ case RLEState::PixelValue:
+ byte = TRY(read_byte());
+ for (u16 i = 0; i < pixel_count; ++i) {
+ if (compression != Compression::RLE4) {
+ TRY(set_byte(byte, true));
+ } else {
+ TRY(set_byte(byte, i != pixel_count - 1));
+ i++;
+ }
+ }
+
+ currently_consuming = RLEState::PixelCount;
+ break;
+ case RLEState::Meta:
+ if (!streamer.has_u8())
+ return Error::from_string_literal("Cannot read 8 bits");
+ byte = streamer.read_u8();
+ if (!byte) {
+ column = 0;
+ row++;
+ currently_consuming = RLEState::PixelCount;
+ continue;
+ }
+ if (byte == 1)
+ return {};
+ if (byte == 2) {
+ if (!streamer.has_u8())
+ return Error::from_string_literal("Cannot read 8 bits");
+ u8 offset_x = streamer.read_u8();
+ if (!streamer.has_u8())
+ return Error::from_string_literal("Cannot read 8 bits");
+ u8 offset_y = streamer.read_u8();
+ column += offset_x;
+ if (column >= total_columns) {
+ column -= total_columns;
+ row++;
+ }
+ row += offset_y;
+ currently_consuming = RLEState::PixelCount;
+ continue;
+ }
+
+ // Consume literal bytes
+ pixel_count = byte;
+ i16 i = byte;
+
+ while (i >= 1) {
+ byte = TRY(read_byte());
+ TRY(set_byte(byte, i != 1));
+ i--;
+ if (compression == Compression::RLE4)
+ i--;
+ }
+
+ // Optionally consume a padding byte
+ if (compression != Compression::RLE4) {
+ if (pixel_count % 2) {
+ if (!streamer.has_u8())
+ return Error::from_string_literal("Cannot read 8 bits");
+ byte = streamer.read_u8();
+ }
+ } else {
+ if (((pixel_count + 1) / 2) % 2) {
+ if (!streamer.has_u8())
+ return Error::from_string_literal("Cannot read 8 bits");
+ byte = streamer.read_u8();
+ }
+ }
+ currently_consuming = RLEState::PixelCount;
+ break;
+ }
+ }
+
+ VERIFY_NOT_REACHED();
+}
+
+static ErrorOr<void> decode_bmp_pixel_data(BMPLoadingContext& context)
+{
+ if (context.state == BMPLoadingContext::State::Error)
+ return Error::from_string_literal("Error before starting decode_bmp_pixel_data");
+
+ if (context.state <= BMPLoadingContext::State::ColorTableDecoded)
+ TRY(decode_bmp_color_table(context));
+
+ const u16 bits_per_pixel = context.dib.core.bpp;
+
+ BitmapFormat format = [&]() -> BitmapFormat {
+        // NOTE: If this is a BMP included in an ICO, the bitmap format will be converted to BGRA8888.
+ // This is because images with less than 32 bits of color depth follow a particular format:
+ // the image is encoded with a color mask (the "XOR mask") together with an opacity mask (the "AND mask") of 1 bit per pixel.
+ // The height of the encoded image must be exactly twice the real height, before both masks are combined.
+ // Bitmaps have no knowledge of this format as they do not store extra rows for the AND mask.
+ if (context.is_included_in_ico)
+ return BitmapFormat::BGRA8888;
+
+ switch (bits_per_pixel) {
+ case 1:
+ return BitmapFormat::Indexed1;
+ case 2:
+ return BitmapFormat::Indexed2;
+ case 4:
+ return BitmapFormat::Indexed4;
+ case 8:
+ return BitmapFormat::Indexed8;
+ case 16:
+ if (context.dib.info.masks.size() == 4)
+ return BitmapFormat::BGRA8888;
+ return BitmapFormat::BGRx8888;
+ case 24:
+ return BitmapFormat::BGRx8888;
+ case 32:
+ return BitmapFormat::BGRA8888;
+ default:
+ return BitmapFormat::Invalid;
+ }
+ }();
+
+ if (format == BitmapFormat::Invalid) {
+ dbgln("BMP has invalid bpp of {}", bits_per_pixel);
+ context.state = BMPLoadingContext::State::Error;
+ return Error::from_string_literal("BMP has invalid bpp");
+ }
+
+ const u32 width = abs(context.dib.core.width);
+ const u32 height = !context.is_included_in_ico ? context.dib.core.height : (context.dib.core.height / 2);
+
+ context.bitmap = TRY(Bitmap::create(format, { static_cast<int>(width), static_cast<int>(height) }));
+
+ ByteBuffer rle_buffer;
+ ReadonlyBytes bytes { context.file_bytes + context.data_offset, context.file_size - context.data_offset };
+
+ if (context.dib.info.compression == Compression::RLE4 || context.dib.info.compression == Compression::RLE8
+ || context.dib.info.compression == Compression::RLE24) {
+ TRY(uncompress_bmp_rle_data(context, rle_buffer));
+ bytes = rle_buffer.bytes();
+ }
+
+ InputStreamer streamer(bytes.data(), bytes.size());
+
+ auto process_row_padding = [&](const u8 consumed) -> ErrorOr<void> {
+ // Calculate padding
+ u8 remaining = consumed % 4;
+ u8 bytes_to_drop = remaining == 0 ? 0 : 4 - remaining;
+
+ if (streamer.remaining() < bytes_to_drop)
+ return Error::from_string_literal("Not enough bytes available to drop");
+ streamer.drop_bytes(bytes_to_drop);
+
+ return {};
+ };
+
+ auto process_row = [&](u32 row) -> ErrorOr<void> {
+ u32 space_remaining_before_consuming_row = streamer.remaining();
+
+ for (u32 column = 0; column < width;) {
+ switch (bits_per_pixel) {
+ case 1: {
+ if (!streamer.has_u8())
+ return Error::from_string_literal("Cannot read 8 bits");
+ u8 byte = streamer.read_u8();
+ u8 mask = 8;
+ while (column < width && mask > 0) {
+ mask -= 1;
+ auto color_idx = (byte >> mask) & 0x1;
+ if (context.is_included_in_ico) {
+ auto color = context.color_table[color_idx];
+ context.bitmap->scanline(row)[column++] = color;
+ } else {
+ context.bitmap->scanline_u8(row)[column++] = color_idx;
+ }
+ }
+ break;
+ }
+ case 2: {
+ if (!streamer.has_u8())
+ return Error::from_string_literal("Cannot read 8 bits");
+ u8 byte = streamer.read_u8();
+ u8 mask = 8;
+ while (column < width && mask > 0) {
+ mask -= 2;
+ auto color_idx = (byte >> mask) & 0x3;
+ if (context.is_included_in_ico) {
+ auto color = context.color_table[color_idx];
+ context.bitmap->scanline(row)[column++] = color;
+ } else {
+ context.bitmap->scanline_u8(row)[column++] = color_idx;
+ }
+ }
+ break;
+ }
+ case 4: {
+ if (!streamer.has_u8()) {
+ return Error::from_string_literal("Cannot read 8 bits");
+ }
+ u8 byte = streamer.read_u8();
+
+ u32 high_color_idx = (byte >> 4) & 0xf;
+ u32 low_color_idx = byte & 0xf;
+
+ if (context.is_included_in_ico) {
+ auto high_color = context.color_table[high_color_idx];
+ auto low_color = context.color_table[low_color_idx];
+ context.bitmap->scanline(row)[column++] = high_color;
+ if (column < width) {
+ context.bitmap->scanline(row)[column++] = low_color;
+ }
+ } else {
+ context.bitmap->scanline_u8(row)[column++] = high_color_idx;
+ if (column < width)
+ context.bitmap->scanline_u8(row)[column++] = low_color_idx;
+ }
+ break;
+ }
+ case 8: {
+ if (!streamer.has_u8())
+ return Error::from_string_literal("Cannot read 8 bits");
+
+ u8 byte = streamer.read_u8();
+ if (context.is_included_in_ico) {
+ auto color = context.color_table[byte];
+ context.bitmap->scanline(row)[column++] = color;
+ } else {
+ context.bitmap->scanline_u8(row)[column++] = byte;
+ }
+ break;
+ }
+ case 16: {
+ if (!streamer.has_u16())
+ return Error::from_string_literal("Cannot read 16 bits");
+ context.bitmap->scanline(row)[column++] = int_to_scaled_rgb(context, streamer.read_u16());
+ break;
+ }
+ case 24: {
+ if (!streamer.has_u24())
+ return Error::from_string_literal("Cannot read 24 bits");
+ context.bitmap->scanline(row)[column++] = streamer.read_u24();
+ break;
+ }
+ case 32:
+ if (!streamer.has_u32())
+ return Error::from_string_literal("Cannot read 32 bits");
+ if (context.dib.info.masks.is_empty()) {
+ context.bitmap->scanline(row)[column++] = streamer.read_u32();
+ } else {
+ context.bitmap->scanline(row)[column++] = int_to_scaled_rgb(context, streamer.read_u32());
+ }
+ break;
+ }
+ }
+
+ auto consumed = space_remaining_before_consuming_row - streamer.remaining();
+
+ return process_row_padding(consumed);
+ };
+
+ auto process_mask_row = [&](u32 row) -> ErrorOr<void> {
+ u32 space_remaining_before_consuming_row = streamer.remaining();
+
+ for (u32 column = 0; column < width;) {
+ if (!streamer.has_u8())
+ return Error::from_string_literal("Cannot read 8 bits");
+
+ u8 byte = streamer.read_u8();
+ u8 mask = 8;
+ while (column < width && mask > 0) {
+ mask -= 1;
+ // apply transparency mask
+ // AND mask = 0 -> fully opaque
+ // AND mask = 1 -> fully transparent
+ u8 and_byte = (byte >> (mask)) & 0x1;
+ auto pixel = context.bitmap->scanline(row)[column];
+
+ if (and_byte) {
+ pixel &= 0x00ffffff;
+ } else if (context.dib.core.bpp < 32) {
+ pixel |= 0xff000000;
+ }
+
+ context.bitmap->scanline(row)[column++] = pixel;
+ }
+ }
+
+ auto consumed = space_remaining_before_consuming_row - streamer.remaining();
+ return process_row_padding(consumed);
+ };
+
+ if (context.dib.core.height < 0) {
+ // BMP is stored top-down
+ for (u32 row = 0; row < height; ++row) {
+ TRY(process_row(row));
+ }
+
+ if (context.is_included_in_ico) {
+ for (u32 row = 0; row < height; ++row) {
+ TRY(process_mask_row(row));
+ }
+ }
+ } else {
+ // BMP is stored bottom-up
+ for (i32 row = height - 1; row >= 0; --row) {
+ TRY(process_row(row));
+ }
+
+ if (context.is_included_in_ico) {
+ for (i32 row = height - 1; row >= 0; --row) {
+ TRY(process_mask_row(row));
+ }
+ }
+ }
+
+ if (!context.is_included_in_ico) {
+ for (size_t i = 0; i < context.color_table.size(); ++i) {
+ context.bitmap->set_palette_color(i, Color::from_rgb(context.color_table[i]));
+ }
+ }
+
+ context.state = BMPLoadingContext::State::PixelDataDecoded;
+
+ return {};
+}
+
+BMPImageDecoderPlugin::BMPImageDecoderPlugin(u8 const* data, size_t data_size, IncludedInICO is_included_in_ico)
+{
+ m_context = make<BMPLoadingContext>();
+ m_context->file_bytes = data;
+ m_context->file_size = data_size;
+ m_context->is_included_in_ico = (is_included_in_ico == IncludedInICO::Yes);
+}
+
+BMPImageDecoderPlugin::~BMPImageDecoderPlugin() = default;
+
+IntSize BMPImageDecoderPlugin::size()
+{
+ if (m_context->state == BMPLoadingContext::State::Error)
+ return {};
+
+ if (m_context->state < BMPLoadingContext::State::DIBDecoded && decode_bmp_dib(*m_context).is_error())
+ return {};
+
+ return { m_context->dib.core.width, abs(m_context->dib.core.height) };
+}
+
+void BMPImageDecoderPlugin::set_volatile()
+{
+ if (m_context->bitmap)
+ m_context->bitmap->set_volatile();
+}
+
+bool BMPImageDecoderPlugin::set_nonvolatile(bool& was_purged)
+{
+ if (!m_context->bitmap)
+ return false;
+ return m_context->bitmap->set_nonvolatile(was_purged);
+}
+
+bool BMPImageDecoderPlugin::initialize()
+{
+ return !decode_bmp_header(*m_context).is_error();
+}
+
+bool BMPImageDecoderPlugin::sniff(ReadonlyBytes data)
+{
+ BMPLoadingContext context;
+ context.file_bytes = data.data();
+ context.file_size = data.size();
+ return !decode_bmp_header(context).is_error();
+}
+
+ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> BMPImageDecoderPlugin::create(ReadonlyBytes data)
+{
+ return adopt_nonnull_own_or_enomem(new (nothrow) BMPImageDecoderPlugin(data.data(), data.size()));
+}
+
+ErrorOr<NonnullOwnPtr<BMPImageDecoderPlugin>> BMPImageDecoderPlugin::create_as_included_in_ico(Badge<ICOImageDecoderPlugin>, ReadonlyBytes data)
+{
+ return adopt_nonnull_own_or_enomem(new (nothrow) BMPImageDecoderPlugin(data.data(), data.size(), IncludedInICO::Yes));
+}
+
+bool BMPImageDecoderPlugin::sniff_dib()
+{
+ return !decode_bmp_dib(*m_context).is_error();
+}
+
+bool BMPImageDecoderPlugin::is_animated()
+{
+ return false;
+}
+
+size_t BMPImageDecoderPlugin::loop_count()
+{
+ return 0;
+}
+
+size_t BMPImageDecoderPlugin::frame_count()
+{
+ return 1;
+}
+
+ErrorOr<ImageFrameDescriptor> BMPImageDecoderPlugin::frame(size_t index)
+{
+ if (index > 0)
+ return Error::from_string_literal("BMPImageDecoderPlugin: Invalid frame index");
+
+ if (m_context->state == BMPLoadingContext::State::Error)
+ return Error::from_string_literal("BMPImageDecoderPlugin: Decoding failed");
+
+ if (m_context->state < BMPLoadingContext::State::PixelDataDecoded)
+ TRY(decode_bmp_pixel_data(*m_context));
+
+ VERIFY(m_context->bitmap);
+ return ImageFrameDescriptor { m_context->bitmap, 0 };
+}
+
+ErrorOr<Optional<ReadonlyBytes>> BMPImageDecoderPlugin::icc_data()
+{
+ TRY(decode_bmp_dib(*m_context));
+
+ if (m_context->dib_type != DIBType::V5)
+ return OptionalNone {};
+
+ // FIXME: For LINKED, return data from the linked file?
+ // FIXME: For sRGB and WINDOWS_COLOR_SPACE, return an sRGB profile somehow.
+ // FIXME: For CALIBRATED_RGB, do something with v4.{red_endpoint,green_endpoint,blue_endpoint,gamma_endpoint}
+ if (m_context->dib.v4.color_space != ColorSpace::EMBEDDED)
+ return OptionalNone {};
+
+ auto const& v5 = m_context->dib.v5;
+ if (!v5.profile_data || !v5.profile_size)
+ return OptionalNone {};
+
+ // FIXME: Do something with v5.intent (which has a GamutMappingIntent value).
+
+ u8 header_size = m_context->is_included_in_ico ? 0 : bmp_header_size;
+ if (v5.profile_data + header_size + v5.profile_size > m_context->file_size)
+ return Error::from_string_literal("BMPImageDecoderPlugin: ICC profile data out of bounds");
+
+ return ReadonlyBytes { m_context->file_bytes + header_size + v5.profile_data, v5.profile_size };
+}
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/BMPLoader.h b/Userland/Libraries/LibGfx/ImageFormats/BMPLoader.h
new file mode 100644
index 0000000000..7999a4a76e
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/BMPLoader.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020, Matthew Olsson <mattco@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <LibGfx/ImageFormats/ICOLoader.h>
+#include <LibGfx/ImageFormats/ImageDecoder.h>
+
+namespace Gfx {
+
+struct BMPLoadingContext;
+class ICOImageDecoderPlugin;
+
+class BMPImageDecoderPlugin final : public ImageDecoderPlugin {
+public:
+ static bool sniff(ReadonlyBytes);
+ static ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> create(ReadonlyBytes);
+ static ErrorOr<NonnullOwnPtr<BMPImageDecoderPlugin>> create_as_included_in_ico(Badge<ICOImageDecoderPlugin>, ReadonlyBytes);
+
+ enum class IncludedInICO {
+ Yes,
+ No,
+ };
+
+ virtual ~BMPImageDecoderPlugin() override;
+
+ virtual IntSize size() override;
+ virtual void set_volatile() override;
+ [[nodiscard]] virtual bool set_nonvolatile(bool& was_purged) override;
+ virtual bool initialize() override;
+ bool sniff_dib();
+ virtual bool is_animated() override;
+ virtual size_t loop_count() override;
+ virtual size_t frame_count() override;
+ virtual ErrorOr<ImageFrameDescriptor> frame(size_t index) override;
+ virtual ErrorOr<Optional<ReadonlyBytes>> icc_data() override;
+
+private:
+ BMPImageDecoderPlugin(u8 const*, size_t, IncludedInICO included_in_ico = IncludedInICO::No);
+
+ OwnPtr<BMPLoadingContext> m_context;
+};
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/BMPWriter.cpp b/Userland/Libraries/LibGfx/ImageFormats/BMPWriter.cpp
new file mode 100644
index 0000000000..fb56998407
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/BMPWriter.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2020, Ben Jilks <benjyjilks@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <LibGfx/Bitmap.h>
+#include <LibGfx/ImageFormats/BMPWriter.h>
+
+namespace Gfx {
+
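+// Little-endian byte writer for the BMP header fields and pixel data; the low
+// byte of every multi-byte value is emitted first.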
+class OutputStreamer {
+public:
+ OutputStreamer(u8* data)
+ : m_data(data)
+ {
+ }
+
+ void write_u8(u8 i)
+ {
+ *(m_data++) = i;
+ }
+
+ void write_u16(u16 i)
+ {
+ *(m_data++) = i & 0xFF;
+ *(m_data++) = (i >> 8) & 0xFF;
+ }
+
+ void write_u32(u32 i)
+ {
+ write_u16(i & 0xFFFF);
+ write_u16((i >> 16) & 0xFFFF);
+ }
+
+ void write_i32(i32 i)
+ {
+ write_u32(static_cast<u32>(i));
+ }
+
+private:
+ u8* m_data;
+};
+
+static ErrorOr<ByteBuffer> write_pixel_data(Bitmap const& bitmap, int pixel_row_data_size, int bytes_per_pixel, bool include_alpha_channel)
+{
+ int image_size = pixel_row_data_size * bitmap.height();
+ auto buffer = TRY(ByteBuffer::create_uninitialized(image_size));
+
+ int current_row = 0;
+ for (int y = bitmap.physical_height() - 1; y >= 0; --y) {
+ auto* row = buffer.data() + (pixel_row_data_size * current_row++);
+ for (int x = 0; x < bitmap.physical_width(); x++) {
+ auto pixel = bitmap.get_pixel(x, y);
+ row[x * bytes_per_pixel + 0] = pixel.blue();
+ row[x * bytes_per_pixel + 1] = pixel.green();
+ row[x * bytes_per_pixel + 2] = pixel.red();
+ if (include_alpha_channel)
+ row[x * bytes_per_pixel + 3] = pixel.alpha();
+ }
+ }
+
+ return buffer;
+}
+
+ErrorOr<ByteBuffer> BMPWriter::encode(Bitmap const& bitmap, Options options)
+{
+ return BMPWriter().dump(bitmap, options);
+}
+
+ByteBuffer BMPWriter::compress_pixel_data(ByteBuffer pixel_data, BMPWriter::Compression compression)
+{
+ switch (compression) {
+ case BMPWriter::Compression::BI_BITFIELDS:
+ case BMPWriter::Compression::BI_RGB:
+ return pixel_data;
+ }
+
+ VERIFY_NOT_REACHED();
+}
+
+ErrorOr<ByteBuffer> BMPWriter::dump(Bitmap const& bitmap, Options options)
+{
+ Options::DibHeader dib_header = options.dib_header;
+
+ auto icc_data = options.icc_data;
+ if (icc_data.has_value() && dib_header < Options::DibHeader::V5)
+ return Error::from_string_literal("can only embed ICC profiles in v5+ bmps");
+
+ switch (dib_header) {
+ case Options::DibHeader::Info:
+ m_compression = Compression::BI_RGB;
+ m_bytes_per_pixel = 3;
+ m_include_alpha_channel = false;
+ break;
+ case Options::DibHeader::V3:
+ case Options::DibHeader::V4:
+ case Options::DibHeader::V5:
+ m_compression = Compression::BI_BITFIELDS;
+ m_bytes_per_pixel = 4;
+ m_include_alpha_channel = true;
+ }
+
+ const size_t file_header_size = 14;
+ size_t header_size = file_header_size + (u32)dib_header;
+
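+    // Each scanline in the pixel array is padded to a multiple of 4 bytes:
+    // round the row width in bits up to the next 32-bit boundary, then convert
+    // to bytes.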
+ int pixel_row_data_size = (m_bytes_per_pixel * 8 * bitmap.width() + 31) / 32 * 4;
+ int image_size = pixel_row_data_size * bitmap.height();
+ auto buffer = TRY(ByteBuffer::create_uninitialized(header_size));
+
+ auto pixel_data = TRY(write_pixel_data(bitmap, pixel_row_data_size, m_bytes_per_pixel, m_include_alpha_channel));
+ pixel_data = compress_pixel_data(move(pixel_data), m_compression);
+
+ size_t icc_profile_size = 0;
+ if (icc_data.has_value())
+ icc_profile_size = icc_data->size();
+
+ size_t pixel_data_offset = header_size + icc_profile_size;
+ size_t file_size = pixel_data_offset + pixel_data.size();
+ OutputStreamer streamer(buffer.data());
+ streamer.write_u8('B');
+ streamer.write_u8('M');
+ streamer.write_u32(file_size);
+ streamer.write_u32(0);
+ streamer.write_u32(pixel_data_offset);
+
+ streamer.write_u32((u32)dib_header); // Header size
+ streamer.write_i32(bitmap.width()); // ImageWidth
+ streamer.write_i32(bitmap.height()); // ImageHeight
+ streamer.write_u16(1); // Planes
+ streamer.write_u16(m_bytes_per_pixel * 8); // BitsPerPixel
+ streamer.write_u32((u32)m_compression); // Compression
+ streamer.write_u32(image_size); // ImageSize
+ streamer.write_i32(0); // XpixelsPerMeter
+ streamer.write_i32(0); // YpixelsPerMeter
+ streamer.write_u32(0); // TotalColors
+ streamer.write_u32(0); // ImportantColors
+
+ if (dib_header >= Options::DibHeader::V3) {
+ streamer.write_u32(0x00ff0000); // Red bitmask
+ streamer.write_u32(0x0000ff00); // Green bitmask
+ streamer.write_u32(0x000000ff); // Blue bitmask
+ streamer.write_u32(0xff000000); // Alpha bitmask
+ }
+
+ if (dib_header >= Options::DibHeader::V4) {
+ if (icc_data.has_value())
+ streamer.write_u32(0x4D424544); // Colorspace EMBEDDED
+ else
+ streamer.write_u32(0); // Colorspace CALIBRATED_RGB
+
+ for (int i = 0; i < 12; i++) {
+ streamer.write_u32(0); // Endpoints and gamma
+ }
+ }
+
+ if (dib_header >= Options::DibHeader::V5) {
+ streamer.write_u32(4); // Rendering intent IMAGES / Perceptual.
+
+ if (icc_data.has_value()) {
+ streamer.write_u32((u32)dib_header); // Profile data (relative to file_header_size)
+ streamer.write_u32(icc_data->size()); // Profile size
+ } else {
+ streamer.write_u32(0); // Profile data
+ streamer.write_u32(0); // Profile size
+ }
+ streamer.write_u32(0); // Reserved
+ }
+
+ if (icc_data.has_value())
+ TRY(buffer.try_append(icc_data.value()));
+
+ TRY(buffer.try_append(pixel_data));
+ return buffer;
+}
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/BMPWriter.h b/Userland/Libraries/LibGfx/ImageFormats/BMPWriter.h
new file mode 100644
index 0000000000..8cb9fb111a
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/BMPWriter.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020, Ben Jilks <benjyjilks@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/ByteBuffer.h>
+
+namespace Gfx {
+
+class Bitmap;
+
+// This is not a nested struct to work around https://llvm.org/PR36684
+struct BMPWriterOptions {
+ enum class DibHeader : u32 {
+ Info = 40,
+ V3 = 56,
+ V4 = 108,
+ V5 = 124,
+ };
+ DibHeader dib_header = DibHeader::V5;
+
+ Optional<ReadonlyBytes> icc_data;
+};
+
+class BMPWriter {
+public:
+ using Options = BMPWriterOptions;
+ static ErrorOr<ByteBuffer> encode(Bitmap const&, Options options = Options {});
+
+private:
+ BMPWriter() = default;
+
+ ErrorOr<ByteBuffer> dump(Bitmap const&, Options options);
+
+ enum class Compression : u32 {
+ BI_RGB = 0,
+ BI_BITFIELDS = 3,
+ };
+
+ static ByteBuffer compress_pixel_data(ByteBuffer, Compression);
+
+ Compression m_compression { Compression::BI_BITFIELDS };
+
+ int m_bytes_per_pixel { 4 };
+ bool m_include_alpha_channel { true };
+};
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/DDSLoader.cpp b/Userland/Libraries/LibGfx/ImageFormats/DDSLoader.cpp
new file mode 100644
index 0000000000..8f2f889f8b
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/DDSLoader.cpp
@@ -0,0 +1,710 @@
+/*
+ * Copyright (c) 2021, the SerenityOS developers.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/Debug.h>
+#include <AK/DeprecatedString.h>
+#include <AK/Endian.h>
+#include <AK/Error.h>
+#include <AK/MemoryStream.h>
+#include <AK/StringBuilder.h>
+#include <AK/Try.h>
+#include <AK/Vector.h>
+#include <LibGfx/ImageFormats/DDSLoader.h>
+#include <fcntl.h>
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+namespace Gfx {
+
+struct DDSLoadingContext {
+ enum State {
+ NotDecoded = 0,
+ Error,
+ BitmapDecoded,
+ };
+
+ State state { State::NotDecoded };
+
+ u8 const* data { nullptr };
+ size_t data_size { 0 };
+
+ DDSHeader header;
+ DDSHeaderDXT10 header10;
+ RefPtr<Gfx::Bitmap> bitmap;
+
+ void dump_debug();
+};
+
+static constexpr u32 create_four_cc(char c0, char c1, char c2, char c3)
+{
+ return c0 | c1 << 8 | c2 << 16 | c3 << 24;
+}
+
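+// Each mipmap level halves the dimensions, so level n is just the base size shifted right by n.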
+static u64 get_width(DDSHeader header, size_t mipmap_level)
+{
+ if (mipmap_level >= header.mip_map_count) {
+ return header.width;
+ }
+
+ return header.width >> mipmap_level;
+}
+
+static u64 get_height(DDSHeader header, size_t mipmap_level)
+{
+ if (mipmap_level >= header.mip_map_count) {
+ return header.height;
+ }
+
+ return header.height >> mipmap_level;
+}
+
+static constexpr bool has_bitmask(DDSPixelFormat format, u32 r, u32 g, u32 b, u32 a)
+{
+ return format.r_bit_mask == r && format.g_bit_mask == g && format.b_bit_mask == b && format.a_bit_mask == a;
+}
+
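+// Maps a legacy DDS pixel format (bit masks, FourCC codes and raw D3DFMT numbers in the FourCC
+// field) onto the closest DXGI format. DX10-style files carry a DXGI format directly in their
+// extended header instead.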
+static DXGIFormat get_format(DDSPixelFormat format)
+{
+ if ((format.flags & PixelFormatFlags::DDPF_RGB) == PixelFormatFlags::DDPF_RGB) {
+ switch (format.rgb_bit_count) {
+ case 32: {
+ if (has_bitmask(format, 0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000))
+ return DXGI_FORMAT_R8G8B8A8_UNORM;
+ if (has_bitmask(format, 0x00FF0000, 0x0000FF00, 0x000000FF, 0xFF000000))
+ return DXGI_FORMAT_B8G8R8A8_UNORM;
+ if (has_bitmask(format, 0x00FF0000, 0x0000FF00, 0x000000FF, 0x00000000))
+ return DXGI_FORMAT_B8G8R8X8_UNORM;
+ if (has_bitmask(format, 0x3FF00000, 0x000FFC00, 0x000003FF, 0xC0000000))
+ return DXGI_FORMAT_R10G10B10A2_UNORM;
+ if (has_bitmask(format, 0x0000FFFF, 0xFFFF0000, 0x00000000, 0x00000000))
+ return DXGI_FORMAT_R16G16_UNORM;
+ if (has_bitmask(format, 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000))
+ return DXGI_FORMAT_R32_FLOAT;
+ break;
+ }
+ case 24:
+ break;
+ case 16: {
+ if (has_bitmask(format, 0x7C00, 0x03E0, 0x001F, 0x8000))
+ return DXGI_FORMAT_B5G5R5A1_UNORM;
+ if (has_bitmask(format, 0xF800, 0x07E0, 0x001F, 0x0000))
+ return DXGI_FORMAT_B5G6R5_UNORM;
+ if (has_bitmask(format, 0x0F00, 0x00F0, 0x000F, 0xF000))
+ return DXGI_FORMAT_B4G4R4A4_UNORM;
+ if (has_bitmask(format, 0x00FF, 0x0000, 0x0000, 0xFF00))
+ return DXGI_FORMAT_R8G8_UNORM;
+ if (has_bitmask(format, 0xFFFF, 0x0000, 0x0000, 0x0000))
+ return DXGI_FORMAT_R16_UNORM;
+ break;
+ }
+ case 8: {
+ if (has_bitmask(format, 0xFF, 0x00, 0x00, 0x00))
+ return DXGI_FORMAT_R8_UNORM;
+ break;
+ }
+ }
+ } else if ((format.flags & PixelFormatFlags::DDPF_LUMINANCE) == PixelFormatFlags::DDPF_LUMINANCE) {
+ switch (format.rgb_bit_count) {
+ case 16: {
+ if (has_bitmask(format, 0xFFFF, 0x0000, 0x0000, 0x0000))
+ return DXGI_FORMAT_R16_UNORM;
+ if (has_bitmask(format, 0x00FF, 0x0000, 0x0000, 0xFF00))
+ return DXGI_FORMAT_R8G8_UNORM;
+ break;
+ }
+ case 8: {
+ if (has_bitmask(format, 0xFF, 0x00, 0x00, 0x00))
+ return DXGI_FORMAT_R8_UNORM;
+
+ // Some writers mistakenly write this as 8 bpp.
+ if (has_bitmask(format, 0x00FF, 0x0000, 0x0000, 0xFF00))
+ return DXGI_FORMAT_R8G8_UNORM;
+ break;
+ }
+ }
+ } else if ((format.flags & PixelFormatFlags::DDPF_ALPHA) == PixelFormatFlags::DDPF_ALPHA) {
+ if (format.rgb_bit_count == 8)
+ return DXGI_FORMAT_A8_UNORM;
+ } else if ((format.flags & PixelFormatFlags::DDPF_BUMPDUDV) == PixelFormatFlags::DDPF_BUMPDUDV) {
+ switch (format.rgb_bit_count) {
+ case 32: {
+ if (has_bitmask(format, 0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000))
+ return DXGI_FORMAT_R8G8B8A8_SNORM;
+ if (has_bitmask(format, 0x0000FFFF, 0xFFFF0000, 0x00000000, 0x00000000))
+ return DXGI_FORMAT_R16G16_SNORM;
+ break;
+ }
+ case 16: {
+ if (has_bitmask(format, 0x00FF, 0xFF00, 0x0000, 0x0000))
+ return DXGI_FORMAT_R8G8_SNORM;
+ break;
+ }
+ }
+ } else if ((format.flags & PixelFormatFlags::DDPF_FOURCC) == PixelFormatFlags::DDPF_FOURCC) {
+ if (format.four_cc == create_four_cc('D', 'X', 'T', '1'))
+ return DXGI_FORMAT_BC1_UNORM;
+ if (format.four_cc == create_four_cc('D', 'X', 'T', '2'))
+ return DXGI_FORMAT_BC2_UNORM;
+ if (format.four_cc == create_four_cc('D', 'X', 'T', '3'))
+ return DXGI_FORMAT_BC2_UNORM;
+ if (format.four_cc == create_four_cc('D', 'X', 'T', '4'))
+ return DXGI_FORMAT_BC3_UNORM;
+ if (format.four_cc == create_four_cc('D', 'X', 'T', '5'))
+ return DXGI_FORMAT_BC3_UNORM;
+ if (format.four_cc == create_four_cc('A', 'T', 'I', '1'))
+ return DXGI_FORMAT_BC4_UNORM;
+ if (format.four_cc == create_four_cc('B', 'C', '4', 'U'))
+ return DXGI_FORMAT_BC4_UNORM;
+ if (format.four_cc == create_four_cc('B', 'C', '4', 'S'))
+ return DXGI_FORMAT_BC4_SNORM;
+ if (format.four_cc == create_four_cc('A', 'T', 'I', '2'))
+ return DXGI_FORMAT_BC5_UNORM;
+ if (format.four_cc == create_four_cc('B', 'C', '5', 'U'))
+ return DXGI_FORMAT_BC5_UNORM;
+ if (format.four_cc == create_four_cc('B', 'C', '5', 'S'))
+ return DXGI_FORMAT_BC5_SNORM;
+ if (format.four_cc == create_four_cc('R', 'G', 'B', 'G'))
+ return DXGI_FORMAT_R8G8_B8G8_UNORM;
+ if (format.four_cc == create_four_cc('G', 'R', 'G', 'B'))
+ return DXGI_FORMAT_G8R8_G8B8_UNORM;
+ if (format.four_cc == create_four_cc('Y', 'U', 'Y', '2'))
+ return DXGI_FORMAT_YUY2;
+
+ switch (format.four_cc) {
+ case 36:
+ return DXGI_FORMAT_R16G16B16A16_UNORM;
+ case 110:
+ return DXGI_FORMAT_R16G16B16A16_SNORM;
+ case 111:
+ return DXGI_FORMAT_R16_FLOAT;
+ case 112:
+ return DXGI_FORMAT_R16G16_FLOAT;
+ case 113:
+ return DXGI_FORMAT_R16G16B16A16_FLOAT;
+ case 114:
+ return DXGI_FORMAT_R32_FLOAT;
+ case 115:
+ return DXGI_FORMAT_R32G32_FLOAT;
+ case 116:
+ return DXGI_FORMAT_R32G32B32A32_FLOAT;
+ }
+ }
+
+ return DXGI_FORMAT_UNKNOWN;
+}
+
+static ErrorOr<void> decode_dx5_alpha_block(Stream& stream, DDSLoadingContext& context, u64 bitmap_x, u64 bitmap_y)
+{
+ auto color0 = TRY(stream.read_value<LittleEndian<u8>>());
+ auto color1 = TRY(stream.read_value<LittleEndian<u8>>());
+
+ auto code0 = TRY(stream.read_value<LittleEndian<u8>>());
+ auto code1 = TRY(stream.read_value<LittleEndian<u8>>());
+ auto code2 = TRY(stream.read_value<LittleEndian<u8>>());
+ auto code3 = TRY(stream.read_value<LittleEndian<u8>>());
+ auto code4 = TRY(stream.read_value<LittleEndian<u8>>());
+ auto code5 = TRY(stream.read_value<LittleEndian<u8>>());
+
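+    // Reassemble the 48 bits of 3-bit alpha indices into overlapping little-endian windows so
+    // that each index can be fetched below from codes[index / 8] with a small shift. The stray
+    // "+ 256" terms only set bits above the range that is ever extracted, so they are harmless.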
+ u32 codes[6] = { 0 };
+ codes[0] = code0 + 256 * (code1 + 256);
+ codes[1] = code1 + 256 * (code2 + 256);
+ codes[2] = code2 + 256 * (code3 + 256);
+ codes[3] = code3 + 256 * (code4 + 256);
+ codes[4] = code4 + 256 * code5;
+ codes[5] = code5;
+
+ u32 color[8] = { 0 };
+
+ if (color0 > 128) {
+ color[0] = color0;
+ }
+
+ if (color1 > 128) {
+ color[1] = color1;
+ }
+
+ if (color0 > color1) {
+ color[2] = (6 * color[0] + 1 * color[1]) / 7;
+ color[3] = (5 * color[0] + 2 * color[1]) / 7;
+ color[4] = (4 * color[0] + 3 * color[1]) / 7;
+ color[5] = (3 * color[0] + 4 * color[1]) / 7;
+ color[6] = (2 * color[0] + 5 * color[1]) / 7;
+ color[7] = (1 * color[0] + 6 * color[1]) / 7;
+ } else {
+ color[2] = (4 * color[0] + 1 * color[1]) / 5;
+ color[3] = (3 * color[0] + 2 * color[1]) / 5;
+ color[4] = (2 * color[0] + 3 * color[1]) / 5;
+ color[5] = (1 * color[0] + 4 * color[1]) / 5;
+ color[6] = 0;
+ color[7] = 255;
+ }
+
+ for (size_t y = 0; y < 4; y++) {
+ for (size_t x = 0; x < 4; x++) {
+ u8 index = 3 * (4 * y + x);
+            u8 bit_location = index / 8;
+            u8 adjusted_index = index % 8;
+
+ u8 code = (codes[bit_location] >> adjusted_index) & 7;
+ u8 alpha = color[code];
+
+ Color color = Color(0, 0, 0, alpha);
+ context.bitmap->set_pixel(bitmap_x + x, bitmap_y + y, color);
+ }
+ }
+
+ return {};
+}
+
+static ErrorOr<void> decode_dx3_alpha_block(Stream& stream, DDSLoadingContext& context, u64 bitmap_x, u64 bitmap_y)
+{
+ auto a0 = TRY(stream.read_value<LittleEndian<u8>>());
+ auto a1 = TRY(stream.read_value<LittleEndian<u8>>());
+ auto a2 = TRY(stream.read_value<LittleEndian<u8>>());
+ auto a3 = TRY(stream.read_value<LittleEndian<u8>>());
+ auto a4 = TRY(stream.read_value<LittleEndian<u8>>());
+ auto a5 = TRY(stream.read_value<LittleEndian<u8>>());
+ auto a6 = TRY(stream.read_value<LittleEndian<u8>>());
+ auto a7 = TRY(stream.read_value<LittleEndian<u8>>());
+
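+    // BC2 stores 4 bits of explicit alpha per texel: pack the eight bytes into two little-endian
+    // halves and expand each nibble from 0..15 to 0..255 by multiplying by 17.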
+ u64 alpha_0 = a0 + 256u * (a1 + 256u * (a2 + 256u * (a3 + 256u)));
+ u64 alpha_1 = a4 + 256u * (a5 + 256u * (a6 + 256u * a7));
+
+ for (size_t y = 0; y < 4; y++) {
+ for (size_t x = 0; x < 4; x++) {
+ u8 code = 4 * (4 * y + x);
+
+ if (code >= 32) {
+ code = code - 32;
+ u8 alpha = ((alpha_1 >> code) & 0x0F) * 17;
+
+ Color color = Color(0, 0, 0, alpha);
+ context.bitmap->set_pixel(bitmap_x + x, bitmap_y + y, color);
+ } else {
+ u8 alpha = ((alpha_0 >> code) & 0x0F) * 17;
+
+ Color color = Color(0, 0, 0, alpha);
+ context.bitmap->set_pixel(bitmap_x + x, bitmap_y + y, color);
+ }
+ }
+ }
+
+ return {};
+}
+
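+// Expands a packed 5:6:5 pixel to 8-bit channels by shifting each field up and repeating its top
+// bits in the vacated low bits.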
+static void unpack_rgb_565(u32 rgb, u8* output)
+{
+ u8 r = (rgb >> 11) & 0x1F;
+ u8 g = (rgb >> 5) & 0x3F;
+ u8 b = rgb & 0x1F;
+
+ output[0] = (r << 3) | (r >> 2);
+ output[1] = (g << 2) | (g >> 4);
+ output[2] = (b << 3) | (b >> 2);
+ output[3] = 255;
+}
+
+static ErrorOr<void> decode_color_block(Stream& stream, DDSLoadingContext& context, bool dxt1, u64 bitmap_x, u64 bitmap_y)
+{
+ auto c0_low = TRY(stream.read_value<LittleEndian<u8>>());
+ auto c0_high = TRY(stream.read_value<LittleEndian<u8>>());
+ auto c1_low = TRY(stream.read_value<LittleEndian<u8>>());
+ auto c1_high = TRY(stream.read_value<LittleEndian<u8>>());
+
+ auto codes_0 = TRY(stream.read_value<LittleEndian<u8>>());
+ auto codes_1 = TRY(stream.read_value<LittleEndian<u8>>());
+ auto codes_2 = TRY(stream.read_value<LittleEndian<u8>>());
+ auto codes_3 = TRY(stream.read_value<LittleEndian<u8>>());
+
+ u64 code = codes_0 + 256 * (codes_1 + 256 * (codes_2 + 256 * codes_3));
+ u32 color_0 = c0_low + (c0_high * 256);
+ u32 color_1 = c1_low + (c1_high * 256);
+
+ u8 rgba[4][4];
+    unpack_rgb_565(color_0, rgba[0]);
+    unpack_rgb_565(color_1, rgba[1]);
+
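+    // As in standard BC1 blocks: color_0 > color_1 selects four-color mode with two interpolated
+    // colors; otherwise the third color is the midpoint and the fourth is transparent black
+    // (the transparency only matters for DXT1, which has no separate alpha block).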
+ if (color_0 > color_1) {
+ for (size_t i = 0; i < 3; i++) {
+ rgba[2][i] = (2 * rgba[0][i] + rgba[1][i]) / 3;
+ rgba[3][i] = (rgba[0][i] + 2 * rgba[1][i]) / 3;
+ }
+
+ rgba[2][3] = 255;
+ rgba[3][3] = 255;
+ } else {
+ for (size_t i = 0; i < 3; i++) {
+ rgba[2][i] = (rgba[0][i] + rgba[1][i]) / 2;
+ rgba[3][i] = 0;
+ }
+
+ rgba[2][3] = 255;
+ rgba[3][3] = dxt1 ? 0 : 255;
+ }
+
+ size_t i = 0;
+ for (size_t y = 0; y < 4; y++) {
+ for (size_t x = 0; x < 4; x++) {
+ u8 code_byte = (code >> (i * 2)) & 3;
+ u8 r = rgba[code_byte][0];
+ u8 g = rgba[code_byte][1];
+ u8 b = rgba[code_byte][2];
+ u8 a = dxt1 ? rgba[code_byte][3] : context.bitmap->get_pixel(bitmap_x + x, bitmap_y + y).alpha();
+
+ Color color = Color(r, g, b, a);
+ context.bitmap->set_pixel(bitmap_x + x, bitmap_y + y, color);
+ i++;
+ }
+ }
+
+ return {};
+}
+
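+// BC1/BC2/BC3 encode the image in 4x4 texel blocks (8 bytes per BC1 block, 16 for BC2/BC3, which
+// prepend an alpha block), so the compressed data is consumed one block row at a time.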
+static ErrorOr<void> decode_dxt(Stream& stream, DDSLoadingContext& context, DXGIFormat format, u64 width, u64 y)
+{
+ if (format == DXGI_FORMAT_BC1_UNORM) {
+ for (size_t x = 0; x < width; x += 4) {
+ TRY(decode_color_block(stream, context, true, x, y));
+ }
+ }
+
+ if (format == DXGI_FORMAT_BC2_UNORM) {
+ for (size_t x = 0; x < width; x += 4) {
+ TRY(decode_dx3_alpha_block(stream, context, x, y));
+ TRY(decode_color_block(stream, context, false, x, y));
+ }
+ }
+
+ if (format == DXGI_FORMAT_BC3_UNORM) {
+ for (size_t x = 0; x < width; x += 4) {
+ TRY(decode_dx5_alpha_block(stream, context, x, y));
+ TRY(decode_color_block(stream, context, false, x, y));
+ }
+ }
+
+ return {};
+}
+static ErrorOr<void> decode_bitmap(Stream& stream, DDSLoadingContext& context, DXGIFormat format, u64 width, u64 height)
+{
+ Vector<u32> dxt_formats = { DXGI_FORMAT_BC1_UNORM, DXGI_FORMAT_BC2_UNORM, DXGI_FORMAT_BC3_UNORM };
+ if (dxt_formats.contains_slow(format)) {
+ for (u64 y = 0; y < height; y += 4) {
+ TRY(decode_dxt(stream, context, format, width, y));
+ }
+ }
+
+ // FIXME: Support more encodings (ATI, YUV, RAW, etc...).
+ return {};
+}
+
+static ErrorOr<void> decode_dds(DDSLoadingContext& context)
+{
+ // All valid DDS files are at least 128 bytes long.
+ if (context.data_size < 128) {
+ dbgln_if(DDS_DEBUG, "File is too short for DDS");
+ context.state = DDSLoadingContext::State::Error;
+ return Error::from_string_literal("File is too short for DDS");
+ }
+
+ FixedMemoryStream stream { ReadonlyBytes { context.data, context.data_size } };
+
+ auto magic = TRY(stream.read_value<u32>());
+
+ if (magic != create_four_cc('D', 'D', 'S', ' ')) {
+ dbgln_if(DDS_DEBUG, "Missing magic number");
+ context.state = DDSLoadingContext::State::Error;
+ return Error::from_string_literal("Missing magic number");
+ }
+
+ context.header = TRY(stream.read_value<DDSHeader>());
+
+ if (context.header.size != 124) {
+ dbgln_if(DDS_DEBUG, "Header size is malformed");
+ context.state = DDSLoadingContext::State::Error;
+ return Error::from_string_literal("Header size is malformed");
+ }
+ if (context.header.pixel_format.size != 32) {
+ dbgln_if(DDS_DEBUG, "Pixel format size is malformed");
+ context.state = DDSLoadingContext::State::Error;
+ return Error::from_string_literal("Pixel format size is malformed");
+ }
+
+ if ((context.header.pixel_format.flags & PixelFormatFlags::DDPF_FOURCC) == PixelFormatFlags::DDPF_FOURCC) {
+ if (context.header.pixel_format.four_cc == create_four_cc('D', 'X', '1', '0')) {
+ if (context.data_size < 148) {
+ dbgln_if(DDS_DEBUG, "DX10 header is too short");
+ context.state = DDSLoadingContext::State::Error;
+ return Error::from_string_literal("DX10 header is too short");
+ }
+
+ context.header10 = TRY(stream.read_value<DDSHeaderDXT10>());
+ }
+ }
+
+ if constexpr (DDS_DEBUG) {
+ context.dump_debug();
+ }
+
+ DXGIFormat format = get_format(context.header.pixel_format);
+
+ Vector<u32> supported_formats = { DXGI_FORMAT_BC1_UNORM, DXGI_FORMAT_BC2_UNORM, DXGI_FORMAT_BC3_UNORM };
+ if (!supported_formats.contains_slow(format)) {
+ dbgln_if(DDS_DEBUG, "Format of type {} is not supported at the moment", static_cast<u32>(format));
+ context.state = DDSLoadingContext::State::Error;
+ return Error::from_string_literal("Format type is not supported at the moment");
+ }
+
+ // We support parsing mipmaps, but we only care about the largest one :^) (At least for now)
+ if (size_t mipmap_level = 0; mipmap_level < max(context.header.mip_map_count, 1u)) {
+ u64 width = get_width(context.header, mipmap_level);
+ u64 height = get_height(context.header, mipmap_level);
+
+ context.bitmap = TRY(Bitmap::create(BitmapFormat::BGRA8888, { width, height }));
+
+ TRY(decode_bitmap(stream, context, format, width, height));
+ }
+
+ context.state = DDSLoadingContext::State::BitmapDecoded;
+
+ return {};
+}
+
+void DDSLoadingContext::dump_debug()
+{
+ StringBuilder builder;
+
+ builder.append("\nDDS:\n"sv);
+ builder.appendff("\tHeader Size: {}\n", header.size);
+
+ builder.append("\tFlags:"sv);
+ if ((header.flags & DDSFlags::DDSD_CAPS) == DDSFlags::DDSD_CAPS)
+ builder.append(" DDSD_CAPS"sv);
+ if ((header.flags & DDSFlags::DDSD_HEIGHT) == DDSFlags::DDSD_HEIGHT)
+ builder.append(" DDSD_HEIGHT"sv);
+ if ((header.flags & DDSFlags::DDSD_WIDTH) == DDSFlags::DDSD_WIDTH)
+ builder.append(" DDSD_WIDTH"sv);
+ if ((header.flags & DDSFlags::DDSD_PITCH) == DDSFlags::DDSD_PITCH)
+ builder.append(" DDSD_PITCH"sv);
+ if ((header.flags & DDSFlags::DDSD_PIXELFORMAT) == DDSFlags::DDSD_PIXELFORMAT)
+ builder.append(" DDSD_PIXELFORMAT"sv);
+ if ((header.flags & DDSFlags::DDSD_MIPMAPCOUNT) == DDSFlags::DDSD_MIPMAPCOUNT)
+ builder.append(" DDSD_MIPMAPCOUNT"sv);
+ if ((header.flags & DDSFlags::DDSD_LINEARSIZE) == DDSFlags::DDSD_LINEARSIZE)
+ builder.append(" DDSD_LINEARSIZE"sv);
+ if ((header.flags & DDSFlags::DDSD_DEPTH) == DDSFlags::DDSD_DEPTH)
+ builder.append(" DDSD_DEPTH"sv);
+ builder.append("\n"sv);
+
+ builder.appendff("\tHeight: {}\n", header.height);
+ builder.appendff("\tWidth: {}\n", header.width);
+ builder.appendff("\tPitch: {}\n", header.pitch);
+ builder.appendff("\tDepth: {}\n", header.depth);
+ builder.appendff("\tMipmap Count: {}\n", header.mip_map_count);
+
+ builder.append("\tCaps:"sv);
+ if ((header.caps1 & Caps1Flags::DDSCAPS_COMPLEX) == Caps1Flags::DDSCAPS_COMPLEX)
+ builder.append(" DDSCAPS_COMPLEX"sv);
+ if ((header.caps1 & Caps1Flags::DDSCAPS_MIPMAP) == Caps1Flags::DDSCAPS_MIPMAP)
+ builder.append(" DDSCAPS_MIPMAP"sv);
+ if ((header.caps1 & Caps1Flags::DDSCAPS_TEXTURE) == Caps1Flags::DDSCAPS_TEXTURE)
+ builder.append(" DDSCAPS_TEXTURE"sv);
+ builder.append("\n"sv);
+
+ builder.append("\tCaps2:"sv);
+ if ((header.caps2 & Caps2Flags::DDSCAPS2_CUBEMAP) == Caps2Flags::DDSCAPS2_CUBEMAP)
+ builder.append(" DDSCAPS2_CUBEMAP"sv);
+ if ((header.caps2 & Caps2Flags::DDSCAPS2_CUBEMAP_POSITIVEX) == Caps2Flags::DDSCAPS2_CUBEMAP_POSITIVEX)
+ builder.append(" DDSCAPS2_CUBEMAP_POSITIVEX"sv);
+ if ((header.caps2 & Caps2Flags::DDSCAPS2_CUBEMAP_NEGATIVEX) == Caps2Flags::DDSCAPS2_CUBEMAP_NEGATIVEX)
+ builder.append(" DDSCAPS2_CUBEMAP_NEGATIVEX"sv);
+ if ((header.caps2 & Caps2Flags::DDSCAPS2_CUBEMAP_POSITIVEY) == Caps2Flags::DDSCAPS2_CUBEMAP_POSITIVEY)
+ builder.append(" DDSCAPS2_CUBEMAP_POSITIVEY"sv);
+ if ((header.caps2 & Caps2Flags::DDSCAPS2_CUBEMAP_NEGATIVEY) == Caps2Flags::DDSCAPS2_CUBEMAP_NEGATIVEY)
+ builder.append(" DDSCAPS2_CUBEMAP_NEGATIVEY"sv);
+ if ((header.caps2 & Caps2Flags::DDSCAPS2_CUBEMAP_POSITIVEZ) == Caps2Flags::DDSCAPS2_CUBEMAP_POSITIVEZ)
+ builder.append(" DDSCAPS2_CUBEMAP_POSITIVEZ"sv);
+ if ((header.caps2 & Caps2Flags::DDSCAPS2_CUBEMAP_NEGATIVEZ) == Caps2Flags::DDSCAPS2_CUBEMAP_NEGATIVEZ)
+ builder.append(" DDSCAPS2_CUBEMAP_NEGATIVEZ"sv);
+ if ((header.caps2 & Caps2Flags::DDSCAPS2_VOLUME) == Caps2Flags::DDSCAPS2_VOLUME)
+ builder.append(" DDSCAPS2_VOLUME"sv);
+ builder.append("\n"sv);
+
+ builder.append("Pixel Format:\n"sv);
+ builder.appendff("\tStruct Size: {}\n", header.pixel_format.size);
+
+ builder.append("\tFlags:"sv);
+ if ((header.pixel_format.flags & PixelFormatFlags::DDPF_ALPHAPIXELS) == PixelFormatFlags::DDPF_ALPHAPIXELS)
+ builder.append(" DDPF_ALPHAPIXELS"sv);
+ if ((header.pixel_format.flags & PixelFormatFlags::DDPF_ALPHA) == PixelFormatFlags::DDPF_ALPHA)
+ builder.append(" DDPF_ALPHA"sv);
+ if ((header.pixel_format.flags & PixelFormatFlags::DDPF_FOURCC) == PixelFormatFlags::DDPF_FOURCC)
+ builder.append(" DDPF_FOURCC"sv);
+ if ((header.pixel_format.flags & PixelFormatFlags::DDPF_PALETTEINDEXED8) == PixelFormatFlags::DDPF_PALETTEINDEXED8)
+ builder.append(" DDPF_PALETTEINDEXED8"sv);
+ if ((header.pixel_format.flags & PixelFormatFlags::DDPF_RGB) == PixelFormatFlags::DDPF_RGB)
+ builder.append(" DDPF_RGB"sv);
+ if ((header.pixel_format.flags & PixelFormatFlags::DDPF_YUV) == PixelFormatFlags::DDPF_YUV)
+ builder.append(" DDPF_YUV"sv);
+ if ((header.pixel_format.flags & PixelFormatFlags::DDPF_LUMINANCE) == PixelFormatFlags::DDPF_LUMINANCE)
+ builder.append(" DDPF_LUMINANCE"sv);
+ if ((header.pixel_format.flags & PixelFormatFlags::DDPF_BUMPDUDV) == PixelFormatFlags::DDPF_BUMPDUDV)
+ builder.append(" DDPF_BUMPDUDV"sv);
+ if ((header.pixel_format.flags & PixelFormatFlags::DDPF_NORMAL) == PixelFormatFlags::DDPF_NORMAL)
+ builder.append(" DDPF_NORMAL"sv);
+ builder.append("\n"sv);
+
+ builder.append("\tFour CC: "sv);
+ builder.appendff("{:c}", (header.pixel_format.four_cc >> (8 * 0)) & 0xFF);
+ builder.appendff("{:c}", (header.pixel_format.four_cc >> (8 * 1)) & 0xFF);
+ builder.appendff("{:c}", (header.pixel_format.four_cc >> (8 * 2)) & 0xFF);
+ builder.appendff("{:c}", (header.pixel_format.four_cc >> (8 * 3)) & 0xFF);
+ builder.append("\n"sv);
+ builder.appendff("\tRGB Bit Count: {}\n", header.pixel_format.rgb_bit_count);
+ builder.appendff("\tR Bit Mask: {}\n", header.pixel_format.r_bit_mask);
+ builder.appendff("\tG Bit Mask: {}\n", header.pixel_format.g_bit_mask);
+ builder.appendff("\tB Bit Mask: {}\n", header.pixel_format.b_bit_mask);
+ builder.appendff("\tA Bit Mask: {}\n", header.pixel_format.a_bit_mask);
+
+ builder.append("DDS10:\n"sv);
+ builder.appendff("\tFormat: {}\n", static_cast<u32>(header10.format));
+
+ builder.append("\tResource Dimension:"sv);
+ if ((header10.resource_dimension & ResourceDimensions::DDS_DIMENSION_UNKNOWN) == ResourceDimensions::DDS_DIMENSION_UNKNOWN)
+ builder.append(" DDS_DIMENSION_UNKNOWN"sv);
+ if ((header10.resource_dimension & ResourceDimensions::DDS_DIMENSION_BUFFER) == ResourceDimensions::DDS_DIMENSION_BUFFER)
+ builder.append(" DDS_DIMENSION_BUFFER"sv);
+ if ((header10.resource_dimension & ResourceDimensions::DDS_DIMENSION_TEXTURE1D) == ResourceDimensions::DDS_DIMENSION_TEXTURE1D)
+ builder.append(" DDS_DIMENSION_TEXTURE1D"sv);
+ if ((header10.resource_dimension & ResourceDimensions::DDS_DIMENSION_TEXTURE2D) == ResourceDimensions::DDS_DIMENSION_TEXTURE2D)
+ builder.append(" DDS_DIMENSION_TEXTURE2D"sv);
+ if ((header10.resource_dimension & ResourceDimensions::DDS_DIMENSION_TEXTURE3D) == ResourceDimensions::DDS_DIMENSION_TEXTURE3D)
+ builder.append(" DDS_DIMENSION_TEXTURE3D"sv);
+ builder.append("\n"sv);
+
+ builder.appendff("\tArray Size: {}\n", header10.array_size);
+
+ builder.append("\tMisc Flags:"sv);
+ if ((header10.misc_flag & MiscFlags::DDS_RESOURCE_MISC_TEXTURECUBE) == MiscFlags::DDS_RESOURCE_MISC_TEXTURECUBE)
+ builder.append(" DDS_RESOURCE_MISC_TEXTURECUBE"sv);
+ builder.append("\n"sv);
+
+ builder.append("\tMisc Flags 2:"sv);
+ if ((header10.misc_flag2 & Misc2Flags::DDS_ALPHA_MODE_UNKNOWN) == Misc2Flags::DDS_ALPHA_MODE_UNKNOWN)
+ builder.append(" DDS_ALPHA_MODE_UNKNOWN"sv);
+ if ((header10.misc_flag2 & Misc2Flags::DDS_ALPHA_MODE_STRAIGHT) == Misc2Flags::DDS_ALPHA_MODE_STRAIGHT)
+ builder.append(" DDS_ALPHA_MODE_STRAIGHT"sv);
+ if ((header10.misc_flag2 & Misc2Flags::DDS_ALPHA_MODE_PREMULTIPLIED) == Misc2Flags::DDS_ALPHA_MODE_PREMULTIPLIED)
+ builder.append(" DDS_ALPHA_MODE_PREMULTIPLIED"sv);
+ if ((header10.misc_flag2 & Misc2Flags::DDS_ALPHA_MODE_OPAQUE) == Misc2Flags::DDS_ALPHA_MODE_OPAQUE)
+ builder.append(" DDS_ALPHA_MODE_OPAQUE"sv);
+ if ((header10.misc_flag2 & Misc2Flags::DDS_ALPHA_MODE_CUSTOM) == Misc2Flags::DDS_ALPHA_MODE_CUSTOM)
+ builder.append(" DDS_ALPHA_MODE_CUSTOM"sv);
+ builder.append("\n"sv);
+
+ dbgln("{}", builder.to_deprecated_string());
+}
+
+DDSImageDecoderPlugin::DDSImageDecoderPlugin(u8 const* data, size_t size)
+{
+ m_context = make<DDSLoadingContext>();
+ m_context->data = data;
+ m_context->data_size = size;
+}
+
+DDSImageDecoderPlugin::~DDSImageDecoderPlugin() = default;
+
+IntSize DDSImageDecoderPlugin::size()
+{
+ if (m_context->state == DDSLoadingContext::State::Error)
+ return {};
+
+ if (m_context->state == DDSLoadingContext::State::BitmapDecoded)
+ return { m_context->header.width, m_context->header.height };
+
+ return {};
+}
+
+void DDSImageDecoderPlugin::set_volatile()
+{
+ if (m_context->bitmap)
+ m_context->bitmap->set_volatile();
+}
+
+bool DDSImageDecoderPlugin::set_nonvolatile(bool& was_purged)
+{
+ if (!m_context->bitmap)
+ return false;
+ return m_context->bitmap->set_nonvolatile(was_purged);
+}
+
+bool DDSImageDecoderPlugin::initialize()
+{
+    // The magic number and header take up 128 bytes, so a usable DDS file has to be larger than that.
+ return m_context->data_size > 128
+ && m_context->data[0] == 0x44
+ && m_context->data[1] == 0x44
+ && m_context->data[2] == 0x53
+ && m_context->data[3] == 0x20;
+}
+
+bool DDSImageDecoderPlugin::sniff(ReadonlyBytes data)
+{
+    // The magic number and header take up 128 bytes, so a usable DDS file has to be larger than that.
+ return data.size() > 128
+ && data.data()[0] == 0x44
+ && data.data()[1] == 0x44
+ && data.data()[2] == 0x53
+ && data.data()[3] == 0x20;
+}
+
+ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> DDSImageDecoderPlugin::create(ReadonlyBytes data)
+{
+ return adopt_nonnull_own_or_enomem(new (nothrow) DDSImageDecoderPlugin(data.data(), data.size()));
+}
+
+bool DDSImageDecoderPlugin::is_animated()
+{
+ return false;
+}
+
+size_t DDSImageDecoderPlugin::loop_count()
+{
+ return 0;
+}
+
+size_t DDSImageDecoderPlugin::frame_count()
+{
+ return 1;
+}
+
+ErrorOr<ImageFrameDescriptor> DDSImageDecoderPlugin::frame(size_t index)
+{
+ if (index > 0)
+ return Error::from_string_literal("DDSImageDecoderPlugin: Invalid frame index");
+
+ if (m_context->state == DDSLoadingContext::State::Error)
+ return Error::from_string_literal("DDSImageDecoderPlugin: Decoding failed");
+
+ if (m_context->state < DDSLoadingContext::State::BitmapDecoded) {
+ TRY(decode_dds(*m_context));
+ }
+
+ VERIFY(m_context->bitmap);
+ return ImageFrameDescriptor { m_context->bitmap, 0 };
+}
+
+ErrorOr<Optional<ReadonlyBytes>> DDSImageDecoderPlugin::icc_data()
+{
+ return OptionalNone {};
+}
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/DDSLoader.h b/Userland/Libraries/LibGfx/ImageFormats/DDSLoader.h
new file mode 100644
index 0000000000..4e7d213add
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/DDSLoader.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2021, the SerenityOS developers.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <LibGfx/ImageFormats/ImageDecoder.h>
+
+namespace Gfx {
+
+enum MiscFlags : u32 {
+ DDS_RESOURCE_MISC_TEXTURECUBE = 0x4,
+};
+
+enum Misc2Flags : u32 {
+ DDS_ALPHA_MODE_UNKNOWN = 0x0,
+ DDS_ALPHA_MODE_STRAIGHT = 0x1,
+ DDS_ALPHA_MODE_PREMULTIPLIED = 0x2,
+ DDS_ALPHA_MODE_OPAQUE = 0x3,
+ DDS_ALPHA_MODE_CUSTOM = 0x4,
+};
+
+enum Caps1Flags : u32 {
+ DDSCAPS_COMPLEX = 0x8,
+ DDSCAPS_TEXTURE = 0x1000,
+ DDSCAPS_MIPMAP = 0x400000,
+};
+
+enum Caps2Flags : u32 {
+ DDSCAPS2_CUBEMAP = 0x200,
+ DDSCAPS2_CUBEMAP_POSITIVEX = 0x400,
+ DDSCAPS2_CUBEMAP_NEGATIVEX = 0x800,
+ DDSCAPS2_CUBEMAP_POSITIVEY = 0x1000,
+ DDSCAPS2_CUBEMAP_NEGATIVEY = 0x2000,
+ DDSCAPS2_CUBEMAP_POSITIVEZ = 0x4000,
+ DDSCAPS2_CUBEMAP_NEGATIVEZ = 0x8000,
+ DDSCAPS2_VOLUME = 0x200000,
+};
+
+enum ResourceDimensions : u32 {
+ DDS_DIMENSION_UNKNOWN,
+ DDS_DIMENSION_BUFFER,
+ DDS_DIMENSION_TEXTURE1D = 2,
+ DDS_DIMENSION_TEXTURE2D = 3,
+ DDS_DIMENSION_TEXTURE3D = 4,
+};
+
+enum DXGIFormat : u32 {
+ DXGI_FORMAT_UNKNOWN = 0,
+ DXGI_FORMAT_R32G32B32A32_TYPELESS,
+ DXGI_FORMAT_R32G32B32A32_FLOAT,
+ DXGI_FORMAT_R32G32B32A32_UINT,
+ DXGI_FORMAT_R32G32B32A32_SINT,
+ DXGI_FORMAT_R32G32B32_TYPELESS,
+ DXGI_FORMAT_R32G32B32_FLOAT,
+ DXGI_FORMAT_R32G32B32_UINT,
+ DXGI_FORMAT_R32G32B32_SINT,
+ DXGI_FORMAT_R16G16B16A16_TYPELESS,
+ DXGI_FORMAT_R16G16B16A16_FLOAT,
+ DXGI_FORMAT_R16G16B16A16_UNORM,
+ DXGI_FORMAT_R16G16B16A16_UINT,
+ DXGI_FORMAT_R16G16B16A16_SNORM,
+ DXGI_FORMAT_R16G16B16A16_SINT,
+ DXGI_FORMAT_R32G32_TYPELESS,
+ DXGI_FORMAT_R32G32_FLOAT,
+ DXGI_FORMAT_R32G32_UINT,
+ DXGI_FORMAT_R32G32_SINT,
+ DXGI_FORMAT_R32G8X24_TYPELESS,
+ DXGI_FORMAT_D32_FLOAT_S8X24_UINT,
+ DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS,
+ DXGI_FORMAT_X32_TYPELESS_G8X24_UINT,
+ DXGI_FORMAT_R10G10B10A2_TYPELESS,
+ DXGI_FORMAT_R10G10B10A2_UNORM,
+ DXGI_FORMAT_R10G10B10A2_UINT,
+ DXGI_FORMAT_R11G11B10_FLOAT,
+ DXGI_FORMAT_R8G8B8A8_TYPELESS,
+ DXGI_FORMAT_R8G8B8A8_UNORM,
+ DXGI_FORMAT_R8G8B8A8_UNORM_SRGB,
+ DXGI_FORMAT_R8G8B8A8_UINT,
+ DXGI_FORMAT_R8G8B8A8_SNORM,
+ DXGI_FORMAT_R8G8B8A8_SINT,
+ DXGI_FORMAT_R16G16_TYPELESS,
+ DXGI_FORMAT_R16G16_FLOAT,
+ DXGI_FORMAT_R16G16_UNORM,
+ DXGI_FORMAT_R16G16_UINT,
+ DXGI_FORMAT_R16G16_SNORM,
+ DXGI_FORMAT_R16G16_SINT,
+ DXGI_FORMAT_R32_TYPELESS,
+ DXGI_FORMAT_D32_FLOAT,
+ DXGI_FORMAT_R32_FLOAT,
+ DXGI_FORMAT_R32_UINT,
+ DXGI_FORMAT_R32_SINT,
+ DXGI_FORMAT_R24G8_TYPELESS,
+ DXGI_FORMAT_D24_UNORM_S8_UINT,
+ DXGI_FORMAT_R24_UNORM_X8_TYPELESS,
+ DXGI_FORMAT_X24_TYPELESS_G8_UINT,
+ DXGI_FORMAT_R8G8_TYPELESS,
+ DXGI_FORMAT_R8G8_UNORM,
+ DXGI_FORMAT_R8G8_UINT,
+ DXGI_FORMAT_R8G8_SNORM,
+ DXGI_FORMAT_R8G8_SINT,
+ DXGI_FORMAT_R16_TYPELESS,
+ DXGI_FORMAT_R16_FLOAT,
+ DXGI_FORMAT_D16_UNORM,
+ DXGI_FORMAT_R16_UNORM,
+ DXGI_FORMAT_R16_UINT,
+ DXGI_FORMAT_R16_SNORM,
+ DXGI_FORMAT_R16_SINT,
+ DXGI_FORMAT_R8_TYPELESS,
+ DXGI_FORMAT_R8_UNORM,
+ DXGI_FORMAT_R8_UINT,
+ DXGI_FORMAT_R8_SNORM,
+ DXGI_FORMAT_R8_SINT,
+ DXGI_FORMAT_A8_UNORM,
+ DXGI_FORMAT_R1_UNORM,
+ DXGI_FORMAT_R9G9B9E5_SHAREDEXP,
+ DXGI_FORMAT_R8G8_B8G8_UNORM,
+ DXGI_FORMAT_G8R8_G8B8_UNORM,
+ DXGI_FORMAT_BC1_TYPELESS,
+ DXGI_FORMAT_BC1_UNORM,
+ DXGI_FORMAT_BC1_UNORM_SRGB,
+ DXGI_FORMAT_BC2_TYPELESS,
+ DXGI_FORMAT_BC2_UNORM,
+ DXGI_FORMAT_BC2_UNORM_SRGB,
+ DXGI_FORMAT_BC3_TYPELESS,
+ DXGI_FORMAT_BC3_UNORM,
+ DXGI_FORMAT_BC3_UNORM_SRGB,
+ DXGI_FORMAT_BC4_TYPELESS,
+ DXGI_FORMAT_BC4_UNORM,
+ DXGI_FORMAT_BC4_SNORM,
+ DXGI_FORMAT_BC5_TYPELESS,
+ DXGI_FORMAT_BC5_UNORM,
+ DXGI_FORMAT_BC5_SNORM,
+ DXGI_FORMAT_B5G6R5_UNORM,
+ DXGI_FORMAT_B5G5R5A1_UNORM,
+ DXGI_FORMAT_B8G8R8A8_UNORM,
+ DXGI_FORMAT_B8G8R8X8_UNORM,
+ DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM,
+ DXGI_FORMAT_B8G8R8A8_TYPELESS,
+ DXGI_FORMAT_B8G8R8A8_UNORM_SRGB,
+ DXGI_FORMAT_B8G8R8X8_TYPELESS,
+ DXGI_FORMAT_B8G8R8X8_UNORM_SRGB,
+ DXGI_FORMAT_BC6H_TYPELESS,
+ DXGI_FORMAT_BC6H_UF16,
+ DXGI_FORMAT_BC6H_SF16,
+ DXGI_FORMAT_BC7_TYPELESS,
+ DXGI_FORMAT_BC7_UNORM,
+ DXGI_FORMAT_BC7_UNORM_SRGB,
+ DXGI_FORMAT_AYUV,
+ DXGI_FORMAT_Y410,
+ DXGI_FORMAT_Y416,
+ DXGI_FORMAT_NV12,
+ DXGI_FORMAT_P010,
+ DXGI_FORMAT_P016,
+ DXGI_FORMAT_420_OPAQUE,
+ DXGI_FORMAT_YUY2,
+ DXGI_FORMAT_Y210,
+ DXGI_FORMAT_Y216,
+ DXGI_FORMAT_NV11,
+ DXGI_FORMAT_AI44,
+ DXGI_FORMAT_IA44,
+ DXGI_FORMAT_P8,
+ DXGI_FORMAT_A8P8,
+ DXGI_FORMAT_B4G4R4A4_UNORM,
+ DXGI_FORMAT_P208,
+ DXGI_FORMAT_V208,
+ DXGI_FORMAT_V408,
+ DXGI_FORMAT_SAMPLER_FEEDBACK_MIN_MIP_OPAQUE,
+ DXGI_FORMAT_SAMPLER_FEEDBACK_MIP_REGION_USED_OPAQUE,
+ DXGI_FORMAT_FORCE_UINT
+};
+
+enum DDSFlags : u32 {
+ DDSD_CAPS = 0x1,
+ DDSD_HEIGHT = 0x2,
+ DDSD_WIDTH = 0x4,
+ DDSD_PITCH = 0x8,
+ DDSD_PIXELFORMAT = 0x1000,
+ DDSD_MIPMAPCOUNT = 0x20000,
+ DDSD_LINEARSIZE = 0x80000,
+ DDSD_DEPTH = 0x800000,
+};
+
+enum PixelFormatFlags : u32 {
+ DDPF_ALPHAPIXELS = 0x1,
+ DDPF_ALPHA = 0x2,
+ DDPF_FOURCC = 0x4,
+ DDPF_PALETTEINDEXED8 = 0x20,
+ DDPF_RGB = 0x40,
+ DDPF_YUV = 0x200,
+ DDPF_LUMINANCE = 0x20000,
+ DDPF_BUMPDUDV = 0x80000,
+ DDPF_NORMAL = 0x80000000,
+};
+
+struct [[gnu::packed]] DDSPixelFormat {
+ u32 size {};
+ u32 flags {};
+ u32 four_cc {};
+ u32 rgb_bit_count {};
+ u32 r_bit_mask {};
+ u32 g_bit_mask {};
+ u32 b_bit_mask {};
+ u32 a_bit_mask {};
+};
+
+struct [[gnu::packed]] DDSHeader {
+ u32 size {};
+ u32 flags {};
+ u32 height {};
+ u32 width {};
+ u32 pitch {};
+ u32 depth {};
+ u32 mip_map_count {};
+ u32 reserved[11];
+ DDSPixelFormat pixel_format;
+ u32 caps1 {};
+ u32 caps2 {};
+ u32 caps3 {};
+ u32 caps4 {};
+ u32 reserved2 {};
+};
+
+struct [[gnu::packed]] DDSHeaderDXT10 {
+ DXGIFormat format {};
+ u32 resource_dimension {};
+ u32 misc_flag {};
+ u32 array_size {};
+ u32 misc_flag2 {};
+};
+
+struct DDSLoadingContext;
+
+class DDSImageDecoderPlugin final : public ImageDecoderPlugin {
+public:
+ static bool sniff(ReadonlyBytes);
+ static ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> create(ReadonlyBytes);
+
+ virtual ~DDSImageDecoderPlugin() override;
+
+ virtual IntSize size() override;
+ virtual void set_volatile() override;
+ [[nodiscard]] virtual bool set_nonvolatile(bool& was_purged) override;
+ virtual bool initialize() override;
+ virtual bool is_animated() override;
+ virtual size_t loop_count() override;
+ virtual size_t frame_count() override;
+ virtual ErrorOr<ImageFrameDescriptor> frame(size_t index) override;
+ virtual ErrorOr<Optional<ReadonlyBytes>> icc_data() override;
+
+private:
+ DDSImageDecoderPlugin(u8 const*, size_t);
+
+ OwnPtr<DDSLoadingContext> m_context;
+};
+
+}
+
+template<>
+struct AK::Traits<Gfx::DDSHeader> : public AK::GenericTraits<Gfx::DDSHeader> {
+ static constexpr bool is_trivially_serializable() { return true; }
+};
+
+template<>
+struct AK::Traits<Gfx::DDSHeaderDXT10> : public AK::GenericTraits<Gfx::DDSHeaderDXT10> {
+ static constexpr bool is_trivially_serializable() { return true; }
+};
diff --git a/Userland/Libraries/LibGfx/ImageFormats/GIFLoader.cpp b/Userland/Libraries/LibGfx/ImageFormats/GIFLoader.cpp
new file mode 100644
index 0000000000..79da2033d0
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/GIFLoader.cpp
@@ -0,0 +1,673 @@
+/*
+ * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2022, the SerenityOS developers.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/Array.h>
+#include <AK/Debug.h>
+#include <AK/Endian.h>
+#include <AK/Error.h>
+#include <AK/IntegralMath.h>
+#include <AK/Memory.h>
+#include <AK/MemoryStream.h>
+#include <AK/Try.h>
+#include <LibGfx/ImageFormats/GIFLoader.h>
+#include <string.h>
+
+namespace Gfx {
+
+// Row strides and offsets for each interlace pass.
+static constexpr Array<int, 4> INTERLACE_ROW_STRIDES = { 8, 8, 4, 2 };
+static constexpr Array<int, 4> INTERLACE_ROW_OFFSETS = { 0, 4, 2, 1 };
+
+struct GIFImageDescriptor {
+ u16 x { 0 };
+ u16 y { 0 };
+ u16 width { 0 };
+ u16 height { 0 };
+ bool use_global_color_map { true };
+ bool interlaced { false };
+ Color color_map[256];
+ u8 lzw_min_code_size { 0 };
+ Vector<u8> lzw_encoded_bytes;
+
+ // Fields from optional graphic control extension block
+ enum DisposalMethod : u8 {
+ None = 0,
+ InPlace = 1,
+ RestoreBackground = 2,
+ RestorePrevious = 3,
+ };
+ DisposalMethod disposal_method { None };
+ u8 transparency_index { 0 };
+ u16 duration { 0 };
+ bool transparent { false };
+ bool user_input { false };
+
+    IntRect rect() const
+ {
+ return { this->x, this->y, this->width, this->height };
+ }
+};
+
+struct LogicalScreen {
+ u16 width;
+ u16 height;
+ Color color_map[256];
+};
+
+struct GIFLoadingContext {
+ enum State {
+ NotDecoded = 0,
+ FrameDescriptorsLoaded,
+ FrameComplete,
+ };
+ State state { NotDecoded };
+ enum ErrorState {
+ NoError = 0,
+ FailedToDecodeAllFrames,
+ FailedToDecodeAnyFrame,
+ FailedToLoadFrameDescriptors,
+ };
+ ErrorState error_state { NoError };
+ u8 const* data { nullptr };
+ size_t data_size { 0 };
+ LogicalScreen logical_screen {};
+ u8 background_color_index { 0 };
+ Vector<NonnullOwnPtr<GIFImageDescriptor>> images {};
+ size_t loops { 1 };
+ RefPtr<Gfx::Bitmap> frame_buffer;
+ size_t current_frame { 0 };
+ RefPtr<Gfx::Bitmap> prev_frame_buffer;
+};
+
+enum class GIFFormat {
+ GIF87a,
+ GIF89a,
+};
+
+static ErrorOr<GIFFormat> decode_gif_header(Stream& stream)
+{
+ static auto valid_header_87 = "GIF87a"sv;
+ static auto valid_header_89 = "GIF89a"sv;
+
+ Array<u8, 6> header;
+ TRY(stream.read_until_filled(header));
+
+ if (header.span() == valid_header_87.bytes())
+ return GIFFormat::GIF87a;
+ if (header.span() == valid_header_89.bytes())
+ return GIFFormat::GIF89a;
+
+ return Error::from_string_literal("GIF header unknown");
+}
+
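+// Incremental decoder for the LZW variant used by GIF: codes start out min_code_size + 1 bits
+// wide (once the two control codes have been added), the code width grows as the table fills,
+// and the table is capped at 4096 entries.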
+class LZWDecoder {
+private:
+ static constexpr int max_code_size = 12;
+
+public:
+ explicit LZWDecoder(Vector<u8> const& lzw_bytes, u8 min_code_size)
+ : m_lzw_bytes(lzw_bytes)
+ , m_code_size(min_code_size)
+ , m_original_code_size(min_code_size)
+ , m_table_capacity(AK::exp2<u32>(min_code_size))
+ {
+ init_code_table();
+ }
+
+ u16 add_control_code()
+ {
+ const u16 control_code = m_code_table.size();
+ m_code_table.append(Vector<u8> {});
+ m_original_code_table.append(Vector<u8> {});
+ if (m_code_table.size() >= m_table_capacity && m_code_size < max_code_size) {
+ ++m_code_size;
+ ++m_original_code_size;
+ m_table_capacity *= 2;
+ }
+ return control_code;
+ }
+
+ void reset()
+ {
+ m_code_table.clear();
+ m_code_table.extend(m_original_code_table);
+ m_code_size = m_original_code_size;
+ m_table_capacity = AK::exp2<u32>(m_code_size);
+ m_output.clear();
+ }
+
+ ErrorOr<u16> next_code()
+ {
+ size_t current_byte_index = m_current_bit_index / 8;
+ if (current_byte_index >= m_lzw_bytes.size()) {
+            return Error::from_string_literal("LZWDecoder tried to read out of bounds");
+ }
+
+ // Extract the code bits using a 32-bit mask to cover the possibility that if
+ // the current code size > 9 bits then the code can span 3 bytes.
+ u8 current_bit_offset = m_current_bit_index % 8;
+ u32 mask = (u32)(m_table_capacity - 1) << current_bit_offset;
+
+ // Make a padded copy of the final bytes in the data to ensure we don't read past the end.
+ if (current_byte_index + sizeof(mask) > m_lzw_bytes.size()) {
+ u8 padded_last_bytes[sizeof(mask)] = { 0 };
+ for (int i = 0; current_byte_index + i < m_lzw_bytes.size(); ++i) {
+ padded_last_bytes[i] = m_lzw_bytes[current_byte_index + i];
+ }
+ u32 const* addr = (u32 const*)&padded_last_bytes;
+ m_current_code = (*addr & mask) >> current_bit_offset;
+ } else {
+ u32 tmp_word;
+ memcpy(&tmp_word, &m_lzw_bytes.at(current_byte_index), sizeof(u32));
+ m_current_code = (tmp_word & mask) >> current_bit_offset;
+ }
+
+ if (m_current_code > m_code_table.size()) {
+ dbgln_if(GIF_DEBUG, "Corrupted LZW stream, invalid code: {} at bit index {}, code table size: {}",
+ m_current_code,
+ m_current_bit_index,
+ m_code_table.size());
+ return Error::from_string_literal("Corrupted LZW stream, invalid code");
+ } else if (m_current_code == m_code_table.size() && m_output.is_empty()) {
+ dbgln_if(GIF_DEBUG, "Corrupted LZW stream, valid new code but output buffer is empty: {} at bit index {}, code table size: {}",
+ m_current_code,
+ m_current_bit_index,
+ m_code_table.size());
+ return Error::from_string_literal("Corrupted LZW stream, valid new code but output buffer is empty");
+ }
+
+ m_current_bit_index += m_code_size;
+
+ return m_current_code;
+ }
+
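+    // Returns the decoded bytes for the current code and grows the code table. The
+    // m_current_code == m_code_table.size() branch is the classic LZW "code not yet in the
+    // table" case, where the new entry is the previous output plus its own first byte.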
+ Vector<u8>& get_output()
+ {
+ VERIFY(m_current_code <= m_code_table.size());
+ if (m_current_code < m_code_table.size()) {
+ Vector<u8> new_entry = m_output;
+ m_output = m_code_table.at(m_current_code);
+ new_entry.append(m_output[0]);
+ extend_code_table(new_entry);
+ } else if (m_current_code == m_code_table.size()) {
+ VERIFY(!m_output.is_empty());
+ m_output.append(m_output[0]);
+ extend_code_table(m_output);
+ }
+ return m_output;
+ }
+
+private:
+ void init_code_table()
+ {
+ m_code_table.ensure_capacity(m_table_capacity);
+ for (u16 i = 0; i < m_table_capacity; ++i) {
+ m_code_table.unchecked_append({ (u8)i });
+ }
+ m_original_code_table = m_code_table;
+ }
+
+ void extend_code_table(Vector<u8> const& entry)
+ {
+ if (entry.size() > 1 && m_code_table.size() < 4096) {
+ m_code_table.append(entry);
+ if (m_code_table.size() >= m_table_capacity && m_code_size < max_code_size) {
+ ++m_code_size;
+ m_table_capacity *= 2;
+ }
+ }
+ }
+
+ Vector<u8> const& m_lzw_bytes;
+
+ int m_current_bit_index { 0 };
+
+ Vector<Vector<u8>> m_code_table {};
+ Vector<Vector<u8>> m_original_code_table {};
+
+ u8 m_code_size { 0 };
+ u8 m_original_code_size { 0 };
+
+ u32 m_table_capacity { 0 };
+
+ u16 m_current_code { 0 };
+ Vector<u8> m_output {};
+};
+
+static void copy_frame_buffer(Bitmap& dest, Bitmap const& src)
+{
+ VERIFY(dest.size_in_bytes() == src.size_in_bytes());
+ memcpy(dest.scanline(0), src.scanline(0), dest.size_in_bytes());
+}
+
+static void clear_rect(Bitmap& bitmap, IntRect const& rect, Color color)
+{
+ auto intersection_rect = rect.intersected(bitmap.rect());
+ if (intersection_rect.is_empty())
+ return;
+
+ ARGB32* dst = bitmap.scanline(intersection_rect.top()) + intersection_rect.left();
+ const size_t dst_skip = bitmap.pitch() / sizeof(ARGB32);
+
+ for (int i = intersection_rect.height() - 1; i >= 0; --i) {
+ fast_u32_fill(dst, color.value(), intersection_rect.width());
+ dst += dst_skip;
+ }
+}
+
+static ErrorOr<void> decode_frame(GIFLoadingContext& context, size_t frame_index)
+{
+ if (frame_index >= context.images.size()) {
+        return Error::from_string_literal("frame_index is out of bounds");
+ }
+
+ if (context.state >= GIFLoadingContext::State::FrameComplete && frame_index == context.current_frame) {
+ return {};
+ }
+
+ size_t start_frame = context.current_frame + 1;
+ if (context.state < GIFLoadingContext::State::FrameComplete) {
+ start_frame = 0;
+ context.frame_buffer = TRY(Bitmap::create(BitmapFormat::BGRA8888, { context.logical_screen.width, context.logical_screen.height }));
+ context.prev_frame_buffer = TRY(Bitmap::create(BitmapFormat::BGRA8888, { context.logical_screen.width, context.logical_screen.height }));
+
+ } else if (frame_index < context.current_frame) {
+ start_frame = 0;
+ }
+
+ for (size_t i = start_frame; i <= frame_index; ++i) {
+ auto& image = context.images.at(i);
+
+ auto const previous_image_disposal_method = i > 0 ? context.images.at(i - 1)->disposal_method : GIFImageDescriptor::DisposalMethod::None;
+
+ if (i == 0) {
+ context.frame_buffer->fill(Color::Transparent);
+ } else if (i > 0 && image->disposal_method == GIFImageDescriptor::DisposalMethod::RestorePrevious
+ && previous_image_disposal_method != GIFImageDescriptor::DisposalMethod::RestorePrevious) {
+ // This marks the start of a run of frames that once disposed should be restored to the
+ // previous underlying image contents. Therefore we make a copy of the current frame
+ // buffer so that it can be restored later.
+ copy_frame_buffer(*context.prev_frame_buffer, *context.frame_buffer);
+ }
+
+ if (previous_image_disposal_method == GIFImageDescriptor::DisposalMethod::RestoreBackground) {
+ // Note: RestoreBackground could be interpreted either as restoring the underlying
+ // background of the entire image (e.g. container element's background-color), or the
+ // background color of the GIF itself. It appears that all major browsers and most other
+ // GIF decoders adhere to the former interpretation, therefore we will do the same by
+            // clearing the just-disposed frame's rect (rather than the whole frame buffer) to transparent.
+ clear_rect(*context.frame_buffer, context.images[i - 1]->rect(), Color::Transparent);
+ } else if (i > 0 && previous_image_disposal_method == GIFImageDescriptor::DisposalMethod::RestorePrevious) {
+ // Previous frame indicated that once disposed, it should be restored to *its* previous
+ // underlying image contents, therefore we restore the saved previous frame buffer.
+ copy_frame_buffer(*context.frame_buffer, *context.prev_frame_buffer);
+ }
+
+ if (image->lzw_min_code_size > 8)
+ return Error::from_string_literal("LZW minimum code size is greater than 8");
+
+ LZWDecoder decoder(image->lzw_encoded_bytes, image->lzw_min_code_size);
+
+ // Add GIF-specific control codes
+ int const clear_code = decoder.add_control_code();
+ int const end_of_information_code = decoder.add_control_code();
+
+ auto const& color_map = image->use_global_color_map ? context.logical_screen.color_map : image->color_map;
+
+ int pixel_index = 0;
+ int row = 0;
+ int interlace_pass = 0;
+ while (true) {
+ ErrorOr<u16> code = decoder.next_code();
+ if (code.is_error()) {
+ dbgln_if(GIF_DEBUG, "Unexpectedly reached end of gif frame data");
+ return code.release_error();
+ }
+
+ if (code.value() == clear_code) {
+ decoder.reset();
+ continue;
+ }
+ if (code.value() == end_of_information_code)
+ break;
+ if (!image->width)
+ continue;
+
+ auto colors = decoder.get_output();
+ for (auto const& color : colors) {
+ auto c = color_map[color];
+
+ int x = pixel_index % image->width + image->x;
+ int y = row + image->y;
+
+ if (context.frame_buffer->rect().contains(x, y) && (!image->transparent || color != image->transparency_index)) {
+ context.frame_buffer->set_pixel(x, y, c);
+ }
+
+ ++pixel_index;
+ if (pixel_index % image->width == 0) {
+ if (image->interlaced) {
+ if (interlace_pass < 4) {
+ if (row + INTERLACE_ROW_STRIDES[interlace_pass] >= image->height) {
+ ++interlace_pass;
+ if (interlace_pass < 4)
+ row = INTERLACE_ROW_OFFSETS[interlace_pass];
+ } else {
+ row += INTERLACE_ROW_STRIDES[interlace_pass];
+ }
+ }
+ } else {
+ ++row;
+ }
+ }
+ }
+ }
+
+ context.current_frame = i;
+ context.state = GIFLoadingContext::State::FrameComplete;
+ }
+
+ return {};
+}
+
+static ErrorOr<void> load_gif_frame_descriptors(GIFLoadingContext& context)
+{
+ if (context.data_size < 32)
+ return Error::from_string_literal("Size too short for GIF frame descriptors");
+
+ FixedMemoryStream stream { { context.data, context.data_size } };
+
+ TRY(decode_gif_header(stream));
+
+ context.logical_screen.width = TRY(stream.read_value<LittleEndian<u16>>());
+ context.logical_screen.height = TRY(stream.read_value<LittleEndian<u16>>());
+
+ if (context.logical_screen.width > maximum_width_for_decoded_images || context.logical_screen.height > maximum_height_for_decoded_images) {
+ dbgln("This GIF is too large for comfort: {}x{}", context.logical_screen.width, context.logical_screen.height);
+ return Error::from_string_literal("This GIF is too large for comfort");
+ }
+
+ auto gcm_info = TRY(stream.read_value<u8>());
+ context.background_color_index = TRY(stream.read_value<u8>());
+ [[maybe_unused]] auto pixel_aspect_ratio = TRY(stream.read_value<u8>());
+
+ u8 bits_per_pixel = (gcm_info & 7) + 1;
+ int color_map_entry_count = 1;
+ for (int i = 0; i < bits_per_pixel; ++i)
+ color_map_entry_count *= 2;
+
+ for (int i = 0; i < color_map_entry_count; ++i) {
+ u8 r = TRY(stream.read_value<u8>());
+ u8 g = TRY(stream.read_value<u8>());
+ u8 b = TRY(stream.read_value<u8>());
+ context.logical_screen.color_map[i] = { r, g, b };
+ }
+
+ NonnullOwnPtr<GIFImageDescriptor> current_image = make<GIFImageDescriptor>();
+ for (;;) {
+ u8 sentinel = TRY(stream.read_value<u8>());
+
+ if (sentinel == '!') {
+ u8 extension_type = TRY(stream.read_value<u8>());
+
+ u8 sub_block_length = 0;
+
+ Vector<u8> sub_block {};
+ for (;;) {
+ sub_block_length = TRY(stream.read_value<u8>());
+ if (sub_block_length == 0)
+ break;
+
+ TRY(sub_block.try_resize(sub_block.size() + sub_block_length));
+ TRY(stream.read_until_filled(sub_block.span().slice_from_end(sub_block_length)));
+ }
+
+ if (extension_type == 0xF9) {
+ if (sub_block.size() != 4) {
+ dbgln_if(GIF_DEBUG, "Unexpected graphic control size");
+ continue;
+ }
+
+ u8 disposal_method = (sub_block[0] & 0x1C) >> 2;
+ current_image->disposal_method = (GIFImageDescriptor::DisposalMethod)disposal_method;
+
+ u8 user_input = (sub_block[0] & 0x2) >> 1;
+ current_image->user_input = user_input == 1;
+
+ u8 transparent = sub_block[0] & 1;
+ current_image->transparent = transparent == 1;
+
+ u16 duration = sub_block[1] + ((u16)sub_block[2] << 8);
+ current_image->duration = duration;
+
+ current_image->transparency_index = sub_block[3];
+ }
+
+ if (extension_type == 0xFF) {
+ if (sub_block.size() != 14) {
+ dbgln_if(GIF_DEBUG, "Unexpected application extension size: {}", sub_block.size());
+ continue;
+ }
+
+ if (sub_block[11] != 1) {
+ dbgln_if(GIF_DEBUG, "Unexpected application extension format");
+ continue;
+ }
+
+ u16 loops = sub_block[12] + (sub_block[13] << 8);
+ context.loops = loops;
+ }
+
+ continue;
+ }
+
+ if (sentinel == ',') {
+ context.images.append(move(current_image));
+ auto& image = context.images.last();
+
+ image->x = TRY(stream.read_value<LittleEndian<u16>>());
+ image->y = TRY(stream.read_value<LittleEndian<u16>>());
+ image->width = TRY(stream.read_value<LittleEndian<u16>>());
+ image->height = TRY(stream.read_value<LittleEndian<u16>>());
+
+ auto packed_fields = TRY(stream.read_value<u8>());
+
+ image->use_global_color_map = !(packed_fields & 0x80);
+ image->interlaced = (packed_fields & 0x40) != 0;
+
+ if (!image->use_global_color_map) {
+ size_t local_color_table_size = AK::exp2<size_t>((packed_fields & 7) + 1);
+
+ for (size_t i = 0; i < local_color_table_size; ++i) {
+ u8 r = TRY(stream.read_value<u8>());
+ u8 g = TRY(stream.read_value<u8>());
+ u8 b = TRY(stream.read_value<u8>());
+ image->color_map[i] = { r, g, b };
+ }
+ }
+
+ image->lzw_min_code_size = TRY(stream.read_value<u8>());
+
+ u8 lzw_encoded_bytes_expected = 0;
+
+ for (;;) {
+ lzw_encoded_bytes_expected = TRY(stream.read_value<u8>());
+ if (lzw_encoded_bytes_expected == 0)
+ break;
+
+ Array<u8, 256> buffer;
+ TRY(stream.read_until_filled(buffer.span().trim(lzw_encoded_bytes_expected)));
+
+ for (int i = 0; i < lzw_encoded_bytes_expected; ++i) {
+ image->lzw_encoded_bytes.append(buffer[i]);
+ }
+ }
+
+ current_image = make<GIFImageDescriptor>();
+ continue;
+ }
+
+ if (sentinel == ';') {
+ break;
+ }
+
+ return Error::from_string_literal("Unexpected sentinel");
+ }
+
+ context.state = GIFLoadingContext::State::FrameDescriptorsLoaded;
+ return {};
+}
+
+GIFImageDecoderPlugin::GIFImageDecoderPlugin(u8 const* data, size_t size)
+{
+ m_context = make<GIFLoadingContext>();
+ m_context->data = data;
+ m_context->data_size = size;
+}
+
+GIFImageDecoderPlugin::~GIFImageDecoderPlugin() = default;
+
+IntSize GIFImageDecoderPlugin::size()
+{
+ if (m_context->error_state == GIFLoadingContext::ErrorState::FailedToLoadFrameDescriptors) {
+ return {};
+ }
+
+ if (m_context->state < GIFLoadingContext::State::FrameDescriptorsLoaded) {
+ if (load_gif_frame_descriptors(*m_context).is_error()) {
+ m_context->error_state = GIFLoadingContext::ErrorState::FailedToLoadFrameDescriptors;
+ return {};
+ }
+ }
+
+ return { m_context->logical_screen.width, m_context->logical_screen.height };
+}
+
+void GIFImageDecoderPlugin::set_volatile()
+{
+ if (m_context->frame_buffer) {
+ m_context->frame_buffer->set_volatile();
+ }
+}
+
+bool GIFImageDecoderPlugin::set_nonvolatile(bool& was_purged)
+{
+ if (!m_context->frame_buffer)
+ return false;
+ return m_context->frame_buffer->set_nonvolatile(was_purged);
+}
+
+bool GIFImageDecoderPlugin::initialize()
+{
+ FixedMemoryStream stream { { m_context->data, m_context->data_size } };
+ return !decode_gif_header(stream).is_error();
+}
+
+bool GIFImageDecoderPlugin::sniff(ReadonlyBytes data)
+{
+ FixedMemoryStream stream { data };
+ return !decode_gif_header(stream).is_error();
+}
+
+ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> GIFImageDecoderPlugin::create(ReadonlyBytes data)
+{
+ return adopt_nonnull_own_or_enomem(new (nothrow) GIFImageDecoderPlugin(data.data(), data.size()));
+}
+
+bool GIFImageDecoderPlugin::is_animated()
+{
+ if (m_context->error_state != GIFLoadingContext::ErrorState::NoError) {
+ return false;
+ }
+
+ if (m_context->state < GIFLoadingContext::State::FrameDescriptorsLoaded) {
+ if (load_gif_frame_descriptors(*m_context).is_error()) {
+ m_context->error_state = GIFLoadingContext::ErrorState::FailedToLoadFrameDescriptors;
+ return false;
+ }
+ }
+
+ return m_context->images.size() > 1;
+}
+
+size_t GIFImageDecoderPlugin::loop_count()
+{
+ if (m_context->error_state != GIFLoadingContext::ErrorState::NoError) {
+ return 0;
+ }
+
+ if (m_context->state < GIFLoadingContext::State::FrameDescriptorsLoaded) {
+ if (load_gif_frame_descriptors(*m_context).is_error()) {
+ m_context->error_state = GIFLoadingContext::ErrorState::FailedToLoadFrameDescriptors;
+ return 0;
+ }
+ }
+
+ return m_context->loops;
+}
+
+size_t GIFImageDecoderPlugin::frame_count()
+{
+ if (m_context->error_state != GIFLoadingContext::ErrorState::NoError) {
+ return 1;
+ }
+
+ if (m_context->state < GIFLoadingContext::State::FrameDescriptorsLoaded) {
+ if (load_gif_frame_descriptors(*m_context).is_error()) {
+ m_context->error_state = GIFLoadingContext::ErrorState::FailedToLoadFrameDescriptors;
+ return 1;
+ }
+ }
+
+ return m_context->images.size();
+}
+
+ErrorOr<ImageFrameDescriptor> GIFImageDecoderPlugin::frame(size_t index)
+{
+ if (m_context->error_state >= GIFLoadingContext::ErrorState::FailedToDecodeAnyFrame) {
+ return Error::from_string_literal("GIFImageDecoderPlugin: Decoding failed");
+ }
+
+ if (m_context->state < GIFLoadingContext::State::FrameDescriptorsLoaded) {
+ if (auto result = load_gif_frame_descriptors(*m_context); result.is_error()) {
+ m_context->error_state = GIFLoadingContext::ErrorState::FailedToLoadFrameDescriptors;
+ return result.release_error();
+ }
+ }
+
+ if (m_context->error_state == GIFLoadingContext::ErrorState::NoError) {
+ if (auto result = decode_frame(*m_context, index); result.is_error()) {
+ if (m_context->state < GIFLoadingContext::State::FrameComplete) {
+ m_context->error_state = GIFLoadingContext::ErrorState::FailedToDecodeAnyFrame;
+ return result.release_error();
+ }
+ if (auto result = decode_frame(*m_context, 0); result.is_error()) {
+ m_context->error_state = GIFLoadingContext::ErrorState::FailedToDecodeAnyFrame;
+ return result.release_error();
+ }
+ m_context->error_state = GIFLoadingContext::ErrorState::FailedToDecodeAllFrames;
+ }
+ }
+
+ ImageFrameDescriptor frame {};
+ frame.image = TRY(m_context->frame_buffer->clone());
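+    // GIF stores frame delays in centiseconds. Following common browser behaviour, treat delays
+    // of 0 or 1 (i.e. 10 ms or less after conversion) as 100 ms so broken GIFs still animate at a
+    // sane speed.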
+ frame.duration = m_context->images[index]->duration * 10;
+
+ if (frame.duration <= 10) {
+ frame.duration = 100;
+ }
+
+ return frame;
+}
+
+ErrorOr<Optional<ReadonlyBytes>> GIFImageDecoderPlugin::icc_data()
+{
+ return OptionalNone {};
+}
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/GIFLoader.h b/Userland/Libraries/LibGfx/ImageFormats/GIFLoader.h
new file mode 100644
index 0000000000..88975716be
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/GIFLoader.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <LibGfx/Bitmap.h>
+#include <LibGfx/ImageFormats/ImageDecoder.h>
+
+namespace Gfx {
+
+struct GIFLoadingContext;
+
+class GIFImageDecoderPlugin final : public ImageDecoderPlugin {
+public:
+ static bool sniff(ReadonlyBytes);
+ static ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> create(ReadonlyBytes);
+
+ virtual ~GIFImageDecoderPlugin() override;
+
+ virtual IntSize size() override;
+ virtual void set_volatile() override;
+ [[nodiscard]] virtual bool set_nonvolatile(bool& was_purged) override;
+ virtual bool initialize() override;
+ virtual bool is_animated() override;
+ virtual size_t loop_count() override;
+ virtual size_t frame_count() override;
+ virtual ErrorOr<ImageFrameDescriptor> frame(size_t index) override;
+ virtual ErrorOr<Optional<ReadonlyBytes>> icc_data() override;
+
+private:
+ GIFImageDecoderPlugin(u8 const*, size_t);
+
+ OwnPtr<GIFLoadingContext> m_context;
+};
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/ICOLoader.cpp b/Userland/Libraries/LibGfx/ImageFormats/ICOLoader.cpp
new file mode 100644
index 0000000000..87aa56ffe8
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/ICOLoader.cpp
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2020, Paul Roukema <roukemap@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/ByteBuffer.h>
+#include <AK/Debug.h>
+#include <AK/MemoryStream.h>
+#include <AK/Types.h>
+#include <LibGfx/ImageFormats/BMPLoader.h>
+#include <LibGfx/ImageFormats/ICOLoader.h>
+#include <LibGfx/ImageFormats/PNGLoader.h>
+#include <string.h>
+
+namespace Gfx {
+
+// FIXME: This is in little-endian order. Maybe need a NetworkOrdered<T> equivalent eventually.
+struct ICONDIR {
+ u16 must_be_0 = 0;
+ u16 must_be_1 = 0;
+ u16 image_count = 0;
+};
+static_assert(AssertSize<ICONDIR, 6>());
+
+struct ICONDIRENTRY {
+ u8 width;
+ u8 height;
+ u8 color_count;
+ u8 reserved_0;
+ u16 planes;
+ u16 bits_per_pixel;
+ u32 size;
+ u32 offset;
+};
+static_assert(AssertSize<ICONDIRENTRY, 16>());
+
+};
+
+template<>
+class AK::Traits<Gfx::ICONDIR> : public GenericTraits<Gfx::ICONDIR> {
+public:
+ static constexpr bool is_trivially_serializable() { return true; }
+};
+
+template<>
+class AK::Traits<Gfx::ICONDIRENTRY> : public GenericTraits<Gfx::ICONDIRENTRY> {
+public:
+ static constexpr bool is_trivially_serializable() { return true; }
+};
+
+namespace Gfx {
+
+struct ICOImageDescriptor {
+ u16 width;
+ u16 height;
+ u16 bits_per_pixel;
+ size_t offset;
+ size_t size;
+ RefPtr<Gfx::Bitmap> bitmap;
+};
+
+struct ICOLoadingContext {
+ enum State {
+ NotDecoded = 0,
+ Error,
+ DirectoryDecoded,
+ BitmapDecoded
+ };
+ State state { NotDecoded };
+ u8 const* data { nullptr };
+ size_t data_size { 0 };
+ Vector<ICOImageDescriptor> images;
+ size_t largest_index;
+};
+
+static ErrorOr<size_t> decode_ico_header(Stream& stream)
+{
+ auto header = TRY(stream.read_value<ICONDIR>());
+ if (header.must_be_0 != 0 || header.must_be_1 != 1)
+ return Error::from_string_literal("Invalid ICO header");
+ return { header.image_count };
+}
+
+static ErrorOr<ICOImageDescriptor> decode_ico_direntry(Stream& stream)
+{
+ auto entry = TRY(stream.read_value<ICONDIRENTRY>());
+ ICOImageDescriptor desc = { entry.width, entry.height, entry.bits_per_pixel, entry.offset, entry.size, nullptr };
+ if (desc.width == 0)
+ desc.width = 256;
+ if (desc.height == 0)
+ desc.height = 256;
+
+ return { desc };
+}
+
+static size_t find_largest_image(ICOLoadingContext const& context)
+{
+ size_t max_area = 0;
+ size_t index = 0;
+ size_t largest_index = 0;
+ u16 max_bits_per_pixel = 0;
+ for (auto const& desc : context.images) {
+ if (static_cast<size_t>(desc.width) * static_cast<size_t>(desc.height) >= max_area) {
+ if (desc.bits_per_pixel > max_bits_per_pixel) {
+ max_area = desc.width * desc.height;
+ largest_index = index;
+ max_bits_per_pixel = desc.bits_per_pixel;
+ }
+ }
+ ++index;
+ }
+ return largest_index;
+}
+
+static ErrorOr<void> load_ico_directory(ICOLoadingContext& context)
+{
+ FixedMemoryStream stream { { context.data, context.data_size } };
+
+ auto image_count = TRY(decode_ico_header(stream));
+ if (image_count == 0)
+ return Error::from_string_literal("ICO file has no images");
+
+ for (size_t i = 0; i < image_count; ++i) {
+ auto desc = TRY(decode_ico_direntry(stream));
+ if (desc.offset + desc.size < desc.offset // detect integer overflow
+ || (desc.offset + desc.size) > context.data_size) {
+ dbgln_if(ICO_DEBUG, "load_ico_directory: offset: {} size: {} doesn't fit in ICO size: {}", desc.offset, desc.size, context.data_size);
+ return Error::from_string_literal("ICO size too large");
+ }
+ dbgln_if(ICO_DEBUG, "load_ico_directory: index {} width: {} height: {} offset: {} size: {}", i, desc.width, desc.height, desc.offset, desc.size);
+ TRY(context.images.try_append(desc));
+ }
+ context.largest_index = find_largest_image(context);
+ context.state = ICOLoadingContext::State::DirectoryDecoded;
+ return {};
+}
+
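+// Each ICO directory entry points at either an embedded PNG stream or a
+// headerless BMP (a raw DIB: BITMAPINFOHEADER plus pixel data, with no
+// BITMAPFILEHEADER), so we sniff for PNG first and otherwise hand the bytes
+// to the BMP decoder in its ICO-specific mode.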
+ErrorOr<void> ICOImageDecoderPlugin::load_ico_bitmap(ICOLoadingContext& context, Optional<size_t> index)
+{
+ if (context.state < ICOLoadingContext::State::DirectoryDecoded)
+ TRY(load_ico_directory(context));
+
+ size_t real_index = context.largest_index;
+ if (index.has_value())
+ real_index = index.value();
+ if (real_index >= context.images.size())
+ return Error::from_string_literal("Index out of bounds");
+
+ ICOImageDescriptor& desc = context.images[real_index];
+ if (PNGImageDecoderPlugin::sniff({ context.data + desc.offset, desc.size })) {
+ auto png_decoder = TRY(PNGImageDecoderPlugin::create({ context.data + desc.offset, desc.size }));
+ if (png_decoder->initialize()) {
+ auto decoded_png_frame = TRY(png_decoder->frame(0));
+ if (!decoded_png_frame.image) {
+ dbgln_if(ICO_DEBUG, "load_ico_bitmap: failed to load PNG encoded image index: {}", real_index);
+ return Error::from_string_literal("Decoded PNG frame has no image");
+ }
+ desc.bitmap = decoded_png_frame.image;
+ return {};
+ }
+ return Error::from_string_literal("Couldn't initialize PNG Decoder");
+ } else {
+ auto bmp_decoder = TRY(BMPImageDecoderPlugin::create_as_included_in_ico({}, { context.data + desc.offset, desc.size }));
+ // NOTE: We don't initialize a BMP decoder in the usual way, but rather
+ // we just create an object and try to sniff for a frame when it's included
+ // inside an ICO image.
+ if (bmp_decoder->sniff_dib()) {
+ auto decoded_bmp_frame = TRY(bmp_decoder->frame(0));
+ if (!decoded_bmp_frame.image) {
+ dbgln_if(ICO_DEBUG, "load_ico_bitmap: failed to load BMP encoded image index: {}", real_index);
+ return Error::from_string_literal("Decoded BMP frame has no image");
+ }
+ desc.bitmap = decoded_bmp_frame.image;
+ } else {
+ dbgln_if(ICO_DEBUG, "load_ico_bitmap: encoded image not supported at index: {}", real_index);
+ return Error::from_string_literal("Encoded image not supported");
+ }
+ return {};
+ }
+}
+
+bool ICOImageDecoderPlugin::sniff(ReadonlyBytes data)
+{
+ FixedMemoryStream stream { data };
+ return !decode_ico_header(stream).is_error();
+}
+
+ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> ICOImageDecoderPlugin::create(ReadonlyBytes data)
+{
+ return adopt_nonnull_own_or_enomem(new (nothrow) ICOImageDecoderPlugin(data.data(), data.size()));
+}
+
+ICOImageDecoderPlugin::ICOImageDecoderPlugin(u8 const* data, size_t size)
+{
+ m_context = make<ICOLoadingContext>();
+ m_context->data = data;
+ m_context->data_size = size;
+}
+
+ICOImageDecoderPlugin::~ICOImageDecoderPlugin() = default;
+
+IntSize ICOImageDecoderPlugin::size()
+{
+ if (m_context->state == ICOLoadingContext::State::Error) {
+ return {};
+ }
+
+ if (m_context->state < ICOLoadingContext::State::DirectoryDecoded) {
+ if (load_ico_directory(*m_context).is_error()) {
+ m_context->state = ICOLoadingContext::State::Error;
+ return {};
+ }
+ m_context->state = ICOLoadingContext::State::DirectoryDecoded;
+ }
+
+ return { m_context->images[m_context->largest_index].width, m_context->images[m_context->largest_index].height };
+}
+
+void ICOImageDecoderPlugin::set_volatile()
+{
+ if (m_context->images[0].bitmap)
+ m_context->images[0].bitmap->set_volatile();
+}
+
+bool ICOImageDecoderPlugin::set_nonvolatile(bool& was_purged)
+{
+ if (!m_context->images[0].bitmap)
+ return false;
+ return m_context->images[0].bitmap->set_nonvolatile(was_purged);
+}
+
+bool ICOImageDecoderPlugin::initialize()
+{
+ FixedMemoryStream stream { { m_context->data, m_context->data_size } };
+ return !decode_ico_header(stream).is_error();
+}
+
+bool ICOImageDecoderPlugin::is_animated()
+{
+ return false;
+}
+
+size_t ICOImageDecoderPlugin::loop_count()
+{
+ return 0;
+}
+
+size_t ICOImageDecoderPlugin::frame_count()
+{
+ return 1;
+}
+
+ErrorOr<ImageFrameDescriptor> ICOImageDecoderPlugin::frame(size_t index)
+{
+ if (index > 0)
+ return Error::from_string_literal("ICOImageDecoderPlugin: Invalid frame index");
+
+ if (m_context->state == ICOLoadingContext::State::Error)
+ return Error::from_string_literal("ICOImageDecoderPlugin: Decoding failed");
+
+ if (m_context->state < ICOLoadingContext::State::BitmapDecoded) {
+ // NOTE: This forces the directory and bitmap decoding to happen.
+ auto maybe_error = load_ico_bitmap(*m_context, {});
+ if (maybe_error.is_error()) {
+ m_context->state = ICOLoadingContext::State::Error;
+ return Error::from_string_literal("ICOImageDecoderPlugin: Decoding failed");
+ }
+ m_context->state = ICOLoadingContext::State::BitmapDecoded;
+ }
+
+ VERIFY(m_context->images[m_context->largest_index].bitmap);
+ return ImageFrameDescriptor { m_context->images[m_context->largest_index].bitmap, 0 };
+}
+
+ErrorOr<Optional<ReadonlyBytes>> ICOImageDecoderPlugin::icc_data()
+{
+ return OptionalNone {};
+}
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/ICOLoader.h b/Userland/Libraries/LibGfx/ImageFormats/ICOLoader.h
new file mode 100644
index 0000000000..bef1c4cf87
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/ICOLoader.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020, Paul Roukema <roukemap@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <LibGfx/ImageFormats/ImageDecoder.h>
+
+namespace Gfx {
+
+struct ICOLoadingContext;
+
+class ICOImageDecoderPlugin final : public ImageDecoderPlugin {
+public:
+ static bool sniff(ReadonlyBytes);
+ static ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> create(ReadonlyBytes);
+
+ virtual ~ICOImageDecoderPlugin() override;
+
+ virtual IntSize size() override;
+ virtual void set_volatile() override;
+ [[nodiscard]] virtual bool set_nonvolatile(bool& was_purged) override;
+ virtual bool initialize() override;
+ virtual bool is_animated() override;
+ virtual size_t loop_count() override;
+ virtual size_t frame_count() override;
+ virtual ErrorOr<ImageFrameDescriptor> frame(size_t index) override;
+ virtual ErrorOr<Optional<ReadonlyBytes>> icc_data() override;
+
+private:
+ ICOImageDecoderPlugin(u8 const*, size_t);
+ static ErrorOr<void> load_ico_bitmap(ICOLoadingContext& context, Optional<size_t> index);
+
+ OwnPtr<ICOLoadingContext> m_context;
+};
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/ImageDecoder.cpp b/Userland/Libraries/LibGfx/ImageFormats/ImageDecoder.cpp
new file mode 100644
index 0000000000..b8a8ff4207
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/ImageDecoder.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/LexicalPath.h>
+#include <LibGfx/ImageFormats/BMPLoader.h>
+#include <LibGfx/ImageFormats/DDSLoader.h>
+#include <LibGfx/ImageFormats/GIFLoader.h>
+#include <LibGfx/ImageFormats/ICOLoader.h>
+#include <LibGfx/ImageFormats/ImageDecoder.h>
+#include <LibGfx/ImageFormats/JPEGLoader.h>
+#include <LibGfx/ImageFormats/PBMLoader.h>
+#include <LibGfx/ImageFormats/PGMLoader.h>
+#include <LibGfx/ImageFormats/PNGLoader.h>
+#include <LibGfx/ImageFormats/PPMLoader.h>
+#include <LibGfx/ImageFormats/QOILoader.h>
+#include <LibGfx/ImageFormats/TGALoader.h>
+#include <LibGfx/ImageFormats/WebPLoader.h>
+
+namespace Gfx {
+
+struct ImagePluginInitializer {
+ bool (*sniff)(ReadonlyBytes) = nullptr;
+ ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> (*create)(ReadonlyBytes) = nullptr;
+};
+
+static constexpr ImagePluginInitializer s_initializers[] = {
+ { PNGImageDecoderPlugin::sniff, PNGImageDecoderPlugin::create },
+ { GIFImageDecoderPlugin::sniff, GIFImageDecoderPlugin::create },
+ { BMPImageDecoderPlugin::sniff, BMPImageDecoderPlugin::create },
+ { PBMImageDecoderPlugin::sniff, PBMImageDecoderPlugin::create },
+ { PGMImageDecoderPlugin::sniff, PGMImageDecoderPlugin::create },
+ { PPMImageDecoderPlugin::sniff, PPMImageDecoderPlugin::create },
+ { ICOImageDecoderPlugin::sniff, ICOImageDecoderPlugin::create },
+ { JPEGImageDecoderPlugin::sniff, JPEGImageDecoderPlugin::create },
+ { DDSImageDecoderPlugin::sniff, DDSImageDecoderPlugin::create },
+ { QOIImageDecoderPlugin::sniff, QOIImageDecoderPlugin::create },
+ { WebPImageDecoderPlugin::sniff, WebPImageDecoderPlugin::create },
+};
+
+struct ImagePluginWithMIMETypeInitializer {
+ ErrorOr<bool> (*validate_before_create)(ReadonlyBytes) = nullptr;
+ ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> (*create)(ReadonlyBytes) = nullptr;
+ StringView mime_type;
+};
+
+static constexpr ImagePluginWithMIMETypeInitializer s_initializers_with_mime_type[] = {
+ { TGAImageDecoderPlugin::validate_before_create, TGAImageDecoderPlugin::create, "image/x-targa"sv },
+};
+
+static OwnPtr<ImageDecoderPlugin> probe_and_sniff_for_appropriate_plugin(ReadonlyBytes bytes)
+{
+ for (auto& plugin : s_initializers) {
+ auto sniff_result = plugin.sniff(bytes);
+ if (!sniff_result)
+ continue;
+ auto plugin_decoder = plugin.create(bytes).release_value_but_fixme_should_propagate_errors();
+ if (plugin_decoder->initialize())
+ return plugin_decoder;
+ }
+ return {};
+}
+
+static OwnPtr<ImageDecoderPlugin> probe_and_sniff_for_appropriate_plugin_with_known_mime_type(StringView mime_type, ReadonlyBytes bytes)
+{
+ for (auto& plugin : s_initializers_with_mime_type) {
+ if (plugin.mime_type != mime_type)
+ continue;
+ auto validation_result = plugin.validate_before_create(bytes).release_value_but_fixme_should_propagate_errors();
+ if (!validation_result)
+ continue;
+ auto plugin_decoder = plugin.create(bytes).release_value_but_fixme_should_propagate_errors();
+ if (plugin_decoder->initialize())
+ return plugin_decoder;
+ }
+ return {};
+}
+
+RefPtr<ImageDecoder> ImageDecoder::try_create_for_raw_bytes(ReadonlyBytes bytes, Optional<DeprecatedString> mime_type)
+{
+ OwnPtr<ImageDecoderPlugin> plugin = probe_and_sniff_for_appropriate_plugin(bytes);
+ if (!plugin) {
+ if (mime_type.has_value()) {
+ plugin = probe_and_sniff_for_appropriate_plugin_with_known_mime_type(mime_type.value(), bytes);
+ if (!plugin)
+ return {};
+ } else {
+ return {};
+ }
+ }
+ return adopt_ref_if_nonnull(new (nothrow) ImageDecoder(plugin.release_nonnull()));
+}
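+
+// A minimal usage sketch (illustration only; `bytes` and `use_bitmap` are
+// hypothetical placeholders):
+//
+//     if (auto decoder = Gfx::ImageDecoder::try_create_for_raw_bytes(bytes)) {
+//         if (auto frame_or_error = decoder->frame(0); !frame_or_error.is_error())
+//             use_bitmap(frame_or_error.value().image);
+//     }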
+
+ImageDecoder::ImageDecoder(NonnullOwnPtr<ImageDecoderPlugin> plugin)
+ : m_plugin(move(plugin))
+{
+}
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/ImageDecoder.h b/Userland/Libraries/LibGfx/ImageFormats/ImageDecoder.h
new file mode 100644
index 0000000000..f44248ee92
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/ImageDecoder.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/ByteBuffer.h>
+#include <AK/OwnPtr.h>
+#include <AK/RefCounted.h>
+#include <AK/RefPtr.h>
+#include <LibGfx/Bitmap.h>
+#include <LibGfx/Size.h>
+
+namespace Gfx {
+
+class Bitmap;
+
+static constexpr size_t maximum_width_for_decoded_images = 16384;
+static constexpr size_t maximum_height_for_decoded_images = 16384;
+
+struct ImageFrameDescriptor {
+ RefPtr<Bitmap> image;
+ int duration { 0 };
+};
+
+class ImageDecoderPlugin {
+public:
+ virtual ~ImageDecoderPlugin() = default;
+
+ virtual IntSize size() = 0;
+
+ virtual void set_volatile() = 0;
+ [[nodiscard]] virtual bool set_nonvolatile(bool& was_purged) = 0;
+
+ virtual bool initialize() = 0;
+
+ virtual bool is_animated() = 0;
+ virtual size_t loop_count() = 0;
+ virtual size_t frame_count() = 0;
+ virtual ErrorOr<ImageFrameDescriptor> frame(size_t index) = 0;
+ virtual ErrorOr<Optional<ReadonlyBytes>> icc_data() = 0;
+
+protected:
+ ImageDecoderPlugin() = default;
+};
+
+class ImageDecoder : public RefCounted<ImageDecoder> {
+public:
+ static RefPtr<ImageDecoder> try_create_for_raw_bytes(ReadonlyBytes, Optional<DeprecatedString> mime_type = {});
+ ~ImageDecoder() = default;
+
+ IntSize size() const { return m_plugin->size(); }
+ int width() const { return size().width(); }
+ int height() const { return size().height(); }
+ void set_volatile() { m_plugin->set_volatile(); }
+ [[nodiscard]] bool set_nonvolatile(bool& was_purged) { return m_plugin->set_nonvolatile(was_purged); }
+ bool is_animated() const { return m_plugin->is_animated(); }
+ size_t loop_count() const { return m_plugin->loop_count(); }
+ size_t frame_count() const { return m_plugin->frame_count(); }
+ ErrorOr<ImageFrameDescriptor> frame(size_t index) const { return m_plugin->frame(index); }
+ ErrorOr<Optional<ReadonlyBytes>> icc_data() const { return m_plugin->icc_data(); }
+
+private:
+ explicit ImageDecoder(NonnullOwnPtr<ImageDecoderPlugin>);
+
+ NonnullOwnPtr<ImageDecoderPlugin> mutable m_plugin;
+};
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/JPEGLoader.cpp b/Userland/Libraries/LibGfx/ImageFormats/JPEGLoader.cpp
new file mode 100644
index 0000000000..d0040c727c
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/JPEGLoader.cpp
@@ -0,0 +1,1625 @@
+/*
+ * Copyright (c) 2020, the SerenityOS developers.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/Debug.h>
+#include <AK/Endian.h>
+#include <AK/Error.h>
+#include <AK/FixedArray.h>
+#include <AK/HashMap.h>
+#include <AK/Math.h>
+#include <AK/MemoryStream.h>
+#include <AK/String.h>
+#include <AK/Try.h>
+#include <AK/Vector.h>
+#include <LibGfx/ImageFormats/JPEGLoader.h>
+
+#define JPEG_INVALID 0X0000
+
+// These names are defined in B.1.1.3 - Marker assignments
+
+#define JPEG_APPN0 0XFFE0
+#define JPEG_APPN1 0XFFE1
+#define JPEG_APPN2 0XFFE2
+#define JPEG_APPN3 0XFFE3
+#define JPEG_APPN4 0XFFE4
+#define JPEG_APPN5 0XFFE5
+#define JPEG_APPN6 0XFFE6
+#define JPEG_APPN7 0XFFE7
+#define JPEG_APPN8 0XFFE8
+#define JPEG_APPN9 0XFFE9
+#define JPEG_APPN10 0XFFEA
+#define JPEG_APPN11 0XFFEB
+#define JPEG_APPN12 0XFFEC
+#define JPEG_APPN13 0XFFED
+#define JPEG_APPN14 0xFFEE
+#define JPEG_APPN15 0xFFEF
+
+#define JPEG_RESERVED1 0xFFF1
+#define JPEG_RESERVED2 0xFFF2
+#define JPEG_RESERVED3 0xFFF3
+#define JPEG_RESERVED4 0xFFF4
+#define JPEG_RESERVED5 0xFFF5
+#define JPEG_RESERVED6 0xFFF6
+#define JPEG_RESERVED7 0xFFF7
+#define JPEG_RESERVED8 0xFFF8
+#define JPEG_RESERVED9 0xFFF9
+#define JPEG_RESERVEDA 0xFFFA
+#define JPEG_RESERVEDB 0xFFFB
+#define JPEG_RESERVEDC 0xFFFC
+#define JPEG_RESERVEDD 0xFFFD
+
+#define JPEG_RST0 0xFFD0
+#define JPEG_RST1 0xFFD1
+#define JPEG_RST2 0xFFD2
+#define JPEG_RST3 0xFFD3
+#define JPEG_RST4 0xFFD4
+#define JPEG_RST5 0xFFD5
+#define JPEG_RST6 0xFFD6
+#define JPEG_RST7 0xFFD7
+
+#define JPEG_ZRL 0xF0
+
+#define JPEG_DHP 0xFFDE
+#define JPEG_EXP 0xFFDF
+
+#define JPEG_DAC 0XFFCC
+#define JPEG_DHT 0XFFC4
+#define JPEG_DQT 0XFFDB
+#define JPEG_EOI 0xFFD9
+#define JPEG_DRI 0XFFDD
+#define JPEG_SOF0 0XFFC0
+#define JPEG_SOF2 0xFFC2
+#define JPEG_SOF15 0xFFCF
+#define JPEG_SOI 0XFFD8
+#define JPEG_SOS 0XFFDA
+#define JPEG_COM 0xFFFE
+
+namespace Gfx {
+
+constexpr static u8 zigzag_map[64] {
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63
+};
+
+using Marker = u16;
+
+/**
+ * MCU means a group of data units that are coded together. A data unit is an 8x8
+ * block of component data. In an interleaved scan, the number of data units a
+ * component C contributes to each MCU is Ch * Cv, where Ch and Cv are the
+ * horizontal and vertical sampling factors of that component, respectively. A
+ * MacroBlock is an 8x8 block of RGB values before encoding, and an 8x8 block of
+ * YCbCr values once we're done decoding the Huffman stream.
+ */
+struct Macroblock {
+ union {
+ i32 y[64] = { 0 };
+ i32 r[64];
+ };
+
+ union {
+ i32 cb[64] = { 0 };
+ i32 g[64];
+ };
+
+ union {
+ i32 cr[64] = { 0 };
+ i32 b[64];
+ };
+};
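+
+// For illustration (not used by the decoder): with 2x2 luma subsampling
+// (H=V=2 for Y, H=V=1 for Cb/Cr), one MCU carries four luma data units and a
+// single Cb and Cr data unit, and the shared chroma block covers the 16x16
+// pixel area spanned by the four luma blocks:
+//
+//     MCU = [ Y0 Y1 ]  +  Cb  +  Cr
+//           [ Y2 Y3 ]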
+
+struct MacroblockMeta {
+ u32 total { 0 };
+ u32 padded_total { 0 };
+ u32 hcount { 0 };
+ u32 vcount { 0 };
+ u32 hpadded_count { 0 };
+ u32 vpadded_count { 0 };
+};
+
+// In the JPEG format, components are defined first at the frame level, then
+// referenced in each scan and aggregated with scan-specific information. The
+// two following structs mimic this hierarchy.
+
+struct Component {
+ // B.2.2 - Frame header syntax
+ u8 id { 0 }; // Ci, Component identifier
+ u8 hsample_factor { 1 }; // Hi, Horizontal sampling factor
+ u8 vsample_factor { 1 }; // Vi, Vertical sampling factor
+ u8 qtable_id { 0 }; // Tqi, Quantization table destination selector
+
+ // The JPEG specification does not specify which component corresponds to
+ // Y, Cb or Cr. This field (actually the index in the parent Vector) will
+ // act as an authority to determine the *real* component.
+ // Please note that this is implementation specific.
+ u8 index { 0 };
+};
+
+struct ScanComponent {
+ // B.2.3 - Scan header syntax
+ Component& component;
+ u8 dc_destination_id { 0 }; // Tdj, DC entropy coding table destination selector
+ u8 ac_destination_id { 0 }; // Taj, AC entropy coding table destination selector
+};
+
+struct StartOfFrame {
+
+ // Of these, only the first three are in mainstream use; they correspond to SOF0, SOF1 and SOF2.
+ enum class FrameType {
+ Baseline_DCT = 0,
+ Extended_Sequential_DCT = 1,
+ Progressive_DCT = 2,
+ Sequential_Lossless = 3,
+ Differential_Sequential_DCT = 5,
+ Differential_Progressive_DCT = 6,
+ Differential_Sequential_Lossless = 7,
+ Extended_Sequential_DCT_Arithmetic = 9,
+ Progressive_DCT_Arithmetic = 10,
+ Sequential_Lossless_Arithmetic = 11,
+ Differential_Sequential_DCT_Arithmetic = 13,
+ Differential_Progressive_DCT_Arithmetic = 14,
+ Differential_Sequential_Lossless_Arithmetic = 15,
+ };
+
+ FrameType type { FrameType::Baseline_DCT };
+ u8 precision { 0 };
+ u16 height { 0 };
+ u16 width { 0 };
+};
+
+struct HuffmanTableSpec {
+ u8 type { 0 };
+ u8 destination_id { 0 };
+ u8 code_counts[16] = { 0 };
+ Vector<u8> symbols;
+ Vector<u16> codes;
+};
+
+struct HuffmanStreamState {
+ Vector<u8> stream;
+ u8 bit_offset { 0 };
+ size_t byte_offset { 0 };
+};
+
+struct ICCMultiChunkState {
+ u8 seen_number_of_icc_chunks { 0 };
+ FixedArray<ByteBuffer> chunks;
+};
+
+struct Scan {
+ // B.2.3 - Scan header syntax
+ Vector<ScanComponent, 3> components;
+
+ u8 spectral_selection_start {};
+ u8 spectral_selection_end {};
+ u8 successive_approximation {};
+
+ HuffmanStreamState huffman_stream;
+
+ u64 end_of_bands_run_count { 0 };
+
+ // See the note on Figure B.4 - Scan header syntax
+ bool are_components_interleaved() const
+ {
+ return components.size() != 1;
+ }
+};
+
+enum class ColorTransform {
+ // https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-T.872-201206-I!!PDF-E&type=items
+ // 6.5.3 - APP14 marker segment for colour encoding
+ CmykOrRgb = 0,
+ YCbCr = 1,
+ YCCK = 2,
+};
+
+struct JPEGLoadingContext {
+ enum State {
+ NotDecoded = 0,
+ Error,
+ FrameDecoded,
+ HeaderDecoded,
+ BitmapDecoded
+ };
+
+ State state { State::NotDecoded };
+
+ u32 luma_table[64] = { 0 };
+ u32 chroma_table[64] = { 0 };
+ StartOfFrame frame;
+ u8 hsample_factor { 0 };
+ u8 vsample_factor { 0 };
+
+ Scan current_scan;
+
+ Vector<Component, 3> components;
+ RefPtr<Gfx::Bitmap> bitmap;
+ u16 dc_restart_interval { 0 };
+ HashMap<u8, HuffmanTableSpec> dc_tables;
+ HashMap<u8, HuffmanTableSpec> ac_tables;
+ i32 previous_dc_values[3] = { 0 };
+ MacroblockMeta mblock_meta;
+ OwnPtr<FixedMemoryStream> stream;
+
+ Optional<ColorTransform> color_transform {};
+
+ Optional<ICCMultiChunkState> icc_multi_chunk_state;
+ Optional<ByteBuffer> icc_data;
+};
+
+static void generate_huffman_codes(HuffmanTableSpec& table)
+{
+ unsigned code = 0;
+ for (auto number_of_codes : table.code_counts) {
+ for (int i = 0; i < number_of_codes; i++)
+ table.codes.append(code++);
+ code <<= 1;
+ }
+}
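+
+// A worked example of the canonical assignment above (illustration only): with
+// code_counts = { 0, 2, 3, 0, ... } -- no 1-bit codes, two 2-bit codes and
+// three 3-bit codes -- the loop produces the codes
+//     00, 01, 100, 101, 110
+// i.e. every code length continues counting from the previous length, shifted
+// left by one, which is exactly the canonical ordering the DHT segment encodes.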
+
+static ErrorOr<size_t> read_huffman_bits(HuffmanStreamState& hstream, size_t count = 1)
+{
+ if (count > (8 * sizeof(size_t))) {
+ dbgln_if(JPEG_DEBUG, "Can't read {} bits at once!", count);
+ return Error::from_string_literal("Reading too many Huffman bits at once");
+ }
+ size_t value = 0;
+ while (count--) {
+ if (hstream.byte_offset >= hstream.stream.size()) {
+ dbgln_if(JPEG_DEBUG, "Huffman stream exhausted. This could be an error!");
+ return Error::from_string_literal("Huffman stream exhausted.");
+ }
+ u8 current_byte = hstream.stream[hstream.byte_offset];
+ u8 current_bit = 1u & (u32)(current_byte >> (7 - hstream.bit_offset)); // MSB first.
+ hstream.bit_offset++;
+ value = (value << 1) | (size_t)current_bit;
+ if (hstream.bit_offset == 8) {
+ hstream.byte_offset++;
+ hstream.bit_offset = 0;
+ }
+ }
+ return value;
+}
+
+static ErrorOr<u8> get_next_symbol(HuffmanStreamState& hstream, HuffmanTableSpec const& table)
+{
+ unsigned code = 0;
+ size_t code_cursor = 0;
+ for (int i = 0; i < 16; i++) { // Codes can't be longer than 16 bits.
+ auto result = TRY(read_huffman_bits(hstream));
+ code = (code << 1) | (i32)result;
+ for (int j = 0; j < table.code_counts[i]; j++) {
+ if (code == table.codes[code_cursor])
+ return table.symbols[code_cursor];
+ code_cursor++;
+ }
+ }
+
+ dbgln_if(JPEG_DEBUG, "If you're seeing this...the jpeg decoder needs to support more kinds of JPEGs!");
+ return Error::from_string_literal("This kind of JPEG is not yet supported by the decoder");
+}
+
+static inline i32* get_component(Macroblock& block, unsigned component)
+{
+ switch (component) {
+ case 0:
+ return block.y;
+ case 1:
+ return block.cb;
+ default:
+ return block.cr;
+ }
+}
+
+static ErrorOr<void> add_dc(JPEGLoadingContext& context, Macroblock& macroblock, ScanComponent const& scan_component)
+{
+ auto maybe_table = context.dc_tables.get(scan_component.dc_destination_id);
+ if (!maybe_table.has_value()) {
+ dbgln_if(JPEG_DEBUG, "Unable to find a DC table with id: {}", scan_component.dc_destination_id);
+ return Error::from_string_literal("Unable to find corresponding DC table");
+ }
+
+ auto& dc_table = maybe_table.value();
+ auto& scan = context.current_scan;
+
+ // For DC coefficients, symbol encodes the length of the coefficient.
+ auto dc_length = TRY(get_next_symbol(scan.huffman_stream, dc_table));
+ if (dc_length > 11) {
+ dbgln_if(JPEG_DEBUG, "DC coefficient too long: {}!", dc_length);
+ return Error::from_string_literal("DC coefficient too long");
+ }
+
+ // DC coefficients are encoded as the difference between previous and current DC values.
+ i32 dc_diff = TRY(read_huffman_bits(scan.huffman_stream, dc_length));
+
+ // If the MSB of the diff is 0, the difference is negative; otherwise it is positive.
+ if (dc_length != 0 && dc_diff < (1 << (dc_length - 1)))
+ dc_diff -= (1 << dc_length) - 1;
+
+ auto* select_component = get_component(macroblock, scan_component.component.index);
+ auto& previous_dc = context.previous_dc_values[scan_component.component.index];
+ select_component[0] = previous_dc += dc_diff;
+
+ return {};
+}
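+
+// A worked example of the DC sign extension above (illustration only): with
+// dc_length = 3 the raw bits cover 0b000..0b111; values below 1 << 2 are
+// negative, so 0b010 (2) decodes to 2 - 7 = -5, while 0b110 (6) stays +6.
+// A 3-bit difference therefore encodes the ranges [-7, -4] and [4, 7].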
+
+static ErrorOr<bool> read_eob(Scan& scan, u32 symbol)
+{
+ // G.1.2.2 - Progressive encoding of AC coefficients with Huffman coding
+ // Note: We also use it for non-progressive encoding as it supports both EOB and ZRL
+
+ if (auto const eob = symbol & 0x0F; eob == 0 && symbol != JPEG_ZRL) {
+ // We encountered an EOB marker
+ auto const eob_base = symbol >> 4;
+ auto const additional_value = TRY(read_huffman_bits(scan.huffman_stream, eob_base));
+
+ scan.end_of_bands_run_count = additional_value + (1 << eob_base) - 1;
+
+ return true;
+ }
+
+ return false;
+}
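+
+// For example, the symbol 0x00 (EOB0) ends only the current block and leaves
+// end_of_bands_run_count at 0, while 0x20 (EOB2) reads two extension bits --
+// say 0b11 -- and ends the current block plus 3 + (1 << 2) - 1 = 6 further ones.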
+
+static ErrorOr<void> add_ac(JPEGLoadingContext& context, Macroblock& macroblock, ScanComponent const& scan_component)
+{
+ auto maybe_table = context.ac_tables.get(scan_component.ac_destination_id);
+ if (!maybe_table.has_value()) {
+ dbgln_if(JPEG_DEBUG, "Unable to find a AC table with id: {}", scan_component.ac_destination_id);
+ return Error::from_string_literal("Unable to find corresponding AC table");
+ }
+
+ auto& ac_table = maybe_table.value();
+ auto* select_component = get_component(macroblock, scan_component.component.index);
+
+ auto& scan = context.current_scan;
+
+ // Compute the AC coefficients.
+
+ // 0th coefficient is the dc, which is already handled
+ auto first_coefficient = max(1, scan.spectral_selection_start);
+
+ for (int j = first_coefficient; j <= scan.spectral_selection_end;) {
+ // AC symbols encode 2 pieces of information, the high 4 bits represent
+ // number of zeroes to be stuffed before reading the coefficient. Low 4
+ // bits represent the magnitude of the coefficient.
+ auto ac_symbol = TRY(get_next_symbol(scan.huffman_stream, ac_table));
+
+ if (TRY(read_eob(scan, ac_symbol)))
+ break;
+
+ // ac_symbol = JPEG_ZRL means we need to skip 16 zeroes.
+ u8 run_length = ac_symbol == JPEG_ZRL ? 16 : ac_symbol >> 4;
+ j += run_length;
+
+ if (j > scan.spectral_selection_end) {
+ dbgln_if(JPEG_DEBUG, "Run-length exceeded boundaries. Cursor: {}, Skipping: {}!", j, run_length);
+ return Error::from_string_literal("Run-length exceeded boundaries");
+ }
+
+ u8 coeff_length = ac_symbol & 0x0F;
+ if (coeff_length > 10) {
+ dbgln_if(JPEG_DEBUG, "AC coefficient too long: {}!", coeff_length);
+ return Error::from_string_literal("AC coefficient too long");
+ }
+
+ if (coeff_length != 0) {
+ i32 ac_coefficient = TRY(read_huffman_bits(scan.huffman_stream, coeff_length));
+ if (ac_coefficient < (1 << (coeff_length - 1)))
+ ac_coefficient -= (1 << coeff_length) - 1;
+
+ select_component[zigzag_map[j++]] = ac_coefficient;
+ }
+ }
+
+ return {};
+}
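+
+// A worked example of the run/size decoding above (illustration only): the AC
+// symbol 0x53 means "skip 5 zero coefficients, then read a 3-bit magnitude";
+// if those bits are 0b001, the coefficient sign-extends to 1 - 7 = -6 and is
+// stored at the next zigzag position. The special symbols 0xF0 (ZRL) and 0x00
+// (EOB) skip 16 zeroes and end the band, respectively.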
+
+/**
+ * Build all the macroblocks that can be filled from a single MCU: one subsampled
+ * pair of Cb/Cr blocks plus every luma block that shares it. Depending on the
+ * sampling factors, we may not see triples of y, cb, cr in that order; if a
+ * sampling factor differs from one, we'll read more than one block of y-
+ * coefficients before we get to read a cb-cr block.
+ *
+ * In the function below, `hcursor` and `vcursor` denote the location of the block
+ * we're building in the macroblock matrix. `vfactor_i` and `hfactor_i` are cursors
+ * that iterate over the vertical and horizontal subsampling factors, respectively.
+ * When we finish one iteration of the innermost loop, we'll have the coefficients
+ * of one of the components of the block at position `mb_index`. When the outermost
+ * loop finishes its first iteration, we'll have all the luminance coefficients for
+ * all the macroblocks that share the chrominance data. The next two iterations
+ * (assuming that we are dealing with three components) will fill up the blocks
+ * with chroma data.
+ */
+static ErrorOr<void> build_macroblocks(JPEGLoadingContext& context, Vector<Macroblock>& macroblocks, u32 hcursor, u32 vcursor)
+{
+ for (auto const& scan_component : context.current_scan.components) {
+ for (u8 vfactor_i = 0; vfactor_i < scan_component.component.vsample_factor; vfactor_i++) {
+ for (u8 hfactor_i = 0; hfactor_i < scan_component.component.hsample_factor; hfactor_i++) {
+ // A.2.3 - Interleaved order
+ u32 mb_index = (vcursor + vfactor_i) * context.mblock_meta.hpadded_count + (hfactor_i + hcursor);
+ if (!context.current_scan.are_components_interleaved())
+ mb_index = vcursor * context.mblock_meta.hpadded_count + (hfactor_i + (hcursor * scan_component.component.vsample_factor) + (vfactor_i * scan_component.component.hsample_factor));
+
+ // G.1.2.2 - Progressive encoding of AC coefficients with Huffman coding
+ if (context.current_scan.end_of_bands_run_count > 0) {
+ --context.current_scan.end_of_bands_run_count;
+ continue;
+ }
+
+ Macroblock& block = macroblocks[mb_index];
+
+ if (context.current_scan.spectral_selection_start == 0)
+ TRY(add_dc(context, block, scan_component));
+ if (context.current_scan.spectral_selection_end != 0)
+ TRY(add_ac(context, block, scan_component));
+ }
+ }
+ }
+
+ return {};
+}
+
+static bool is_dct_based(StartOfFrame::FrameType frame_type)
+{
+ return frame_type == StartOfFrame::FrameType::Baseline_DCT
+ || frame_type == StartOfFrame::FrameType::Extended_Sequential_DCT
+ || frame_type == StartOfFrame::FrameType::Progressive_DCT
+ || frame_type == StartOfFrame::FrameType::Differential_Sequential_DCT
+ || frame_type == StartOfFrame::FrameType::Differential_Progressive_DCT
+ || frame_type == StartOfFrame::FrameType::Progressive_DCT_Arithmetic
+ || frame_type == StartOfFrame::FrameType::Differential_Sequential_DCT_Arithmetic
+ || frame_type == StartOfFrame::FrameType::Differential_Progressive_DCT_Arithmetic;
+}
+
+static void reset_decoder(JPEGLoadingContext& context)
+{
+ // G.1.2.2 - Progressive encoding of AC coefficients with Huffman coding
+ context.current_scan.end_of_bands_run_count = 0;
+
+ // E.2.4 Control procedure for decoding a restart interval
+ if (is_dct_based(context.frame.type)) {
+ context.previous_dc_values[0] = 0;
+ context.previous_dc_values[1] = 0;
+ context.previous_dc_values[2] = 0;
+ return;
+ }
+
+ VERIFY_NOT_REACHED();
+}
+
+static ErrorOr<void> decode_huffman_stream(JPEGLoadingContext& context, Vector<Macroblock>& macroblocks)
+{
+ // Compute huffman codes for DC and AC tables.
+ for (auto it = context.dc_tables.begin(); it != context.dc_tables.end(); ++it)
+ generate_huffman_codes(it->value);
+
+ for (auto it = context.ac_tables.begin(); it != context.ac_tables.end(); ++it)
+ generate_huffman_codes(it->value);
+
+ for (u32 vcursor = 0; vcursor < context.mblock_meta.vcount; vcursor += context.vsample_factor) {
+ for (u32 hcursor = 0; hcursor < context.mblock_meta.hcount; hcursor += context.hsample_factor) {
+ u32 i = vcursor * context.mblock_meta.hpadded_count + hcursor;
+
+ auto& huffman_stream = context.current_scan.huffman_stream;
+
+ if (context.dc_restart_interval > 0) {
+ if (i != 0 && i % (context.dc_restart_interval * context.vsample_factor * context.hsample_factor) == 0) {
+ reset_decoder(context);
+
+ // Restart markers are stored in byte boundaries. Advance the huffman stream cursor to
+ // the 0th bit of the next byte.
+ if (huffman_stream.byte_offset < huffman_stream.stream.size()) {
+ if (huffman_stream.bit_offset > 0) {
+ huffman_stream.bit_offset = 0;
+ huffman_stream.byte_offset++;
+ }
+
+ // Skip the restart marker (RSTn).
+ huffman_stream.byte_offset++;
+ }
+ }
+ }
+
+ if (auto result = build_macroblocks(context, macroblocks, hcursor, vcursor); result.is_error()) {
+ if constexpr (JPEG_DEBUG) {
+ dbgln("Failed to build Macroblock {}: {}", i, result.error());
+ dbgln("Huffman stream byte offset {}", huffman_stream.byte_offset);
+ dbgln("Huffman stream bit offset {}", huffman_stream.bit_offset);
+ }
+ return result.release_error();
+ }
+ }
+ }
+ return {};
+}
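+
+// For example, with dc_restart_interval = 4 and 2x2 luma subsampling, the loop
+// above resets the DC predictors and re-aligns the Huffman stream to a byte
+// boundary whenever the macroblock index i reaches a non-zero multiple of
+// 4 * 2 * 2 = 16.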
+
+static inline ErrorOr<void> ensure_bounds_okay(const size_t cursor, const size_t delta, const size_t bound)
+{
+ if (Checked<size_t>::addition_would_overflow(delta, cursor))
+ return Error::from_string_literal("Bounds are not ok: addition would overflow");
+ if (delta + cursor >= bound)
+ return Error::from_string_literal("Bounds are not ok");
+ return {};
+}
+
+static bool is_frame_marker(Marker const marker)
+{
+ // B.1.1.3 - Marker assignments
+ bool const is_sof_marker = marker >= JPEG_SOF0 && marker <= JPEG_SOF15;
+
+ // Start of frame markers are valid for JPEG_SOF0 to JPEG_SOF15 except number 4, 8 (reserved) and 12.
+ bool const is_defined_marker = marker != JPEG_DHT && marker != 0xFFC8 && marker != JPEG_DAC;
+
+ return is_sof_marker && is_defined_marker;
+}
+
+static inline bool is_supported_marker(Marker const marker)
+{
+ if (marker >= JPEG_APPN0 && marker <= JPEG_APPN15) {
+
+ if (marker != JPEG_APPN0 && marker != JPEG_APPN14)
+ dbgln_if(JPEG_DEBUG, "{:#04x} not supported yet. The decoder may fail!", marker);
+ return true;
+ }
+ if (marker >= JPEG_RESERVED1 && marker <= JPEG_RESERVEDD)
+ return true;
+ if (marker >= JPEG_RST0 && marker <= JPEG_RST7)
+ return true;
+ switch (marker) {
+ case JPEG_COM:
+ case JPEG_DHP:
+ case JPEG_EXP:
+ case JPEG_DHT:
+ case JPEG_DQT:
+ case JPEG_DRI:
+ case JPEG_EOI:
+ case JPEG_SOF0:
+ case JPEG_SOF2:
+ case JPEG_SOI:
+ case JPEG_SOS:
+ return true;
+ }
+
+ if (is_frame_marker(marker))
+ dbgln_if(JPEG_DEBUG, "Decoding this frame-type (SOF{}) is not currently supported. Decoder will fail!", marker & 0xf);
+
+ return false;
+}
+
+static inline ErrorOr<Marker> read_marker_at_cursor(Stream& stream)
+{
+ u16 marker = TRY(stream.read_value<BigEndian<u16>>());
+ if (is_supported_marker(marker))
+ return marker;
+ if (marker != 0xFFFF)
+ return JPEG_INVALID;
+ u8 next;
+ do {
+ next = TRY(stream.read_value<u8>());
+ if (next == 0x00)
+ return JPEG_INVALID;
+ } while (next == 0xFF);
+ marker = 0xFF00 | (u16)next;
+ return is_supported_marker(marker) ? marker : JPEG_INVALID;
+}
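+
+// For example, the byte sequence FF FF FF D8 first reads as the value 0xFFFF;
+// the loop above then swallows the extra 0xFF fill byte and resynchronizes on
+// 0xFFD8, i.e. the SOI marker.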
+
+static ErrorOr<void> read_start_of_scan(AK::SeekableStream& stream, JPEGLoadingContext& context)
+{
+ // B.2.3 - Scan header syntax
+
+ if (context.state < JPEGLoadingContext::State::FrameDecoded) {
+ dbgln_if(JPEG_DEBUG, "{}: SOS found before reading a SOF!", TRY(stream.tell()));
+ return Error::from_string_literal("SOS found before reading a SOF");
+ }
+
+ u16 bytes_to_read = TRY(stream.read_value<BigEndian<u16>>()) - 2;
+ TRY(ensure_bounds_okay(TRY(stream.tell()), bytes_to_read, TRY(stream.size())));
+ u8 const component_count = TRY(stream.read_value<u8>());
+
+ Scan current_scan;
+ current_scan.huffman_stream.stream.ensure_capacity(50 * KiB);
+
+ Optional<u8> last_read;
+ u8 component_read = 0;
+ for (auto& component : context.components) {
+ // See the Csj paragraph:
+ // [...] the ordering in the scan header shall follow the ordering in the frame header.
+ if (component_read == component_count)
+ break;
+
+ if (!last_read.has_value())
+ last_read = TRY(stream.read_value<u8>());
+
+ if (component.id != *last_read)
+ continue;
+
+ u8 table_ids = TRY(stream.read_value<u8>());
+
+ current_scan.components.empend(component, static_cast<u8>(table_ids >> 4), static_cast<u8>(table_ids & 0x0F));
+
+ component_read++;
+ last_read.clear();
+ }
+
+ current_scan.spectral_selection_start = TRY(stream.read_value<u8>());
+ current_scan.spectral_selection_end = TRY(stream.read_value<u8>());
+ current_scan.successive_approximation = TRY(stream.read_value<u8>());
+
+ dbgln_if(JPEG_DEBUG, "Start of Selection: {}, End of Selection: {}, Successive Approximation: {}",
+ current_scan.spectral_selection_start,
+ current_scan.spectral_selection_end,
+ current_scan.successive_approximation);
+
+ // FIXME: Support SOF2 jpegs with current_scan.successive_approximation != 0
+ if (current_scan.spectral_selection_start > 63 || current_scan.spectral_selection_end > 63 || current_scan.successive_approximation != 0) {
+ dbgln_if(JPEG_DEBUG, "{}: ERROR! Start of Selection: {}, End of Selection: {}, Successive Approximation: {}!",
+ TRY(stream.tell()),
+ current_scan.spectral_selection_start,
+ current_scan.spectral_selection_end,
+ current_scan.successive_approximation);
+ return Error::from_string_literal("Spectral selection is not in [0, 63] or successive approximation is not zero");
+ }
+
+ context.current_scan = move(current_scan);
+
+ return {};
+}
+
+static ErrorOr<void> read_restart_interval(AK::SeekableStream& stream, JPEGLoadingContext& context)
+{
+ // B.2.4.4 - Restart interval definition syntax
+ u16 bytes_to_read = TRY(stream.read_value<BigEndian<u16>>()) - 2;
+ if (bytes_to_read != 2) {
+ dbgln_if(JPEG_DEBUG, "{}: Malformed DRI marker found!", TRY(stream.tell()));
+ return Error::from_string_literal("Malformed DRI marker found");
+ }
+ context.dc_restart_interval = TRY(stream.read_value<BigEndian<u16>>());
+ return {};
+}
+
+static ErrorOr<void> read_huffman_table(AK::SeekableStream& stream, JPEGLoadingContext& context)
+{
+ i32 bytes_to_read = TRY(stream.read_value<BigEndian<u16>>());
+ TRY(ensure_bounds_okay(TRY(stream.tell()), bytes_to_read, TRY(stream.size())));
+ bytes_to_read -= 2;
+ while (bytes_to_read > 0) {
+ HuffmanTableSpec table;
+ u8 table_info = TRY(stream.read_value<u8>());
+ u8 table_type = table_info >> 4;
+ u8 table_destination_id = table_info & 0x0F;
+ if (table_type > 1) {
+ dbgln_if(JPEG_DEBUG, "{}: Unrecognized huffman table: {}!", TRY(stream.tell()), table_type);
+ return Error::from_string_literal("Unrecognized huffman table");
+ }
+ if (table_destination_id > 1) {
+ dbgln_if(JPEG_DEBUG, "{}: Invalid huffman table destination id: {}!", TRY(stream.tell()), table_destination_id);
+ return Error::from_string_literal("Invalid huffman table destination id");
+ }
+
+ table.type = table_type;
+ table.destination_id = table_destination_id;
+ u32 total_codes = 0;
+
+ // Read code counts. At each index K, the value represents the number of K+1 bit codes in this header.
+ for (int i = 0; i < 16; i++) {
+ u8 count = TRY(stream.read_value<u8>());
+ total_codes += count;
+ table.code_counts[i] = count;
+ }
+
+ table.codes.ensure_capacity(total_codes);
+
+ // Read symbols. Read X bytes, where X is the sum of the counts of codes read in the previous step.
+ for (u32 i = 0; i < total_codes; i++) {
+ u8 symbol = TRY(stream.read_value<u8>());
+ table.symbols.append(symbol);
+ }
+
+ auto& huffman_table = table.type == 0 ? context.dc_tables : context.ac_tables;
+ huffman_table.set(table.destination_id, table);
+ VERIFY(huffman_table.size() <= 2);
+
+ bytes_to_read -= 1 + 16 + total_codes;
+ }
+
+ if (bytes_to_read != 0) {
+ dbgln_if(JPEG_DEBUG, "{}: Extra bytes detected in huffman header!", TRY(stream.tell()));
+ return Error::from_string_literal("Extra bytes detected in huffman header");
+ }
+ return {};
+}
+
+static ErrorOr<void> read_icc_profile(SeekableStream& stream, JPEGLoadingContext& context, int bytes_to_read)
+{
+ if (bytes_to_read <= 2)
+ return Error::from_string_literal("icc marker too small");
+
+ auto chunk_sequence_number = TRY(stream.read_value<u8>()); // 1-based
+ auto number_of_chunks = TRY(stream.read_value<u8>());
+ bytes_to_read -= 2;
+
+ if (!context.icc_multi_chunk_state.has_value())
+ context.icc_multi_chunk_state.emplace(ICCMultiChunkState { 0, TRY(FixedArray<ByteBuffer>::create(number_of_chunks)) });
+ auto& chunk_state = context.icc_multi_chunk_state;
+
+ if (chunk_state->seen_number_of_icc_chunks >= number_of_chunks)
+ return Error::from_string_literal("Too many ICC chunks");
+
+ if (chunk_state->chunks.size() != number_of_chunks)
+ return Error::from_string_literal("Inconsistent number of total ICC chunks");
+
+ if (chunk_sequence_number == 0)
+ return Error::from_string_literal("ICC chunk sequence number not 1 based");
+ u8 index = chunk_sequence_number - 1;
+
+ if (index >= chunk_state->chunks.size())
+ return Error::from_string_literal("ICC chunk sequence number larger than number of chunks");
+
+ if (!chunk_state->chunks[index].is_empty())
+ return Error::from_string_literal("Duplicate ICC chunk at sequence number");
+
+ chunk_state->chunks[index] = TRY(ByteBuffer::create_zeroed(bytes_to_read));
+ TRY(stream.read_until_filled(chunk_state->chunks[index]));
+
+ chunk_state->seen_number_of_icc_chunks++;
+
+ if (chunk_state->seen_number_of_icc_chunks != chunk_state->chunks.size())
+ return {};
+
+ if (number_of_chunks == 1) {
+ context.icc_data = move(chunk_state->chunks[0]);
+ return {};
+ }
+
+ size_t total_size = 0;
+ for (auto const& chunk : chunk_state->chunks)
+ total_size += chunk.size();
+
+ auto icc_bytes = TRY(ByteBuffer::create_zeroed(total_size));
+ size_t start = 0;
+ for (auto const& chunk : chunk_state->chunks) {
+ memcpy(icc_bytes.data() + start, chunk.data(), chunk.size());
+ start += chunk.size();
+ }
+
+ context.icc_data = move(icc_bytes);
+
+ return {};
+}
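+
+// Note: an ICC profile that does not fit into a single APP2 segment is split
+// across several of them; every segment repeats the "ICC_PROFILE\0" identifier
+// followed by a 1-based chunk index and the total chunk count, and the chunks
+// are reassembled above in index order once all of them have been seen.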
+
+static ErrorOr<void> read_colour_encoding(SeekableStream& stream, [[maybe_unused]] JPEGLoadingContext& context, int bytes_to_read)
+{
+ // The App 14 segment is application-specific in the original JPEG standard.
+ // However, the Adobe implementation is globally accepted, and the value of the color transform
+ // was later standardized as a JPEG-1 extension.
+
+ // For the structure of the App 14 segment, see:
+ // https://www.pdfa.org/norm-refs/5116.DCT_Filter.pdf
+ // 18 Adobe Application-Specific JPEG Marker
+
+ // For the value of color_transform, see:
+ // https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-T.872-201206-I!!PDF-E&type=items
+ // 6.5.3 - APP14 marker segment for colour encoding
+
+ if (bytes_to_read < 6)
+ return Error::from_string_literal("App14 segment too small");
+
+ [[maybe_unused]] auto const version = TRY(stream.read_value<u8>());
+ [[maybe_unused]] u16 const flag0 = TRY(stream.read_value<BigEndian<u16>>());
+ [[maybe_unused]] u16 const flag1 = TRY(stream.read_value<BigEndian<u16>>());
+ auto const color_transform = TRY(stream.read_value<u8>());
+
+ if (bytes_to_read > 6) {
+ dbgln_if(JPEG_DEBUG, "Unread bytes in App14 segment: {}", bytes_to_read - 1);
+ TRY(stream.discard(bytes_to_read - 1));
+ }
+
+ switch (color_transform) {
+ case 0:
+ context.color_transform = ColorTransform::CmykOrRgb;
+ break;
+ case 1:
+ context.color_transform = ColorTransform::YCbCr;
+ break;
+ case 2:
+ context.color_transform = ColorTransform::YCCK;
+ break;
+ default:
+ dbgln("0x{:x} is not a specified transform flag value, ignoring", color_transform);
+ }
+
+ return {};
+}
+
+static ErrorOr<void> read_app_marker(SeekableStream& stream, JPEGLoadingContext& context, int app_marker_number)
+{
+ i32 bytes_to_read = TRY(stream.read_value<BigEndian<u16>>());
+ TRY(ensure_bounds_okay(TRY(stream.tell()), bytes_to_read, TRY(stream.size())));
+
+ if (bytes_to_read <= 2)
+ return Error::from_string_literal("app marker size too small");
+ bytes_to_read -= 2;
+
+ StringBuilder builder;
+ for (;;) {
+ if (bytes_to_read == 0)
+ return Error::from_string_literal("app marker size too small for identifier");
+
+ auto c = TRY(stream.read_value<char>());
+ bytes_to_read--;
+
+ if (c == '\0')
+ break;
+
+ TRY(builder.try_append(c));
+ }
+
+ auto app_id = TRY(builder.to_string());
+
+ if (app_marker_number == 2 && app_id == "ICC_PROFILE"sv)
+ return read_icc_profile(stream, context, bytes_to_read);
+ if (app_marker_number == 14 && app_id == "Adobe"sv)
+ return read_colour_encoding(stream, context, bytes_to_read);
+
+ return stream.discard(bytes_to_read);
+}
+
+static inline bool validate_luma_and_modify_context(Component const& luma, JPEGLoadingContext& context)
+{
+ if ((luma.hsample_factor == 1 || luma.hsample_factor == 2) && (luma.vsample_factor == 1 || luma.vsample_factor == 2)) {
+ context.mblock_meta.hpadded_count += luma.hsample_factor == 1 ? 0 : context.mblock_meta.hcount % 2;
+ context.mblock_meta.vpadded_count += luma.vsample_factor == 1 ? 0 : context.mblock_meta.vcount % 2;
+ context.mblock_meta.padded_total = context.mblock_meta.hpadded_count * context.mblock_meta.vpadded_count;
+ // For easy reference to relevant sample factors.
+ context.hsample_factor = luma.hsample_factor;
+ context.vsample_factor = luma.vsample_factor;
+
+ if constexpr (JPEG_DEBUG) {
+ dbgln("Horizontal Subsampling Factor: {}", luma.hsample_factor);
+ dbgln("Vertical Subsampling Factor: {}", luma.vsample_factor);
+ }
+
+ return true;
+ }
+ return false;
+}
+
+static inline void set_macroblock_metadata(JPEGLoadingContext& context)
+{
+ context.mblock_meta.hcount = (context.frame.width + 7) / 8;
+ context.mblock_meta.vcount = (context.frame.height + 7) / 8;
+ context.mblock_meta.hpadded_count = context.mblock_meta.hcount;
+ context.mblock_meta.vpadded_count = context.mblock_meta.vcount;
+ context.mblock_meta.total = context.mblock_meta.hcount * context.mblock_meta.vcount;
+}
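+
+// For example, a 17x9 image yields hcount = 3, vcount = 2 and total = 6; if the
+// luma component later turns out to use 2x2 subsampling,
+// validate_luma_and_modify_context() rounds the padded counts up to
+// hpadded_count = 4 and vpadded_count = 2, i.e. padded_total = 8.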
+
+static ErrorOr<void> read_start_of_frame(AK::SeekableStream& stream, JPEGLoadingContext& context)
+{
+ if (context.state == JPEGLoadingContext::FrameDecoded) {
+ dbgln_if(JPEG_DEBUG, "{}: SOF repeated!", TRY(stream.tell()));
+ return Error::from_string_literal("SOF repeated");
+ }
+
+ i32 bytes_to_read = TRY(stream.read_value<BigEndian<u16>>());
+
+ bytes_to_read -= 2;
+ TRY(ensure_bounds_okay(TRY(stream.tell()), bytes_to_read, TRY(stream.size())));
+
+ context.frame.precision = TRY(stream.read_value<u8>());
+ if (context.frame.precision != 8) {
+ dbgln_if(JPEG_DEBUG, "{}: SOF precision != 8!", TRY(stream.tell()));
+ return Error::from_string_literal("SOF precision != 8");
+ }
+
+ context.frame.height = TRY(stream.read_value<BigEndian<u16>>());
+ context.frame.width = TRY(stream.read_value<BigEndian<u16>>());
+ if (!context.frame.width || !context.frame.height) {
+ dbgln_if(JPEG_DEBUG, "{}: ERROR! Image height: {}, Image width: {}!", TRY(stream.tell()), context.frame.height, context.frame.width);
+ return Error::from_string_literal("Image frame height or width is null");
+ }
+
+ if (context.frame.width > maximum_width_for_decoded_images || context.frame.height > maximum_height_for_decoded_images) {
+ dbgln("This JPEG is too large for comfort: {}x{}", context.frame.width, context.frame.height);
+ return Error::from_string_literal("JPEG too large for comfort");
+ }
+
+ set_macroblock_metadata(context);
+
+ auto component_count = TRY(stream.read_value<u8>());
+ if (component_count != 1 && component_count != 3) {
+ dbgln_if(JPEG_DEBUG, "{}: Unsupported number of components in SOF: {}!", TRY(stream.tell()), component_count);
+ return Error::from_string_literal("Unsupported number of components in SOF");
+ }
+
+ for (u8 i = 0; i < component_count; i++) {
+ Component component;
+ component.id = TRY(stream.read_value<u8>());
+ component.index = i;
+
+ u8 subsample_factors = TRY(stream.read_value<u8>());
+ component.hsample_factor = subsample_factors >> 4;
+ component.vsample_factor = subsample_factors & 0x0F;
+
+ if (i == 0) {
+ // By convention, downsampling is applied only on chroma components. So we should
+ // hope to see the maximum sampling factor in the luma component.
+ if (!validate_luma_and_modify_context(component, context)) {
+ dbgln_if(JPEG_DEBUG, "{}: Unsupported luma subsampling factors: horizontal: {}, vertical: {}",
+ TRY(stream.tell()),
+ component.hsample_factor,
+ component.vsample_factor);
+ return Error::from_string_literal("Unsupported luma subsampling factors");
+ }
+ } else {
+ if (component.hsample_factor != 1 || component.vsample_factor != 1) {
+ dbgln_if(JPEG_DEBUG, "{}: Unsupported chroma subsampling factors: horizontal: {}, vertical: {}",
+ TRY(stream.tell()),
+ component.hsample_factor,
+ component.vsample_factor);
+ return Error::from_string_literal("Unsupported chroma subsampling factors");
+ }
+ }
+
+ component.qtable_id = TRY(stream.read_value<u8>());
+ if (component.qtable_id > 1) {
+ dbgln_if(JPEG_DEBUG, "{}: Unsupported quantization table id: {}!", TRY(stream.tell()), component.qtable_id);
+ return Error::from_string_literal("Unsupported quantization table id");
+ }
+
+ context.components.append(move(component));
+ }
+
+ return {};
+}
+
+static ErrorOr<void> read_quantization_table(AK::SeekableStream& stream, JPEGLoadingContext& context)
+{
+ i32 bytes_to_read = TRY(stream.read_value<BigEndian<u16>>()) - 2;
+ TRY(ensure_bounds_okay(TRY(stream.tell()), bytes_to_read, TRY(stream.size())));
+ while (bytes_to_read > 0) {
+ u8 info_byte = TRY(stream.read_value<u8>());
+ u8 element_unit_hint = info_byte >> 4;
+ if (element_unit_hint > 1) {
+ dbgln_if(JPEG_DEBUG, "{}: Unsupported unit hint in quantization table: {}!", TRY(stream.tell()), element_unit_hint);
+ return Error::from_string_literal("Unsupported unit hint in quantization table");
+ }
+ u8 table_id = info_byte & 0x0F;
+ if (table_id > 1) {
+ dbgln_if(JPEG_DEBUG, "{}: Unsupported quantization table id: {}!", TRY(stream.tell()), table_id);
+ return Error::from_string_literal("Unsupported quantization table id");
+ }
+ u32* table = table_id == 0 ? context.luma_table : context.chroma_table;
+ for (int i = 0; i < 64; i++) {
+ if (element_unit_hint == 0) {
+ u8 tmp = TRY(stream.read_value<u8>());
+ table[zigzag_map[i]] = tmp;
+ } else {
+ table[zigzag_map[i]] = TRY(stream.read_value<BigEndian<u16>>());
+ }
+ }
+
+ bytes_to_read -= 1 + (element_unit_hint == 0 ? 64 : 128);
+ }
+ if (bytes_to_read != 0) {
+ dbgln_if(JPEG_DEBUG, "{}: Invalid length for one or more quantization tables!", TRY(stream.tell()));
+ return Error::from_string_literal("Invalid length for one or more quantization tables");
+ }
+
+ return {};
+}
+
+static ErrorOr<void> skip_segment(Stream& stream)
+{
+ u16 bytes_to_skip = TRY(stream.read_value<BigEndian<u16>>()) - 2;
+ TRY(stream.discard(bytes_to_skip));
+ return {};
+}
+
+static void dequantize(JPEGLoadingContext& context, Vector<Macroblock>& macroblocks)
+{
+ for (u32 vcursor = 0; vcursor < context.mblock_meta.vcount; vcursor += context.vsample_factor) {
+ for (u32 hcursor = 0; hcursor < context.mblock_meta.hcount; hcursor += context.hsample_factor) {
+ for (u32 i = 0; i < context.components.size(); i++) {
+ auto& component = context.components[i];
+ u32 const* table = component.qtable_id == 0 ? context.luma_table : context.chroma_table;
+ for (u32 vfactor_i = 0; vfactor_i < component.vsample_factor; vfactor_i++) {
+ for (u32 hfactor_i = 0; hfactor_i < component.hsample_factor; hfactor_i++) {
+ u32 mb_index = (vcursor + vfactor_i) * context.mblock_meta.hpadded_count + (hfactor_i + hcursor);
+ Macroblock& block = macroblocks[mb_index];
+ int* block_component = get_component(block, i);
+ for (u32 k = 0; k < 64; k++)
+ block_component[k] *= table[k];
+ }
+ }
+ }
+ }
+ }
+}
+
+static void inverse_dct(JPEGLoadingContext const& context, Vector<Macroblock>& macroblocks)
+{
+ static float const m0 = 2.0f * AK::cos(1.0f / 16.0f * 2.0f * AK::Pi<float>);
+ static float const m1 = 2.0f * AK::cos(2.0f / 16.0f * 2.0f * AK::Pi<float>);
+ static float const m3 = 2.0f * AK::cos(2.0f / 16.0f * 2.0f * AK::Pi<float>);
+ static float const m5 = 2.0f * AK::cos(3.0f / 16.0f * 2.0f * AK::Pi<float>);
+ static float const m2 = m0 - m5;
+ static float const m4 = m0 + m5;
+ static float const s0 = AK::cos(0.0f / 16.0f * AK::Pi<float>) * AK::rsqrt(8.0f);
+ static float const s1 = AK::cos(1.0f / 16.0f * AK::Pi<float>) / 2.0f;
+ static float const s2 = AK::cos(2.0f / 16.0f * AK::Pi<float>) / 2.0f;
+ static float const s3 = AK::cos(3.0f / 16.0f * AK::Pi<float>) / 2.0f;
+ static float const s4 = AK::cos(4.0f / 16.0f * AK::Pi<float>) / 2.0f;
+ static float const s5 = AK::cos(5.0f / 16.0f * AK::Pi<float>) / 2.0f;
+ static float const s6 = AK::cos(6.0f / 16.0f * AK::Pi<float>) / 2.0f;
+ static float const s7 = AK::cos(7.0f / 16.0f * AK::Pi<float>) / 2.0f;
+
+ for (u32 vcursor = 0; vcursor < context.mblock_meta.vcount; vcursor += context.vsample_factor) {
+ for (u32 hcursor = 0; hcursor < context.mblock_meta.hcount; hcursor += context.hsample_factor) {
+ for (u32 component_i = 0; component_i < context.components.size(); component_i++) {
+ auto& component = context.components[component_i];
+ for (u8 vfactor_i = 0; vfactor_i < component.vsample_factor; vfactor_i++) {
+ for (u8 hfactor_i = 0; hfactor_i < component.hsample_factor; hfactor_i++) {
+ u32 mb_index = (vcursor + vfactor_i) * context.mblock_meta.hpadded_count + (hfactor_i + hcursor);
+ Macroblock& block = macroblocks[mb_index];
+ i32* block_component = get_component(block, component_i);
+ for (u32 k = 0; k < 8; ++k) {
+ float const g0 = block_component[0 * 8 + k] * s0;
+ float const g1 = block_component[4 * 8 + k] * s4;
+ float const g2 = block_component[2 * 8 + k] * s2;
+ float const g3 = block_component[6 * 8 + k] * s6;
+ float const g4 = block_component[5 * 8 + k] * s5;
+ float const g5 = block_component[1 * 8 + k] * s1;
+ float const g6 = block_component[7 * 8 + k] * s7;
+ float const g7 = block_component[3 * 8 + k] * s3;
+
+ float const f0 = g0;
+ float const f1 = g1;
+ float const f2 = g2;
+ float const f3 = g3;
+ float const f4 = g4 - g7;
+ float const f5 = g5 + g6;
+ float const f6 = g5 - g6;
+ float const f7 = g4 + g7;
+
+ float const e0 = f0;
+ float const e1 = f1;
+ float const e2 = f2 - f3;
+ float const e3 = f2 + f3;
+ float const e4 = f4;
+ float const e5 = f5 - f7;
+ float const e6 = f6;
+ float const e7 = f5 + f7;
+ float const e8 = f4 + f6;
+
+ float const d0 = e0;
+ float const d1 = e1;
+ float const d2 = e2 * m1;
+ float const d3 = e3;
+ float const d4 = e4 * m2;
+ float const d5 = e5 * m3;
+ float const d6 = e6 * m4;
+ float const d7 = e7;
+ float const d8 = e8 * m5;
+
+ float const c0 = d0 + d1;
+ float const c1 = d0 - d1;
+ float const c2 = d2 - d3;
+ float const c3 = d3;
+ float const c4 = d4 + d8;
+ float const c5 = d5 + d7;
+ float const c6 = d6 - d8;
+ float const c7 = d7;
+ float const c8 = c5 - c6;
+
+ float const b0 = c0 + c3;
+ float const b1 = c1 + c2;
+ float const b2 = c1 - c2;
+ float const b3 = c0 - c3;
+ float const b4 = c4 - c8;
+ float const b5 = c8;
+ float const b6 = c6 - c7;
+ float const b7 = c7;
+
+ block_component[0 * 8 + k] = b0 + b7;
+ block_component[1 * 8 + k] = b1 + b6;
+ block_component[2 * 8 + k] = b2 + b5;
+ block_component[3 * 8 + k] = b3 + b4;
+ block_component[4 * 8 + k] = b3 - b4;
+ block_component[5 * 8 + k] = b2 - b5;
+ block_component[6 * 8 + k] = b1 - b6;
+ block_component[7 * 8 + k] = b0 - b7;
+ }
+ for (u32 l = 0; l < 8; ++l) {
+ float const g0 = block_component[l * 8 + 0] * s0;
+ float const g1 = block_component[l * 8 + 4] * s4;
+ float const g2 = block_component[l * 8 + 2] * s2;
+ float const g3 = block_component[l * 8 + 6] * s6;
+ float const g4 = block_component[l * 8 + 5] * s5;
+ float const g5 = block_component[l * 8 + 1] * s1;
+ float const g6 = block_component[l * 8 + 7] * s7;
+ float const g7 = block_component[l * 8 + 3] * s3;
+
+ float const f0 = g0;
+ float const f1 = g1;
+ float const f2 = g2;
+ float const f3 = g3;
+ float const f4 = g4 - g7;
+ float const f5 = g5 + g6;
+ float const f6 = g5 - g6;
+ float const f7 = g4 + g7;
+
+ float const e0 = f0;
+ float const e1 = f1;
+ float const e2 = f2 - f3;
+ float const e3 = f2 + f3;
+ float const e4 = f4;
+ float const e5 = f5 - f7;
+ float const e6 = f6;
+ float const e7 = f5 + f7;
+ float const e8 = f4 + f6;
+
+ float const d0 = e0;
+ float const d1 = e1;
+ float const d2 = e2 * m1;
+ float const d3 = e3;
+ float const d4 = e4 * m2;
+ float const d5 = e5 * m3;
+ float const d6 = e6 * m4;
+ float const d7 = e7;
+ float const d8 = e8 * m5;
+
+ float const c0 = d0 + d1;
+ float const c1 = d0 - d1;
+ float const c2 = d2 - d3;
+ float const c3 = d3;
+ float const c4 = d4 + d8;
+ float const c5 = d5 + d7;
+ float const c6 = d6 - d8;
+ float const c7 = d7;
+ float const c8 = c5 - c6;
+
+ float const b0 = c0 + c3;
+ float const b1 = c1 + c2;
+ float const b2 = c1 - c2;
+ float const b3 = c0 - c3;
+ float const b4 = c4 - c8;
+ float const b5 = c8;
+ float const b6 = c6 - c7;
+ float const b7 = c7;
+
+ block_component[l * 8 + 0] = b0 + b7;
+ block_component[l * 8 + 1] = b1 + b6;
+ block_component[l * 8 + 2] = b2 + b5;
+ block_component[l * 8 + 3] = b3 + b4;
+ block_component[l * 8 + 4] = b3 - b4;
+ block_component[l * 8 + 5] = b2 - b5;
+ block_component[l * 8 + 6] = b1 - b6;
+ block_component[l * 8 + 7] = b0 - b7;
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+static void ycbcr_to_rgb(JPEGLoadingContext const& context, Vector<Macroblock>& macroblocks)
+{
+ for (u32 vcursor = 0; vcursor < context.mblock_meta.vcount; vcursor += context.vsample_factor) {
+ for (u32 hcursor = 0; hcursor < context.mblock_meta.hcount; hcursor += context.hsample_factor) {
+ const u32 chroma_block_index = vcursor * context.mblock_meta.hpadded_count + hcursor;
+ Macroblock const& chroma = macroblocks[chroma_block_index];
+ // Overflows are intentional.
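+ // The factor counters below start at (sample factor - 1) and count down; once they wrap past zero they become huge, failing the "< sample factor" check and ending the loop.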
+ for (u8 vfactor_i = context.vsample_factor - 1; vfactor_i < context.vsample_factor; --vfactor_i) {
+ for (u8 hfactor_i = context.hsample_factor - 1; hfactor_i < context.hsample_factor; --hfactor_i) {
+ u32 mb_index = (vcursor + vfactor_i) * context.mblock_meta.hpadded_count + (hcursor + hfactor_i);
+ i32* y = macroblocks[mb_index].y;
+ i32* cb = macroblocks[mb_index].cb;
+ i32* cr = macroblocks[mb_index].cr;
+ for (u8 i = 7; i < 8; --i) {
+ for (u8 j = 7; j < 8; --j) {
+ const u8 pixel = i * 8 + j;
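+ // One 8x8 chroma block covers the whole (possibly subsampled) macroblock, so map the luma coordinate onto it: divide by the sampling factor and offset by 4 per luma-block index.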
+ const u32 chroma_pxrow = (i / context.vsample_factor) + 4 * vfactor_i;
+ const u32 chroma_pxcol = (j / context.hsample_factor) + 4 * hfactor_i;
+ const u32 chroma_pixel = chroma_pxrow * 8 + chroma_pxcol;
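+ // JFIF YCbCr -> RGB conversion (rounded ITU-R BT.601 coefficients); the +128 undoes the level shift applied before the forward DCT.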
+ int r = y[pixel] + 1.402f * chroma.cr[chroma_pixel] + 128;
+ int g = y[pixel] - 0.344f * chroma.cb[chroma_pixel] - 0.714f * chroma.cr[chroma_pixel] + 128;
+ int b = y[pixel] + 1.772f * chroma.cb[chroma_pixel] + 128;
+ y[pixel] = r < 0 ? 0 : (r > 255 ? 255 : r);
+ cb[pixel] = g < 0 ? 0 : (g > 255 ? 255 : g);
+ cr[pixel] = b < 0 ? 0 : (b > 255 ? 255 : b);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+static void signed_rgb_to_unsigned(JPEGLoadingContext const& context, Vector<Macroblock>& macroblocks)
+{
+ for (u32 vcursor = 0; vcursor < context.mblock_meta.vcount; vcursor += context.vsample_factor) {
+ for (u32 hcursor = 0; hcursor < context.mblock_meta.hcount; hcursor += context.hsample_factor) {
+ for (u8 vfactor_i = 0; vfactor_i < context.vsample_factor; ++vfactor_i) {
+ for (u8 hfactor_i = 0; hfactor_i < context.hsample_factor; ++hfactor_i) {
+ u32 mb_index = (vcursor + vfactor_i) * context.mblock_meta.hpadded_count + (hcursor + hfactor_i);
+ for (u8 i = 0; i < 8; ++i) {
+ for (u8 j = 0; j < 8; ++j) {
+ macroblocks[mb_index].r[i * 8 + j] = clamp(macroblocks[mb_index].r[i * 8 + j] + 128, 0, 255);
+ macroblocks[mb_index].g[i * 8 + j] = clamp(macroblocks[mb_index].g[i * 8 + j] + 128, 0, 255);
+ macroblocks[mb_index].b[i * 8 + j] = clamp(macroblocks[mb_index].b[i * 8 + j] + 128, 0, 255);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+static ErrorOr<void> handle_color_transform(JPEGLoadingContext const& context, Vector<Macroblock>& macroblocks)
+{
+ if (context.color_transform.has_value()) {
+ // https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-T.872-201206-I!!PDF-E&type=items
+ // 6.5.3 - APP14 marker segment for colour encoding
+
+ switch (*context.color_transform) {
+ case ColorTransform::CmykOrRgb:
+ if (context.components.size() == 4) {
+ // FIXME: implement CMYK
+ dbgln("CMYK isn't supported yet");
+ } else if (context.components.size() == 3) {
+ signed_rgb_to_unsigned(context, macroblocks);
+ } else {
+ return Error::from_string_literal("Wrong number of components for CMYK or RGB, aborting.");
+ }
+ break;
+ case ColorTransform::YCbCr:
+ ycbcr_to_rgb(context, macroblocks);
+ break;
+ case ColorTransform::YCCK:
+ // FIXME: implement YCCK
+ dbgln("YCCK isn't supported yet");
+ break;
+ }
+
+ return {};
+ }
+
+ // No APP14 segment is present; assume:
+ // - 1 component means grayscale
+ // - 3 components mean YCbCr
+ // - 4 components mean CMYK
+ if (context.components.size() == 4) {
+ // FIXME: implement CMYK
+ dbgln("CMYK isn't supported yet");
+ }
+ if (context.components.size() == 3)
+ ycbcr_to_rgb(context, macroblocks);
+
+ if (context.components.size() == 1) {
+ // FIXME: This is what we used to do for grayscale,
+ // we should at least document it and maybe change it.
+ ycbcr_to_rgb(context, macroblocks);
+ }
+
+ return {};
+}
+
+static ErrorOr<void> compose_bitmap(JPEGLoadingContext& context, Vector<Macroblock> const& macroblocks)
+{
+ context.bitmap = TRY(Bitmap::create(BitmapFormat::BGRx8888, { context.frame.width, context.frame.height }));
+
+ for (u32 y = context.frame.height - 1; y < context.frame.height; y--) {
+ const u32 block_row = y / 8;
+ const u32 pixel_row = y % 8;
+ for (u32 x = 0; x < context.frame.width; x++) {
+ const u32 block_column = x / 8;
+ auto& block = macroblocks[block_row * context.mblock_meta.hpadded_count + block_column];
+ const u32 pixel_column = x % 8;
+ const u32 pixel_index = pixel_row * 8 + pixel_column;
+ const Color color { (u8)block.y[pixel_index], (u8)block.cb[pixel_index], (u8)block.cr[pixel_index] };
+ context.bitmap->set_pixel(x, y, color);
+ }
+ }
+
+ return {};
+}
+
+static bool is_app_marker(Marker const marker)
+{
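+ // APPn markers form a contiguous range (APP0 through APP15, 0xFFE0-0xFFEF in the JPEG spec).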
+ return marker >= JPEG_APPN0 && marker <= JPEG_APPN15;
+}
+
+static bool is_miscellaneous_or_table_marker(Marker const marker)
+{
+ // B.2.4 - Table-specification and miscellaneous marker segment syntax
+ // See also B.6 - Summary: Figure B.17 – Flow of marker segment
+
+ bool const is_misc = marker == JPEG_COM || marker == JPEG_DRI || is_app_marker(marker);
+ bool const is_table = marker == JPEG_DQT || marker == JPEG_DAC || marker == JPEG_DHT;
+
+ return is_misc || is_table;
+}
+
+static ErrorOr<void> handle_miscellaneous_or_table(AK::SeekableStream& stream, JPEGLoadingContext& context, Marker const marker)
+{
+ if (is_app_marker(marker)) {
+ TRY(read_app_marker(stream, context, marker - JPEG_APPN0));
+ return {};
+ }
+
+ switch (marker) {
+ case JPEG_COM:
+ case JPEG_DAC:
+ dbgln_if(JPEG_DEBUG, "TODO: implement marker \"{:x}\"", marker);
+ if (auto result = skip_segment(stream); result.is_error()) {
+ dbgln_if(JPEG_DEBUG, "{}: Error skipping marker: {:x}!", TRY(stream.tell()), marker);
+ return result.release_error();
+ }
+ break;
+ case JPEG_DHT:
+ TRY(read_huffman_table(stream, context));
+ break;
+ case JPEG_DQT:
+ TRY(read_quantization_table(stream, context));
+ break;
+ case JPEG_DRI:
+ TRY(read_restart_interval(stream, context));
+ break;
+ default:
+ dbgln("Unexpected marker: {:x}", marker);
+ VERIFY_NOT_REACHED();
+ }
+
+ return {};
+}
+
+static ErrorOr<void> parse_header(AK::SeekableStream& stream, JPEGLoadingContext& context)
+{
+ auto marker = TRY(read_marker_at_cursor(stream));
+ if (marker != JPEG_SOI) {
+ dbgln_if(JPEG_DEBUG, "{}: SOI not found: {:x}!", TRY(stream.tell()), marker);
+ return Error::from_string_literal("SOI not found");
+ }
+ for (;;) {
+ marker = TRY(read_marker_at_cursor(stream));
+
+ if (is_miscellaneous_or_table_marker(marker)) {
+ TRY(handle_miscellaneous_or_table(stream, context, marker));
+ continue;
+ }
+
+ // Set frame type if the marker marks a new frame.
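+ // SOFn markers encode the coding process in their low nibble (0 = baseline sequential DCT, 2 = progressive DCT).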
+ if (is_frame_marker(marker))
+ context.frame.type = static_cast<StartOfFrame::FrameType>(marker & 0xF);
+
+ switch (marker) {
+ case JPEG_INVALID:
+ case JPEG_RST0:
+ case JPEG_RST1:
+ case JPEG_RST2:
+ case JPEG_RST3:
+ case JPEG_RST4:
+ case JPEG_RST5:
+ case JPEG_RST6:
+ case JPEG_RST7:
+ case JPEG_SOI:
+ case JPEG_EOI:
+ dbgln_if(JPEG_DEBUG, "{}: Unexpected marker {:x}!", TRY(stream.tell()), marker);
+ return Error::from_string_literal("Unexpected marker");
+ case JPEG_SOF0:
+ case JPEG_SOF2:
+ TRY(read_start_of_frame(stream, context));
+ context.state = JPEGLoadingContext::FrameDecoded;
+ return {};
+ default:
+ if (auto result = skip_segment(stream); result.is_error()) {
+ dbgln_if(JPEG_DEBUG, "{}: Error skipping marker: {:x}!", TRY(stream.tell()), marker);
+ return result.release_error();
+ }
+ break;
+ }
+ }
+
+ VERIFY_NOT_REACHED();
+}
+
+static ErrorOr<void> scan_huffman_stream(AK::SeekableStream& stream, HuffmanStreamState& huffman_stream)
+{
+ u8 last_byte;
+ u8 current_byte = TRY(stream.read_value<u8>());
+
+ for (;;) {
+ last_byte = current_byte;
+ current_byte = TRY(stream.read_value<u8>());
+
+ if (last_byte == 0xFF) {
+ if (current_byte == 0xFF)
+ continue;
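+ // 0xFF 0x00 is a stuffed byte: keep the 0xFF as entropy-coded data and drop the 0x00.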
+ if (current_byte == 0x00) {
+ current_byte = TRY(stream.read_value<u8>());
+ huffman_stream.stream.append(last_byte);
+ continue;
+ }
+ Marker marker = 0xFF00 | current_byte;
+ if (marker >= JPEG_RST0 && marker <= JPEG_RST7) {
+ huffman_stream.stream.append(marker);
+ current_byte = TRY(stream.read_value<u8>());
+ continue;
+ }
+
+ // Rollback the marker we just read
+ TRY(stream.seek(-2, AK::SeekMode::FromCurrentPosition));
+ return {};
+ } else {
+ huffman_stream.stream.append(last_byte);
+ }
+ }
+
+ VERIFY_NOT_REACHED();
+}
+
+static ErrorOr<void> decode_header(JPEGLoadingContext& context)
+{
+ if (context.state < JPEGLoadingContext::State::HeaderDecoded) {
+ if (auto result = parse_header(*context.stream, context); result.is_error()) {
+ context.state = JPEGLoadingContext::State::Error;
+ return result.release_error();
+ }
+
+ if constexpr (JPEG_DEBUG) {
+ dbgln("Image width: {}", context.frame.width);
+ dbgln("Image height: {}", context.frame.height);
+ dbgln("Macroblocks in a row: {}", context.mblock_meta.hpadded_count);
+ dbgln("Macroblocks in a column: {}", context.mblock_meta.vpadded_count);
+ dbgln("Macroblock meta padded total: {}", context.mblock_meta.padded_total);
+ }
+
+ context.state = JPEGLoadingContext::State::HeaderDecoded;
+ }
+ return {};
+}
+
+static ErrorOr<Vector<Macroblock>> construct_macroblocks(JPEGLoadingContext& context)
+{
+ // B.6 - Summary
+ // See: Figure B.16 – Flow of compressed data syntax
+ // This function handles the "Multi-scan" loop.
+
+ Vector<Macroblock> macroblocks;
+ TRY(macroblocks.try_resize(context.mblock_meta.padded_total));
+
+ Marker marker = TRY(read_marker_at_cursor(*context.stream));
+ while (true) {
+ if (is_miscellaneous_or_table_marker(marker)) {
+ TRY(handle_miscellaneous_or_table(*context.stream, context, marker));
+ } else if (marker == JPEG_SOS) {
+ TRY(read_start_of_scan(*context.stream, context));
+ TRY(scan_huffman_stream(*context.stream, context.current_scan.huffman_stream));
+ TRY(decode_huffman_stream(context, macroblocks));
+ } else if (marker == JPEG_EOI) {
+ return macroblocks;
+ } else {
+ dbgln_if(JPEG_DEBUG, "{}: Unexpected marker {:x}!", TRY(context.stream->tell()), marker);
+ return Error::from_string_literal("Unexpected marker");
+ }
+
+ marker = TRY(read_marker_at_cursor(*context.stream));
+ }
+}
+
+static ErrorOr<void> decode_jpeg(JPEGLoadingContext& context)
+{
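+ // Full decode pipeline: parse headers, entropy-decode the scans into macroblocks, dequantize, apply the inverse DCT, convert colors, then compose the output bitmap.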
+ TRY(decode_header(context));
+ auto macroblocks = TRY(construct_macroblocks(context));
+ dequantize(context, macroblocks);
+ inverse_dct(context, macroblocks);
+ TRY(handle_color_transform(context, macroblocks));
+ TRY(compose_bitmap(context, macroblocks));
+ context.stream.clear();
+ return {};
+}
+
+JPEGImageDecoderPlugin::JPEGImageDecoderPlugin(NonnullOwnPtr<FixedMemoryStream> stream)
+{
+ m_context = make<JPEGLoadingContext>();
+ m_context->stream = move(stream);
+}
+
+JPEGImageDecoderPlugin::~JPEGImageDecoderPlugin() = default;
+
+IntSize JPEGImageDecoderPlugin::size()
+{
+ if (m_context->state == JPEGLoadingContext::State::Error)
+ return {};
+ if (m_context->state >= JPEGLoadingContext::State::FrameDecoded)
+ return { m_context->frame.width, m_context->frame.height };
+
+ return {};
+}
+
+void JPEGImageDecoderPlugin::set_volatile()
+{
+ if (m_context->bitmap)
+ m_context->bitmap->set_volatile();
+}
+
+bool JPEGImageDecoderPlugin::set_nonvolatile(bool& was_purged)
+{
+ if (!m_context->bitmap)
+ return false;
+ return m_context->bitmap->set_nonvolatile(was_purged);
+}
+
+bool JPEGImageDecoderPlugin::initialize()
+{
+ return true;
+}
+
+bool JPEGImageDecoderPlugin::sniff(ReadonlyBytes data)
+{
+ return data.size() > 3
+ && data.data()[0] == 0xFF
+ && data.data()[1] == 0xD8
+ && data.data()[2] == 0xFF;
+}
+
+ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> JPEGImageDecoderPlugin::create(ReadonlyBytes data)
+{
+ auto stream = TRY(try_make<FixedMemoryStream>(data));
+ return adopt_nonnull_own_or_enomem(new (nothrow) JPEGImageDecoderPlugin(move(stream)));
+}
+
+bool JPEGImageDecoderPlugin::is_animated()
+{
+ return false;
+}
+
+size_t JPEGImageDecoderPlugin::loop_count()
+{
+ return 0;
+}
+
+size_t JPEGImageDecoderPlugin::frame_count()
+{
+ return 1;
+}
+
+ErrorOr<ImageFrameDescriptor> JPEGImageDecoderPlugin::frame(size_t index)
+{
+ if (index > 0)
+ return Error::from_string_literal("JPEGImageDecoderPlugin: Invalid frame index");
+
+ if (m_context->state == JPEGLoadingContext::State::Error)
+ return Error::from_string_literal("JPEGImageDecoderPlugin: Decoding failed");
+
+ if (m_context->state < JPEGLoadingContext::State::BitmapDecoded) {
+ if (auto result = decode_jpeg(*m_context); result.is_error()) {
+ m_context->state = JPEGLoadingContext::State::Error;
+ return result.release_error();
+ }
+ m_context->state = JPEGLoadingContext::State::BitmapDecoded;
+ }
+
+ return ImageFrameDescriptor { m_context->bitmap, 0 };
+}
+
+ErrorOr<Optional<ReadonlyBytes>> JPEGImageDecoderPlugin::icc_data()
+{
+ TRY(decode_header(*m_context));
+
+ if (m_context->icc_data.has_value())
+ return *m_context->icc_data;
+ return OptionalNone {};
+}
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/JPEGLoader.h b/Userland/Libraries/LibGfx/ImageFormats/JPEGLoader.h
new file mode 100644
index 0000000000..fc650f8fb8
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/JPEGLoader.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020, the SerenityOS developers.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/MemoryStream.h>
+#include <LibGfx/ImageFormats/ImageDecoder.h>
+
+namespace Gfx {
+
+struct JPEGLoadingContext;
+
+// For the specification, see: https://www.w3.org/Graphics/JPEG/itu-t81.pdf
+
+class JPEGImageDecoderPlugin : public ImageDecoderPlugin {
+public:
+ static bool sniff(ReadonlyBytes);
+ static ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> create(ReadonlyBytes);
+
+ virtual ~JPEGImageDecoderPlugin() override;
+ virtual IntSize size() override;
+ virtual void set_volatile() override;
+ [[nodiscard]] virtual bool set_nonvolatile(bool& was_purged) override;
+ virtual bool initialize() override;
+ virtual bool is_animated() override;
+ virtual size_t loop_count() override;
+ virtual size_t frame_count() override;
+ virtual ErrorOr<ImageFrameDescriptor> frame(size_t index) override;
+ virtual ErrorOr<Optional<ReadonlyBytes>> icc_data() override;
+
+private:
+ JPEGImageDecoderPlugin(NonnullOwnPtr<FixedMemoryStream>);
+
+ OwnPtr<JPEGLoadingContext> m_context;
+};
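+
+// Typical usage (sketch only; error propagation via TRY(), and names other than the
+// class API above are illustrative):
+//
+//     auto plugin = TRY(JPEGImageDecoderPlugin::create(jpeg_bytes));
+//     auto frame = TRY(plugin->frame(0));
+//     // frame is an ImageFrameDescriptor holding the decoded Gfx::Bitmap.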
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/PBMLoader.cpp b/Userland/Libraries/LibGfx/ImageFormats/PBMLoader.cpp
new file mode 100644
index 0000000000..dff6a12d66
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/PBMLoader.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2020, Hüseyin ASLITÜRK <asliturk@hotmail.com>
+ * Copyright (c) 2022, the SerenityOS developers.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include "PBMLoader.h"
+#include "AK/Endian.h"
+#include "PortableImageLoaderCommon.h"
+#include "Userland/Libraries/LibGfx/Streamer.h"
+#include <string.h>
+
+namespace Gfx {
+
+bool read_image_data(PBMLoadingContext& context, Streamer& streamer)
+{
+ u8 byte;
+ Vector<Gfx::Color> color_data;
+
+ if (context.type == PBMLoadingContext::Type::ASCII) {
+ while (streamer.read(byte)) {
+ if (byte == '0') {
+ color_data.append(Color::White);
+ } else if (byte == '1') {
+ color_data.append(Color::Black);
+ }
+ }
+ } else if (context.type == PBMLoadingContext::Type::RAWBITS) {
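+ // Raw (P4) PBM packs pixels MSB-first, one bit per pixel, with each row padded to a whole byte; a set bit is black.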
+ size_t color_index = 0;
+
+ while (streamer.read(byte)) {
+ for (int i = 0; i < 8; i++) {
+ int val = byte & 0x80;
+
+ if (val == 0) {
+ color_data.append(Color::White);
+ } else {
+ color_data.append(Color::Black);
+ }
+
+ byte = byte << 1;
+ color_index++;
+
+ if (color_index % context.width == 0) {
+ break;
+ }
+ }
+ }
+ }
+
+ size_t context_size = (u32)context.width * (u32)context.height;
+ if (context_size != color_data.size()) {
+ dbgln("Not enough color data in image.");
+ return false;
+ }
+
+ if (!create_bitmap(context)) {
+ return false;
+ }
+
+ set_pixels(context, color_data);
+
+ context.state = PBMLoadingContext::State::Bitmap;
+ return true;
+}
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/PBMLoader.h b/Userland/Libraries/LibGfx/ImageFormats/PBMLoader.h
new file mode 100644
index 0000000000..37705432b3
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/PBMLoader.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2020, Hüseyin ASLITÜRK <asliturk@hotmail.com>
+ * Copyright (c) 2022, the SerenityOS developers.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/StringView.h>
+#include <LibGfx/ImageFormats/ImageDecoder.h>
+#include <LibGfx/ImageFormats/PortableImageMapLoader.h>
+
+namespace Gfx {
+
+struct PBM {
+ static constexpr auto ascii_magic_number = '1';
+ static constexpr auto binary_magic_number = '4';
+ static constexpr StringView image_type = "PBM"sv;
+};
+
+using PBMLoadingContext = PortableImageMapLoadingContext<PBM>;
+using PBMImageDecoderPlugin = PortableImageDecoderPlugin<PBMLoadingContext>;
+
+bool read_image_data(PBMLoadingContext& context, Streamer& streamer);
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/PGMLoader.cpp b/Userland/Libraries/LibGfx/ImageFormats/PGMLoader.cpp
new file mode 100644
index 0000000000..0240548013
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/PGMLoader.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2020, Hüseyin ASLITÜRK <asliturk@hotmail.com>
+ * Copyright (c) 2022, the SerenityOS developers.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/Endian.h>
+#include <LibGfx/ImageFormats/PGMLoader.h>
+#include <LibGfx/ImageFormats/PortableImageLoaderCommon.h>
+#include <LibGfx/Streamer.h>
+#include <string.h>
+
+namespace Gfx {
+
+static void set_adjusted_pixels(PGMLoadingContext& context, Vector<Gfx::Color> const& color_data)
+{
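+ // Grayscale samples are stored relative to max_val; adjust_color() rescales them to the full 8-bit range when max_val is below 255.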
+ size_t index = 0;
+ for (size_t y = 0; y < context.height; ++y) {
+ for (size_t x = 0; x < context.width; ++x) {
+ Color color = color_data.at(index);
+ if (context.format_details.max_val < 255) {
+ color = adjust_color(context.format_details.max_val, color);
+ }
+ context.bitmap->set_pixel(x, y, color);
+ ++index;
+ }
+ }
+}
+
+bool read_image_data(PGMLoadingContext& context, Streamer& streamer)
+{
+ Vector<Gfx::Color> color_data;
+
+ if (context.type == PGMLoadingContext::Type::ASCII) {
+ u16 value;
+
+ while (true) {
+ if (!read_number(streamer, &value))
+ break;
+
+ if (!read_whitespace(context, streamer))
+ break;
+
+ color_data.append({ (u8)value, (u8)value, (u8)value });
+ }
+ } else if (context.type == PGMLoadingContext::Type::RAWBITS) {
+ u8 pixel;
+ while (streamer.read(pixel)) {
+ color_data.append({ pixel, pixel, pixel });
+ }
+ }
+
+ size_t context_size = (u32)context.width * (u32)context.height;
+ if (context_size != color_data.size()) {
+ dbgln("Not enough color data in image.");
+ return false;
+ }
+
+ if (!create_bitmap(context))
+ return false;
+
+ set_adjusted_pixels(context, color_data);
+
+ context.state = PGMLoadingContext::State::Bitmap;
+ return true;
+}
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/PGMLoader.h b/Userland/Libraries/LibGfx/ImageFormats/PGMLoader.h
new file mode 100644
index 0000000000..087a5a4c6b
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/PGMLoader.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2020, Hüseyin ASLITÜRK <asliturk@hotmail.com>
+ * Copyright (c) 2022, the SerenityOS developers.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/StringView.h>
+#include <LibGfx/ImageFormats/ImageDecoder.h>
+#include <LibGfx/ImageFormats/PortableImageMapLoader.h>
+
+namespace Gfx {
+
+struct PGM {
+ static constexpr auto ascii_magic_number = '2';
+ static constexpr auto binary_magic_number = '5';
+ static constexpr StringView image_type = "PGM"sv;
+ u16 max_val { 0 };
+};
+
+using PGMLoadingContext = PortableImageMapLoadingContext<PGM>;
+using PGMImageDecoderPlugin = PortableImageDecoderPlugin<PGMLoadingContext>;
+
+bool read_image_data(PGMLoadingContext& context, Streamer& streamer);
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/PNGLoader.cpp b/Userland/Libraries/LibGfx/ImageFormats/PNGLoader.cpp
new file mode 100644
index 0000000000..2ec92ef34f
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/PNGLoader.cpp
@@ -0,0 +1,1111 @@
+/*
+ * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2022, the SerenityOS developers.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/Debug.h>
+#include <AK/Endian.h>
+#include <AK/Vector.h>
+#include <LibCompress/Zlib.h>
+#include <LibGfx/ImageFormats/PNGLoader.h>
+#include <LibGfx/ImageFormats/PNGShared.h>
+#include <string.h>
+
+namespace Gfx {
+
+struct PNG_IHDR {
+ NetworkOrdered<u32> width;
+ NetworkOrdered<u32> height;
+ u8 bit_depth { 0 };
+ PNG::ColorType color_type { 0 };
+ u8 compression_method { 0 };
+ u8 filter_method { 0 };
+ u8 interlace_method { 0 };
+};
+
+static_assert(AssertSize<PNG_IHDR, 13>());
+
+struct ChromaticitiesAndWhitepoint {
+ NetworkOrdered<u32> white_point_x;
+ NetworkOrdered<u32> white_point_y;
+ NetworkOrdered<u32> red_x;
+ NetworkOrdered<u32> red_y;
+ NetworkOrdered<u32> green_x;
+ NetworkOrdered<u32> green_y;
+ NetworkOrdered<u32> blue_x;
+ NetworkOrdered<u32> blue_y;
+};
+static_assert(AssertSize<ChromaticitiesAndWhitepoint, 32>());
+
+struct CodingIndependentCodePoints {
+ u8 color_primaries;
+ u8 transfer_function;
+ u8 matrix_coefficients;
+ u8 video_full_range_flag;
+};
+static_assert(AssertSize<CodingIndependentCodePoints, 4>());
+
+struct EmbeddedICCProfile {
+ StringView profile_name;
+ ReadonlyBytes compressed_data;
+};
+
+struct Scanline {
+ PNG::FilterType filter;
+ ReadonlyBytes data {};
+};
+
+struct [[gnu::packed]] PaletteEntry {
+ u8 r;
+ u8 g;
+ u8 b;
+ // u8 a;
+};
+
+template<typename T>
+struct [[gnu::packed]] Tuple {
+ T gray;
+ T a;
+};
+
+template<typename T>
+struct [[gnu::packed]] Triplet {
+ T r;
+ T g;
+ T b;
+
+ bool operator==(Triplet const& other) const = default;
+};
+
+template<typename T>
+struct [[gnu::packed]] Quartet {
+ T r;
+ T g;
+ T b;
+ T a;
+};
+
+enum PngInterlaceMethod {
+ Null = 0,
+ Adam7 = 1
+};
+
+enum RenderingIntent {
+ Perceptual = 0,
+ RelativeColorimetric = 1,
+ Saturation = 2,
+ AbsoluteColorimetric = 3,
+};
+
+struct PNGLoadingContext {
+ enum State {
+ NotDecoded = 0,
+ Error,
+ HeaderDecoded,
+ SizeDecoded,
+ ChunksDecoded,
+ BitmapDecoded,
+ };
+ State state { State::NotDecoded };
+ u8 const* data { nullptr };
+ size_t data_size { 0 };
+ int width { -1 };
+ int height { -1 };
+ u8 bit_depth { 0 };
+ PNG::ColorType color_type { 0 };
+ u8 compression_method { 0 };
+ u8 filter_method { 0 };
+ u8 interlace_method { 0 };
+ u8 channels { 0 };
+ bool has_seen_zlib_header { false };
+ bool has_alpha() const { return to_underlying(color_type) & 4 || palette_transparency_data.size() > 0; }
+ Vector<Scanline> scanlines;
+ ByteBuffer unfiltered_data;
+ RefPtr<Gfx::Bitmap> bitmap;
+ ByteBuffer* decompression_buffer { nullptr };
+ Vector<u8> compressed_data;
+ Vector<PaletteEntry> palette_data;
+ Vector<u8> palette_transparency_data;
+
+ Optional<ChromaticitiesAndWhitepoint> chromaticities_and_whitepoint;
+ Optional<CodingIndependentCodePoints> coding_independent_code_points;
+ Optional<u32> gamma;
+ Optional<EmbeddedICCProfile> embedded_icc_profile;
+ Optional<ByteBuffer> decompressed_icc_profile;
+ Optional<RenderingIntent> sRGB_rendering_intent;
+
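+ // Bytes per scanline: ceil(width * channels * bit_depth / 8), computed with overflow checks.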
+ Checked<int> compute_row_size_for_width(int width)
+ {
+ Checked<int> row_size = width;
+ row_size *= channels;
+ row_size *= bit_depth;
+ row_size += 7;
+ row_size /= 8;
+ if (row_size.has_overflow()) {
+ dbgln("PNG too large, integer overflow while computing row size");
+ state = State::Error;
+ }
+ return row_size;
+ }
+};
+
+class Streamer {
+public:
+ Streamer(u8 const* data, size_t size)
+ : m_data_ptr(data)
+ , m_size_remaining(size)
+ {
+ }
+
+ template<typename T>
+ bool read(T& value)
+ {
+ if (m_size_remaining < sizeof(T))
+ return false;
+ value = *((NetworkOrdered<T> const*)m_data_ptr);
+ m_data_ptr += sizeof(T);
+ m_size_remaining -= sizeof(T);
+ return true;
+ }
+
+ bool read_bytes(u8* buffer, size_t count)
+ {
+ if (m_size_remaining < count)
+ return false;
+ memcpy(buffer, m_data_ptr, count);
+ m_data_ptr += count;
+ m_size_remaining -= count;
+ return true;
+ }
+
+ bool wrap_bytes(ReadonlyBytes& buffer, size_t count)
+ {
+ if (m_size_remaining < count)
+ return false;
+ buffer = ReadonlyBytes { m_data_ptr, count };
+ m_data_ptr += count;
+ m_size_remaining -= count;
+ return true;
+ }
+
+ bool at_end() const { return !m_size_remaining; }
+
+private:
+ u8 const* m_data_ptr { nullptr };
+ size_t m_size_remaining { 0 };
+};
+
+static bool process_chunk(Streamer&, PNGLoadingContext& context);
+
+union [[gnu::packed]] Pixel {
+ ARGB32 rgba { 0 };
+ u8 v[4];
+ struct {
+ u8 r;
+ u8 g;
+ u8 b;
+ u8 a;
+ };
+};
+static_assert(AssertSize<Pixel, 4>());
+
+static void unfilter_scanline(PNG::FilterType filter, Bytes scanline_data, ReadonlyBytes previous_scanlines_data, u8 bytes_per_complete_pixel)
+{
+ VERIFY(filter != PNG::FilterType::None);
+
+ switch (filter) {
+ case PNG::FilterType::Sub:
+ // This loop starts at bytes_per_complete_pixel because all bytes before that are
+ // guaranteed to have no valid byte at index (i - bytes_per_complete_pixel).
+ // All such invalid byte indexes should be treated as 0, and adding 0 to the current
+ // byte would do nothing, so the first bytes_per_complete_pixel bytes can instead
+ // just be skipped.
+ for (size_t i = bytes_per_complete_pixel; i < scanline_data.size(); ++i) {
+ u8 left = scanline_data[i - bytes_per_complete_pixel];
+ scanline_data[i] += left;
+ }
+ break;
+ case PNG::FilterType::Up:
+ for (size_t i = 0; i < scanline_data.size(); ++i) {
+ u8 above = previous_scanlines_data[i];
+ scanline_data[i] += above;
+ }
+ break;
+ case PNG::FilterType::Average:
+ for (size_t i = 0; i < scanline_data.size(); ++i) {
+ u32 left = (i < bytes_per_complete_pixel) ? 0 : scanline_data[i - bytes_per_complete_pixel];
+ u32 above = previous_scanlines_data[i];
+ u8 average = (left + above) / 2;
+ scanline_data[i] += average;
+ }
+ break;
+ case PNG::FilterType::Paeth:
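+ // Paeth: predict each byte from whichever of left, above, or upper-left is closest to (left + above - upper_left).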
+ for (size_t i = 0; i < scanline_data.size(); ++i) {
+ u8 left = (i < bytes_per_complete_pixel) ? 0 : scanline_data[i - bytes_per_complete_pixel];
+ u8 above = previous_scanlines_data[i];
+ u8 upper_left = (i < bytes_per_complete_pixel) ? 0 : previous_scanlines_data[i - bytes_per_complete_pixel];
+ i32 predictor = left + above - upper_left;
+ u32 predictor_left = abs(predictor - left);
+ u32 predictor_above = abs(predictor - above);
+ u32 predictor_upper_left = abs(predictor - upper_left);
+ u8 nearest;
+ if (predictor_left <= predictor_above && predictor_left <= predictor_upper_left) {
+ nearest = left;
+ } else if (predictor_above <= predictor_upper_left) {
+ nearest = above;
+ } else {
+ nearest = upper_left;
+ }
+ scanline_data[i] += nearest;
+ }
+ break;
+ default:
+ VERIFY_NOT_REACHED();
+ }
+}
+
+template<typename T>
+ALWAYS_INLINE static void unpack_grayscale_without_alpha(PNGLoadingContext& context)
+{
+ for (int y = 0; y < context.height; ++y) {
+ auto* gray_values = reinterpret_cast<T const*>(context.scanlines[y].data.data());
+ for (int i = 0; i < context.width; ++i) {
+ auto& pixel = (Pixel&)context.bitmap->scanline(y)[i];
+ pixel.r = gray_values[i];
+ pixel.g = gray_values[i];
+ pixel.b = gray_values[i];
+ pixel.a = 0xff;
+ }
+ }
+}
+
+template<typename T>
+ALWAYS_INLINE static void unpack_grayscale_with_alpha(PNGLoadingContext& context)
+{
+ for (int y = 0; y < context.height; ++y) {
+ auto* tuples = reinterpret_cast<Tuple<T> const*>(context.scanlines[y].data.data());
+ for (int i = 0; i < context.width; ++i) {
+ auto& pixel = (Pixel&)context.bitmap->scanline(y)[i];
+ pixel.r = tuples[i].gray;
+ pixel.g = tuples[i].gray;
+ pixel.b = tuples[i].gray;
+ pixel.a = tuples[i].a;
+ }
+ }
+}
+
+template<typename T>
+ALWAYS_INLINE static void unpack_triplets_without_alpha(PNGLoadingContext& context)
+{
+ for (int y = 0; y < context.height; ++y) {
+ auto* triplets = reinterpret_cast<Triplet<T> const*>(context.scanlines[y].data.data());
+ for (int i = 0; i < context.width; ++i) {
+ auto& pixel = (Pixel&)context.bitmap->scanline(y)[i];
+ pixel.r = triplets[i].r;
+ pixel.g = triplets[i].g;
+ pixel.b = triplets[i].b;
+ pixel.a = 0xff;
+ }
+ }
+}
+
+template<typename T>
+ALWAYS_INLINE static void unpack_triplets_with_transparency_value(PNGLoadingContext& context, Triplet<T> transparency_value)
+{
+ for (int y = 0; y < context.height; ++y) {
+ auto* triplets = reinterpret_cast<Triplet<T> const*>(context.scanlines[y].data.data());
+ for (int i = 0; i < context.width; ++i) {
+ auto& pixel = (Pixel&)context.bitmap->scanline(y)[i];
+ pixel.r = triplets[i].r;
+ pixel.g = triplets[i].g;
+ pixel.b = triplets[i].b;
+ if (triplets[i] == transparency_value)
+ pixel.a = 0x00;
+ else
+ pixel.a = 0xff;
+ }
+ }
+}
+
+NEVER_INLINE FLATTEN static ErrorOr<void> unfilter(PNGLoadingContext& context)
+{
+ // First unfilter the scanlines:
+
+ // FIXME: Instead of creating a separate buffer for the scanlines that need to be
+ // mutated, the mutation could be done in place (if the data was non-const).
+ size_t bytes_per_scanline = context.scanlines[0].data.size();
+ size_t bytes_needed_for_all_unfiltered_scanlines = 0;
+ for (int y = 0; y < context.height; ++y) {
+ if (context.scanlines[y].filter != PNG::FilterType::None) {
+ bytes_needed_for_all_unfiltered_scanlines += bytes_per_scanline;
+ }
+ }
+ context.unfiltered_data = TRY(ByteBuffer::create_uninitialized(bytes_needed_for_all_unfiltered_scanlines));
+
+ // From section 6.3 of http://www.libpng.org/pub/png/spec/1.2/PNG-Filters.html
+ // "bpp is defined as the number of bytes per complete pixel, rounding up to one.
+ // For example, for color type 2 with a bit depth of 16, bpp is equal to 6
+ // (three samples, two bytes per sample); for color type 0 with a bit depth of 2,
+ // bpp is equal to 1 (rounding up); for color type 4 with a bit depth of 16, bpp
+ // is equal to 4 (two-byte grayscale sample, plus two-byte alpha sample)."
+ u8 bytes_per_complete_pixel = (context.bit_depth + 7) / 8 * context.channels;
+
+ u8 dummy_scanline_bytes[bytes_per_scanline];
+ memset(dummy_scanline_bytes, 0, sizeof(dummy_scanline_bytes));
+ auto previous_scanlines_data = ReadonlyBytes { dummy_scanline_bytes, sizeof(dummy_scanline_bytes) };
+
+ for (int y = 0, data_start = 0; y < context.height; ++y) {
+ if (context.scanlines[y].filter != PNG::FilterType::None) {
+ auto scanline_data_slice = context.unfiltered_data.bytes().slice(data_start, bytes_per_scanline);
+
+ // Copy the current values over and set the scanline's data to the to-be-mutated slice
+ context.scanlines[y].data.copy_to(scanline_data_slice);
+ context.scanlines[y].data = scanline_data_slice;
+
+ unfilter_scanline(context.scanlines[y].filter, scanline_data_slice, previous_scanlines_data, bytes_per_complete_pixel);
+
+ data_start += bytes_per_scanline;
+ }
+ previous_scanlines_data = context.scanlines[y].data;
+ }
+
+ // Now unpack the scanlines to RGBA:
+ switch (context.color_type) {
+ case PNG::ColorType::Greyscale:
+ if (context.bit_depth == 8) {
+ unpack_grayscale_without_alpha<u8>(context);
+ } else if (context.bit_depth == 16) {
+ unpack_grayscale_without_alpha<u16>(context);
+ } else if (context.bit_depth == 1 || context.bit_depth == 2 || context.bit_depth == 4) {
+ // Scale an n-bit sample up to the full 8-bit range: multiply by 255 / (2^n - 1).
+ auto scaling_factor = 0xff / ((1 << context.bit_depth) - 1);
+ auto pixels_per_byte = 8 / context.bit_depth;
+ auto mask = (1 << context.bit_depth) - 1;
+ for (int y = 0; y < context.height; ++y) {
+ auto* gray_values = context.scanlines[y].data.data();
+ for (int x = 0; x < context.width; ++x) {
+ auto bit_offset = (8 - context.bit_depth) - (context.bit_depth * (x % pixels_per_byte));
+ auto value = (gray_values[x / pixels_per_byte] >> bit_offset) & mask;
+ auto& pixel = (Pixel&)context.bitmap->scanline(y)[x];
+ pixel.r = value * scaling_factor;
+ pixel.g = value * scaling_factor;
+ pixel.b = value * scaling_factor;
+ pixel.a = 0xff;
+ }
+ }
+ } else {
+ VERIFY_NOT_REACHED();
+ }
+ break;
+ case PNG::ColorType::GreyscaleWithAlpha:
+ if (context.bit_depth == 8) {
+ unpack_grayscale_with_alpha<u8>(context);
+ } else if (context.bit_depth == 16) {
+ unpack_grayscale_with_alpha<u16>(context);
+ } else {
+ VERIFY_NOT_REACHED();
+ }
+ break;
+ case PNG::ColorType::Truecolor:
+ if (context.palette_transparency_data.size() == 6) {
+ if (context.bit_depth == 8) {
+ unpack_triplets_with_transparency_value<u8>(context, Triplet<u8> { context.palette_transparency_data[0], context.palette_transparency_data[2], context.palette_transparency_data[4] });
+ } else if (context.bit_depth == 16) {
+ u16 tr = context.palette_transparency_data[0] | context.palette_transparency_data[1] << 8;
+ u16 tg = context.palette_transparency_data[2] | context.palette_transparency_data[3] << 8;
+ u16 tb = context.palette_transparency_data[4] | context.palette_transparency_data[5] << 8;
+ unpack_triplets_with_transparency_value<u16>(context, Triplet<u16> { tr, tg, tb });
+ } else {
+ VERIFY_NOT_REACHED();
+ }
+ } else {
+ if (context.bit_depth == 8)
+ unpack_triplets_without_alpha<u8>(context);
+ else if (context.bit_depth == 16)
+ unpack_triplets_without_alpha<u16>(context);
+ else
+ VERIFY_NOT_REACHED();
+ }
+ break;
+ case PNG::ColorType::TruecolorWithAlpha:
+ if (context.bit_depth == 8) {
+ for (int y = 0; y < context.height; ++y) {
+ memcpy(context.bitmap->scanline(y), context.scanlines[y].data.data(), context.scanlines[y].data.size());
+ }
+ } else if (context.bit_depth == 16) {
+ for (int y = 0; y < context.height; ++y) {
+ auto* quartets = reinterpret_cast<Quartet<u16> const*>(context.scanlines[y].data.data());
+ for (int i = 0; i < context.width; ++i) {
+ auto& pixel = (Pixel&)context.bitmap->scanline(y)[i];
+ pixel.r = quartets[i].r & 0xFF;
+ pixel.g = quartets[i].g & 0xFF;
+ pixel.b = quartets[i].b & 0xFF;
+ pixel.a = quartets[i].a & 0xFF;
+ }
+ }
+ } else {
+ VERIFY_NOT_REACHED();
+ }
+ break;
+ case PNG::ColorType::IndexedColor:
+ if (context.bit_depth == 8) {
+ for (int y = 0; y < context.height; ++y) {
+ auto* palette_index = context.scanlines[y].data.data();
+ for (int i = 0; i < context.width; ++i) {
+ auto& pixel = (Pixel&)context.bitmap->scanline(y)[i];
+ if (palette_index[i] >= context.palette_data.size())
+ return Error::from_string_literal("PNGImageDecoderPlugin: Palette index out of range");
+ auto& color = context.palette_data.at((int)palette_index[i]);
+ auto transparency = context.palette_transparency_data.size() >= palette_index[i] + 1u
+ ? context.palette_transparency_data.data()[palette_index[i]]
+ : 0xff;
+ pixel.r = color.r;
+ pixel.g = color.g;
+ pixel.b = color.b;
+ pixel.a = transparency;
+ }
+ }
+ } else if (context.bit_depth == 1 || context.bit_depth == 2 || context.bit_depth == 4) {
+ auto pixels_per_byte = 8 / context.bit_depth;
+ auto mask = (1 << context.bit_depth) - 1;
+ for (int y = 0; y < context.height; ++y) {
+ auto* palette_indices = context.scanlines[y].data.data();
+ for (int i = 0; i < context.width; ++i) {
+ auto bit_offset = (8 - context.bit_depth) - (context.bit_depth * (i % pixels_per_byte));
+ auto palette_index = (palette_indices[i / pixels_per_byte] >> bit_offset) & mask;
+ auto& pixel = (Pixel&)context.bitmap->scanline(y)[i];
+ if ((size_t)palette_index >= context.palette_data.size())
+ return Error::from_string_literal("PNGImageDecoderPlugin: Palette index out of range");
+ auto& color = context.palette_data.at(palette_index);
+ auto transparency = context.palette_transparency_data.size() >= palette_index + 1u
+ ? context.palette_transparency_data.data()[palette_index]
+ : 0xff;
+ pixel.r = color.r;
+ pixel.g = color.g;
+ pixel.b = color.b;
+ pixel.a = transparency;
+ }
+ }
+ } else {
+ VERIFY_NOT_REACHED();
+ }
+ break;
+ default:
+ VERIFY_NOT_REACHED();
+ break;
+ }
+
+ // Swap r and b values:
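+ // (The PNG datastream is RGBA, while the target bitmap formats are BGRA8888 / BGRx8888.)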
+ for (int y = 0; y < context.height; ++y) {
+ auto* pixels = (Pixel*)context.bitmap->scanline(y);
+ for (int i = 0; i < context.bitmap->width(); ++i) {
+ auto& x = pixels[i];
+ swap(x.r, x.b);
+ }
+ }
+
+ return {};
+}
+
+static bool decode_png_header(PNGLoadingContext& context)
+{
+ if (context.state >= PNGLoadingContext::HeaderDecoded)
+ return true;
+
+ if (!context.data || context.data_size < sizeof(PNG::header)) {
+ dbgln_if(PNG_DEBUG, "Missing PNG header");
+ context.state = PNGLoadingContext::State::Error;
+ return false;
+ }
+
+ if (memcmp(context.data, PNG::header.span().data(), sizeof(PNG::header)) != 0) {
+ dbgln_if(PNG_DEBUG, "Invalid PNG header");
+ context.state = PNGLoadingContext::State::Error;
+ return false;
+ }
+
+ context.state = PNGLoadingContext::HeaderDecoded;
+ return true;
+}
+
+static bool decode_png_size(PNGLoadingContext& context)
+{
+ if (context.state >= PNGLoadingContext::SizeDecoded)
+ return true;
+
+ if (context.state < PNGLoadingContext::HeaderDecoded) {
+ if (!decode_png_header(context))
+ return false;
+ }
+
+ u8 const* data_ptr = context.data + sizeof(PNG::header);
+ size_t data_remaining = context.data_size - sizeof(PNG::header);
+
+ Streamer streamer(data_ptr, data_remaining);
+ while (!streamer.at_end()) {
+ if (!process_chunk(streamer, context)) {
+ context.state = PNGLoadingContext::State::Error;
+ return false;
+ }
+ if (context.width && context.height) {
+ context.state = PNGLoadingContext::State::SizeDecoded;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool decode_png_chunks(PNGLoadingContext& context)
+{
+ if (context.state >= PNGLoadingContext::State::ChunksDecoded)
+ return true;
+
+ if (context.state < PNGLoadingContext::HeaderDecoded) {
+ if (!decode_png_header(context))
+ return false;
+ }
+
+ u8 const* data_ptr = context.data + sizeof(PNG::header);
+ size_t data_remaining = context.data_size - sizeof(PNG::header);
+
+ context.compressed_data.ensure_capacity(context.data_size);
+
+ Streamer streamer(data_ptr, data_remaining);
+ while (!streamer.at_end()) {
+ if (!process_chunk(streamer, context)) {
+ // Ignore the failed chunk and just consider chunk decoding to be done.
+ // decode_png_bitmap() will check whether we got all required ones anyway.
+ break;
+ }
+ }
+
+ context.state = PNGLoadingContext::State::ChunksDecoded;
+ return true;
+}
+
+static ErrorOr<void> decode_png_bitmap_simple(PNGLoadingContext& context)
+{
+ Streamer streamer(context.decompression_buffer->data(), context.decompression_buffer->size());
+
+ for (int y = 0; y < context.height; ++y) {
+ PNG::FilterType filter;
+ if (!streamer.read(filter)) {
+ context.state = PNGLoadingContext::State::Error;
+ return Error::from_string_literal("PNGImageDecoderPlugin: Decoding failed");
+ }
+
+ if (to_underlying(filter) > 4) {
+ context.state = PNGLoadingContext::State::Error;
+ return Error::from_string_literal("PNGImageDecoderPlugin: Invalid PNG filter");
+ }
+
+ context.scanlines.append({ filter });
+ auto& scanline_buffer = context.scanlines.last().data;
+ auto row_size = context.compute_row_size_for_width(context.width);
+ if (row_size.has_overflow())
+ return Error::from_string_literal("PNGImageDecoderPlugin: Row size overflow");
+
+ if (!streamer.wrap_bytes(scanline_buffer, row_size.value())) {
+ context.state = PNGLoadingContext::State::Error;
+ return Error::from_string_literal("PNGImageDecoderPlugin: Decoding failed");
+ }
+ }
+
+ context.bitmap = TRY(Bitmap::create(context.has_alpha() ? BitmapFormat::BGRA8888 : BitmapFormat::BGRx8888, { context.width, context.height }));
+ return unfilter(context);
+}
+
+static int adam7_height(PNGLoadingContext& context, int pass)
+{
+ switch (pass) {
+ case 1:
+ return (context.height + 7) / 8;
+ case 2:
+ return (context.height + 7) / 8;
+ case 3:
+ return (context.height + 3) / 8;
+ case 4:
+ return (context.height + 3) / 4;
+ case 5:
+ return (context.height + 1) / 4;
+ case 6:
+ return (context.height + 1) / 2;
+ case 7:
+ return context.height / 2;
+ default:
+ VERIFY_NOT_REACHED();
+ }
+}
+
+static int adam7_width(PNGLoadingContext& context, int pass)
+{
+ switch (pass) {
+ case 1:
+ return (context.width + 7) / 8;
+ case 2:
+ return (context.width + 3) / 8;
+ case 3:
+ return (context.width + 3) / 4;
+ case 4:
+ return (context.width + 1) / 4;
+ case 5:
+ return (context.width + 1) / 2;
+ case 6:
+ return context.width / 2;
+ case 7:
+ return context.width;
+ default:
+ VERIFY_NOT_REACHED();
+ }
+}
+
+// Index 0 unused (non-interlaced case)
+static int adam7_starty[8] = { 0, 0, 0, 4, 0, 2, 0, 1 };
+static int adam7_startx[8] = { 0, 0, 4, 0, 2, 0, 1, 0 };
+static int adam7_stepy[8] = { 1, 8, 8, 8, 4, 4, 2, 2 };
+static int adam7_stepx[8] = { 1, 8, 8, 4, 4, 2, 2, 1 };
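+// The tables above describe the standard Adam7 pass pattern over each 8x8 tile:
+//
+//     1 6 4 6 2 6 4 6
+//     7 7 7 7 7 7 7 7
+//     5 6 5 6 5 6 5 6
+//     7 7 7 7 7 7 7 7
+//     3 6 4 6 3 6 4 6
+//     7 7 7 7 7 7 7 7
+//     5 6 5 6 5 6 5 6
+//     7 7 7 7 7 7 7 7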
+
+static ErrorOr<void> decode_adam7_pass(PNGLoadingContext& context, Streamer& streamer, int pass)
+{
+ PNGLoadingContext subimage_context;
+ subimage_context.width = adam7_width(context, pass);
+ subimage_context.height = adam7_height(context, pass);
+ subimage_context.channels = context.channels;
+ subimage_context.color_type = context.color_type;
+ subimage_context.palette_data = context.palette_data;
+ subimage_context.palette_transparency_data = context.palette_transparency_data;
+ subimage_context.bit_depth = context.bit_depth;
+ subimage_context.filter_method = context.filter_method;
+
+ // For small images, some passes might be empty
+ if (!subimage_context.width || !subimage_context.height)
+ return {};
+
+ subimage_context.scanlines.clear_with_capacity();
+ for (int y = 0; y < subimage_context.height; ++y) {
+ PNG::FilterType filter;
+ if (!streamer.read(filter)) {
+ context.state = PNGLoadingContext::State::Error;
+ return Error::from_string_literal("PNGImageDecoderPlugin: Decoding failed");
+ }
+
+ if (to_underlying(filter) > 4) {
+ context.state = PNGLoadingContext::State::Error;
+ return Error::from_string_literal("PNGImageDecoderPlugin: Invalid PNG filter");
+ }
+
+ subimage_context.scanlines.append({ filter });
+ auto& scanline_buffer = subimage_context.scanlines.last().data;
+
+ auto row_size = context.compute_row_size_for_width(subimage_context.width);
+ if (row_size.has_overflow())
+ return Error::from_string_literal("PNGImageDecoderPlugin: Row size overflow");
+ if (!streamer.wrap_bytes(scanline_buffer, row_size.value())) {
+ context.state = PNGLoadingContext::State::Error;
+ return Error::from_string_literal("PNGImageDecoderPlugin: Decoding failed");
+ }
+ }
+
+ subimage_context.bitmap = TRY(Bitmap::create(context.bitmap->format(), { subimage_context.width, subimage_context.height }));
+ TRY(unfilter(subimage_context));
+
+ // Copy the subimage data into the main image according to the pass pattern
+ for (int y = 0, dy = adam7_starty[pass]; y < subimage_context.height && dy < context.height; ++y, dy += adam7_stepy[pass]) {
+ for (int x = 0, dx = adam7_startx[pass]; x < subimage_context.width && dx < context.width; ++x, dx += adam7_stepx[pass]) {
+ context.bitmap->set_pixel(dx, dy, subimage_context.bitmap->get_pixel(x, y));
+ }
+ }
+ return {};
+}
+
+static ErrorOr<void> decode_png_adam7(PNGLoadingContext& context)
+{
+ Streamer streamer(context.decompression_buffer->data(), context.decompression_buffer->size());
+ context.bitmap = TRY(Bitmap::create(context.has_alpha() ? BitmapFormat::BGRA8888 : BitmapFormat::BGRx8888, { context.width, context.height }));
+ for (int pass = 1; pass <= 7; ++pass)
+ TRY(decode_adam7_pass(context, streamer, pass));
+ return {};
+}
+
+static ErrorOr<void> decode_png_bitmap(PNGLoadingContext& context)
+{
+ if (context.state < PNGLoadingContext::State::ChunksDecoded) {
+ if (!decode_png_chunks(context))
+ return Error::from_string_literal("PNGImageDecoderPlugin: Decoding failed");
+ }
+
+ if (context.state >= PNGLoadingContext::State::BitmapDecoded)
+ return {};
+
+ if (context.width == -1 || context.height == -1)
+ return Error::from_string_literal("PNGImageDecoderPlugin: Didn't see an IHDR chunk.");
+
+ if (context.color_type == PNG::ColorType::IndexedColor && context.palette_data.is_empty())
+ return Error::from_string_literal("PNGImageDecoderPlugin: Didn't see a PLTE chunk for a palettized image, or it was empty.");
+
+ auto result = Compress::ZlibDecompressor::decompress_all(context.compressed_data.span());
+ if (!result.has_value()) {
+ context.state = PNGLoadingContext::State::Error;
+ return Error::from_string_literal("PNGImageDecoderPlugin: Decompression failed");
+ }
+ context.decompression_buffer = &result.value();
+ context.compressed_data.clear();
+
+ context.scanlines.ensure_capacity(context.height);
+ switch (context.interlace_method) {
+ case PngInterlaceMethod::Null:
+ TRY(decode_png_bitmap_simple(context));
+ break;
+ case PngInterlaceMethod::Adam7:
+ TRY(decode_png_adam7(context));
+ break;
+ default:
+ context.state = PNGLoadingContext::State::Error;
+ return Error::from_string_literal("PNGImageDecoderPlugin: Invalid interlace method");
+ }
+
+ context.decompression_buffer = nullptr;
+
+ context.state = PNGLoadingContext::State::BitmapDecoded;
+ return {};
+}
+
+static bool is_valid_compression_method(u8 compression_method)
+{
+ return compression_method == 0;
+}
+
+static bool is_valid_filter_method(u8 filter_method)
+{
+ return filter_method == 0;
+}
+
+static bool process_IHDR(ReadonlyBytes data, PNGLoadingContext& context)
+{
+ if (data.size() < (int)sizeof(PNG_IHDR))
+ return false;
+ auto& ihdr = *(const PNG_IHDR*)data.data();
+
+ if (ihdr.width > maximum_width_for_decoded_images || ihdr.height > maximum_height_for_decoded_images) {
+ dbgln("This PNG is too large for comfort: {}x{}", (u32)ihdr.width, (u32)ihdr.height);
+ return false;
+ }
+
+ if (!is_valid_compression_method(ihdr.compression_method)) {
+ dbgln("PNG has invalid compression method {}", ihdr.compression_method);
+ return false;
+ }
+
+ if (!is_valid_filter_method(ihdr.filter_method)) {
+ dbgln("PNG has invalid filter method {}", ihdr.filter_method);
+ return false;
+ }
+
+ context.width = ihdr.width;
+ context.height = ihdr.height;
+ context.bit_depth = ihdr.bit_depth;
+ context.color_type = ihdr.color_type;
+ context.compression_method = ihdr.compression_method;
+ context.filter_method = ihdr.filter_method;
+ context.interlace_method = ihdr.interlace_method;
+
+ dbgln_if(PNG_DEBUG, "PNG: {}x{} ({} bpp)", context.width, context.height, context.bit_depth);
+ dbgln_if(PNG_DEBUG, " Color type: {}", to_underlying(context.color_type));
+ dbgln_if(PNG_DEBUG, "Compress Method: {}", context.compression_method);
+ dbgln_if(PNG_DEBUG, " Filter Method: {}", context.filter_method);
+ dbgln_if(PNG_DEBUG, " Interlace type: {}", context.interlace_method);
+
+ if (context.interlace_method != PngInterlaceMethod::Null && context.interlace_method != PngInterlaceMethod::Adam7) {
+ dbgln_if(PNG_DEBUG, "PNGLoader::process_IHDR: unknown interlace method: {}", context.interlace_method);
+ return false;
+ }
+
+ switch (context.color_type) {
+ case PNG::ColorType::Greyscale:
+ if (context.bit_depth != 1 && context.bit_depth != 2 && context.bit_depth != 4 && context.bit_depth != 8 && context.bit_depth != 16)
+ return false;
+ context.channels = 1;
+ break;
+ case PNG::ColorType::GreyscaleWithAlpha:
+ if (context.bit_depth != 8 && context.bit_depth != 16)
+ return false;
+ context.channels = 2;
+ break;
+ case PNG::ColorType::Truecolor:
+ if (context.bit_depth != 8 && context.bit_depth != 16)
+ return false;
+ context.channels = 3;
+ break;
+ case PNG::ColorType::IndexedColor:
+ if (context.bit_depth != 1 && context.bit_depth != 2 && context.bit_depth != 4 && context.bit_depth != 8)
+ return false;
+ context.channels = 1;
+ break;
+ case PNG::ColorType::TruecolorWithAlpha:
+ if (context.bit_depth != 8 && context.bit_depth != 16)
+ return false;
+ context.channels = 4;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static bool process_IDAT(ReadonlyBytes data, PNGLoadingContext& context)
+{
+ context.compressed_data.append(data.data(), data.size());
+ return true;
+}
+
+static bool process_PLTE(ReadonlyBytes data, PNGLoadingContext& context)
+{
+ context.palette_data.append((PaletteEntry const*)data.data(), data.size() / 3);
+ return true;
+}
+
+static bool process_tRNS(ReadonlyBytes data, PNGLoadingContext& context)
+{
+ switch (context.color_type) {
+ case PNG::ColorType::Greyscale:
+ case PNG::ColorType::Truecolor:
+ case PNG::ColorType::IndexedColor:
+ context.palette_transparency_data.append(data.data(), data.size());
+ break;
+ default:
+ break;
+ }
+ return true;
+}
+
+static bool process_cHRM(ReadonlyBytes data, PNGLoadingContext& context)
+{
+ // https://www.w3.org/TR/png/#11cHRM
+ if (data.size() != 32)
+ return false;
+ context.chromaticities_and_whitepoint = *bit_cast<ChromaticitiesAndWhitepoint const*>(data.data());
+ return true;
+}
+
+static bool process_cICP(ReadonlyBytes data, PNGLoadingContext& context)
+{
+ // https://www.w3.org/TR/png/#cICP-chunk
+ if (data.size() != 4)
+ return false;
+ context.coding_independent_code_points = *bit_cast<CodingIndependentCodePoints const*>(data.data());
+ return true;
+}
+
+static bool process_iCCP(ReadonlyBytes data, PNGLoadingContext& context)
+{
+ // https://www.w3.org/TR/png/#11iCCP
+ size_t profile_name_length_max = min(80u, data.size());
+ size_t profile_name_length = strnlen((char const*)data.data(), profile_name_length_max);
+ if (profile_name_length == 0 || profile_name_length == profile_name_length_max)
+ return false;
+
+ if (data.size() < profile_name_length + 2)
+ return false;
+
+ u8 compression_method = data[profile_name_length + 1];
+ if (compression_method != 0)
+ return false;
+
+ context.embedded_icc_profile = EmbeddedICCProfile { { data.data(), profile_name_length }, data.slice(profile_name_length + 2) };
+
+ return true;
+}
+
+static bool process_gAMA(ReadonlyBytes data, PNGLoadingContext& context)
+{
+ // https://www.w3.org/TR/png/#11gAMA
+ if (data.size() != 4)
+ return false;
+
+ u32 gamma = *bit_cast<NetworkOrdered<u32> const*>(data.data());
+ if (gamma & 0x8000'0000)
+ return false;
+ context.gamma = gamma;
+
+ return true;
+}
+
+static bool process_sRGB(ReadonlyBytes data, PNGLoadingContext& context)
+{
+ // https://www.w3.org/TR/png/#srgb-standard-colour-space
+ if (data.size() != 1)
+ return false;
+
+ u8 rendering_intent = data[0];
+ if (rendering_intent > 3)
+ return false;
+
+ context.sRGB_rendering_intent = (RenderingIntent)rendering_intent;
+
+ return true;
+}
+
+static bool process_chunk(Streamer& streamer, PNGLoadingContext& context)
+{
+ u32 chunk_size;
+ if (!streamer.read(chunk_size)) {
+ dbgln_if(PNG_DEBUG, "Bail at chunk_size");
+ return false;
+ }
+ u8 chunk_type[5];
+ chunk_type[4] = '\0';
+ if (!streamer.read_bytes(chunk_type, 4)) {
+ dbgln_if(PNG_DEBUG, "Bail at chunk_type");
+ return false;
+ }
+ ReadonlyBytes chunk_data;
+ if (!streamer.wrap_bytes(chunk_data, chunk_size)) {
+ dbgln_if(PNG_DEBUG, "Bail at chunk_data");
+ return false;
+ }
+ u32 chunk_crc;
+ if (!streamer.read(chunk_crc)) {
+ dbgln_if(PNG_DEBUG, "Bail at chunk_crc");
+ return false;
+ }
+ dbgln_if(PNG_DEBUG, "Chunk type: '{}', size: {}, crc: {:x}", chunk_type, chunk_size, chunk_crc);
+
+ if (!strcmp((char const*)chunk_type, "IHDR"))
+ return process_IHDR(chunk_data, context);
+ if (!strcmp((char const*)chunk_type, "IDAT"))
+ return process_IDAT(chunk_data, context);
+ if (!strcmp((char const*)chunk_type, "PLTE"))
+ return process_PLTE(chunk_data, context);
+ if (!strcmp((char const*)chunk_type, "cHRM"))
+ return process_cHRM(chunk_data, context);
+ if (!strcmp((char const*)chunk_type, "cICP"))
+ return process_cICP(chunk_data, context);
+ if (!strcmp((char const*)chunk_type, "iCCP"))
+ return process_iCCP(chunk_data, context);
+ if (!strcmp((char const*)chunk_type, "gAMA"))
+ return process_gAMA(chunk_data, context);
+ if (!strcmp((char const*)chunk_type, "sRGB"))
+ return process_sRGB(chunk_data, context);
+ if (!strcmp((char const*)chunk_type, "tRNS"))
+ return process_tRNS(chunk_data, context);
+ return true;
+}
+
+PNGImageDecoderPlugin::PNGImageDecoderPlugin(u8 const* data, size_t size)
+{
+ m_context = make<PNGLoadingContext>();
+ m_context->data = data;
+ m_context->data_size = size;
+}
+
+PNGImageDecoderPlugin::~PNGImageDecoderPlugin() = default;
+
+IntSize PNGImageDecoderPlugin::size()
+{
+ if (m_context->state == PNGLoadingContext::State::Error)
+ return {};
+
+ if (m_context->state < PNGLoadingContext::State::SizeDecoded) {
+ bool success = decode_png_size(*m_context);
+ if (!success)
+ return {};
+ }
+
+ return { m_context->width, m_context->height };
+}
+
+void PNGImageDecoderPlugin::set_volatile()
+{
+ if (m_context->bitmap)
+ m_context->bitmap->set_volatile();
+}
+
+bool PNGImageDecoderPlugin::set_nonvolatile(bool& was_purged)
+{
+ if (!m_context->bitmap)
+ return false;
+ return m_context->bitmap->set_nonvolatile(was_purged);
+}
+
+bool PNGImageDecoderPlugin::initialize()
+{
+ return decode_png_header(*m_context);
+}
+
+bool PNGImageDecoderPlugin::sniff(ReadonlyBytes data)
+{
+ PNGLoadingContext context;
+ context.data = data.data();
+ context.data_size = data.size();
+ return decode_png_header(context);
+}
+
+ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> PNGImageDecoderPlugin::create(ReadonlyBytes data)
+{
+ return adopt_nonnull_own_or_enomem(new (nothrow) PNGImageDecoderPlugin(data.data(), data.size()));
+}
+
+bool PNGImageDecoderPlugin::is_animated()
+{
+ return false;
+}
+
+size_t PNGImageDecoderPlugin::loop_count()
+{
+ return 0;
+}
+
+size_t PNGImageDecoderPlugin::frame_count()
+{
+ return 1;
+}
+
+ErrorOr<ImageFrameDescriptor> PNGImageDecoderPlugin::frame(size_t index)
+{
+ if (index > 0)
+ return Error::from_string_literal("PNGImageDecoderPlugin: Invalid frame index");
+
+ if (m_context->state == PNGLoadingContext::State::Error)
+ return Error::from_string_literal("PNGImageDecoderPlugin: Decoding failed");
+
+ if (m_context->state < PNGLoadingContext::State::BitmapDecoded) {
+ // NOTE: This forces the chunk decoding to happen.
+ TRY(decode_png_bitmap(*m_context));
+ }
+
+ VERIFY(m_context->bitmap);
+ return ImageFrameDescriptor { m_context->bitmap, 0 };
+}
+
+ErrorOr<Optional<ReadonlyBytes>> PNGImageDecoderPlugin::icc_data()
+{
+ if (!decode_png_chunks(*m_context))
+ return Error::from_string_literal("PNGImageDecoderPlugin: Decoding chunks failed");
+
+ if (m_context->embedded_icc_profile.has_value()) {
+ if (!m_context->decompressed_icc_profile.has_value()) {
+ auto result = Compress::ZlibDecompressor::decompress_all(m_context->embedded_icc_profile->compressed_data);
+ if (!result.has_value()) {
+ m_context->embedded_icc_profile.clear();
+ return Error::from_string_literal("PNGImageDecoderPlugin: Decompression of ICC profile failed");
+ }
+ m_context->decompressed_icc_profile = move(*result);
+ }
+
+ return m_context->decompressed_icc_profile.value();
+ }
+
+ // FIXME: Eventually, look at coding_independent_code_points, chromaticities_and_whitepoint, gamma, sRGB_rendering_intent too.
+ // The order is:
+ // 1. Use coding_independent_code_points if it exists, ignore the rest.
+ // 2. Use embedded_icc_profile if it exists, ignore the rest.
+ // 3. Use sRGB_rendering_intent if it exists, ignore the rest.
+ // 4. Use gamma to adjust gamma and chromaticities_and_whitepoint to adjust color.
+ // (Order between 2 and 3 isn't fully clear, but "It is recommended that the sRGB and iCCP chunks do not appear simultaneously in a PNG datastream.")
+
+ return OptionalNone {};
+}
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/PNGLoader.h b/Userland/Libraries/LibGfx/ImageFormats/PNGLoader.h
new file mode 100644
index 0000000000..b8f7b6097f
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/PNGLoader.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <LibGfx/ImageFormats/ImageDecoder.h>
+
+namespace Gfx {
+
+struct PNGLoadingContext;
+
+class PNGImageDecoderPlugin final : public ImageDecoderPlugin {
+public:
+ static bool sniff(ReadonlyBytes);
+ static ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> create(ReadonlyBytes);
+
+ virtual ~PNGImageDecoderPlugin() override;
+
+ virtual IntSize size() override;
+ virtual void set_volatile() override;
+ [[nodiscard]] virtual bool set_nonvolatile(bool& was_purged) override;
+ virtual bool initialize() override;
+ virtual bool is_animated() override;
+ virtual size_t loop_count() override;
+ virtual size_t frame_count() override;
+ virtual ErrorOr<ImageFrameDescriptor> frame(size_t index) override;
+ virtual ErrorOr<Optional<ReadonlyBytes>> icc_data() override;
+
+private:
+ PNGImageDecoderPlugin(u8 const*, size_t);
+
+ OwnPtr<PNGLoadingContext> m_context;
+};
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/PNGShared.h b/Userland/Libraries/LibGfx/ImageFormats/PNGShared.h
new file mode 100644
index 0000000000..299fceb29e
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/PNGShared.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2022, the SerenityOS developers.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/Array.h>
+#include <AK/SIMD.h>
+
+namespace Gfx::PNG {
+
+// https://www.w3.org/TR/PNG/#5PNG-file-signature
+static constexpr Array<u8, 8> header = { 0x89, 'P', 'N', 'G', 13, 10, 26, 10 };
+
+// https://www.w3.org/TR/PNG/#6Colour-values
+enum class ColorType : u8 {
+ Greyscale = 0,
+ Truecolor = 2, // RGB
+ IndexedColor = 3,
+ GreyscaleWithAlpha = 4,
+ TruecolorWithAlpha = 6,
+};
+
+// https://www.w3.org/TR/PNG/#9Filter-types
+enum class FilterType : u8 {
+ None,
+ Sub,
+ Up,
+ Average,
+ Paeth,
+};
+
+// https://www.w3.org/TR/PNG/#9Filter-type-4-Paeth
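+// For example, with left a=50, above b=60 and upper-left c=60: the initial estimate is
+// p = a + b - c = 50, the distances are pa=0, pb=10, pc=10, and the predictor returns a.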
+ALWAYS_INLINE u8 paeth_predictor(u8 a, u8 b, u8 c)
+{
+ int p = a + b - c;
+ int pa = AK::abs(p - a);
+ int pb = AK::abs(p - b);
+ int pc = AK::abs(p - c);
+ if (pa <= pb && pa <= pc)
+ return a;
+ if (pb <= pc)
+ return b;
+ return c;
+}
+
+ALWAYS_INLINE AK::SIMD::u8x4 paeth_predictor(AK::SIMD::u8x4 a, AK::SIMD::u8x4 b, AK::SIMD::u8x4 c)
+{
+ return AK::SIMD::u8x4 {
+ paeth_predictor(a[0], b[0], c[0]),
+ paeth_predictor(a[1], b[1], c[1]),
+ paeth_predictor(a[2], b[2], c[2]),
+ paeth_predictor(a[3], b[3], c[3]),
+ };
+}
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/PNGWriter.cpp b/Userland/Libraries/LibGfx/ImageFormats/PNGWriter.cpp
new file mode 100644
index 0000000000..ad1c7afa51
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/PNGWriter.cpp
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2021, Pierre Hoffmeister
+ * Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2021, Aziz Berkay Yesilyurt <abyesilyurt@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/Concepts.h>
+#include <AK/FixedArray.h>
+#include <AK/SIMDExtras.h>
+#include <AK/String.h>
+#include <LibCompress/Zlib.h>
+#include <LibCrypto/Checksum/CRC32.h>
+#include <LibGfx/Bitmap.h>
+#include <LibGfx/ImageFormats/PNGWriter.h>
+
+#pragma GCC diagnostic ignored "-Wpsabi"
+
+namespace Gfx {
+
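+// A serialized chunk consists of a 4-byte big-endian data length, the 4-byte chunk type,
+// the chunk data itself, and a CRC32 computed over the type and data (the length field is
+// not covered by the CRC).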
+class PNGChunk {
+ using data_length_type = u32;
+
+public:
+ explicit PNGChunk(String);
+ auto const& data() const { return m_data; }
+ String const& type() const { return m_type; }
+ ErrorOr<void> reserve(size_t bytes) { return m_data.try_ensure_capacity(bytes); }
+
+ template<typename T>
+ ErrorOr<void> add_as_big_endian(T);
+
+ ErrorOr<void> add_u8(u8);
+
+ ErrorOr<void> compress_and_add(ReadonlyBytes);
+ ErrorOr<void> add(ReadonlyBytes);
+
+ ErrorOr<void> store_type();
+ void store_data_length();
+ u32 crc();
+
+private:
+ ByteBuffer m_data;
+ String m_type;
+};
+
+PNGChunk::PNGChunk(String type)
+ : m_type(move(type))
+{
+ VERIFY(m_type.bytes().size() == 4);
+
+ // NOTE: These are MUST() because they should always be able to fit in m_data's inline capacity.
+ MUST(add_as_big_endian<data_length_type>(0));
+ MUST(store_type());
+}
+
+ErrorOr<void> PNGChunk::store_type()
+{
+ TRY(add(type().bytes()));
+ return {};
+}
+
+void PNGChunk::store_data_length()
+{
+ auto data_length = BigEndian<u32>(m_data.size() - sizeof(data_length_type) - m_type.bytes().size());
+ __builtin_memcpy(m_data.offset_pointer(0), &data_length, sizeof(u32));
+}
+
+u32 PNGChunk::crc()
+{
+ u32 crc = Crypto::Checksum::CRC32({ m_data.offset_pointer(sizeof(data_length_type)), m_data.size() - sizeof(data_length_type) }).digest();
+ return crc;
+}
+
+ErrorOr<void> PNGChunk::compress_and_add(ReadonlyBytes uncompressed_bytes)
+{
+ return add(TRY(Compress::ZlibCompressor::compress_all(uncompressed_bytes, Compress::ZlibCompressionLevel::Best)));
+}
+
+ErrorOr<void> PNGChunk::add(ReadonlyBytes bytes)
+{
+ TRY(m_data.try_append(bytes));
+ return {};
+}
+
+template<typename T>
+ErrorOr<void> PNGChunk::add_as_big_endian(T data)
+{
+ auto data_out = AK::convert_between_host_and_big_endian(data);
+ TRY(m_data.try_append(&data_out, sizeof(T)));
+ return {};
+}
+
+ErrorOr<void> PNGChunk::add_u8(u8 data)
+{
+ TRY(m_data.try_append(data));
+ return {};
+}
+
+ErrorOr<void> PNGWriter::add_chunk(PNGChunk& png_chunk)
+{
+ png_chunk.store_data_length();
+ u32 crc = png_chunk.crc();
+ TRY(png_chunk.add_as_big_endian(crc));
+ TRY(m_data.try_append(png_chunk.data().data(), png_chunk.data().size()));
+ return {};
+}
+
+ErrorOr<void> PNGWriter::add_png_header()
+{
+ TRY(m_data.try_append(PNG::header.data(), PNG::header.size()));
+ return {};
+}
+
+ErrorOr<void> PNGWriter::add_IHDR_chunk(u32 width, u32 height, u8 bit_depth, PNG::ColorType color_type, u8 compression_method, u8 filter_method, u8 interlace_method)
+{
+ PNGChunk png_chunk { "IHDR"_short_string };
+ TRY(png_chunk.add_as_big_endian(width));
+ TRY(png_chunk.add_as_big_endian(height));
+ TRY(png_chunk.add_u8(bit_depth));
+ TRY(png_chunk.add_u8(to_underlying(color_type)));
+ TRY(png_chunk.add_u8(compression_method));
+ TRY(png_chunk.add_u8(filter_method));
+ TRY(png_chunk.add_u8(interlace_method));
+ TRY(add_chunk(png_chunk));
+ return {};
+}
+
+ErrorOr<void> PNGWriter::add_iCCP_chunk(ReadonlyBytes icc_data)
+{
+ // https://www.w3.org/TR/png/#11iCCP
+ PNGChunk chunk { "iCCP"_short_string };
+
+ TRY(chunk.add("embedded profile"sv.bytes()));
+ TRY(chunk.add_u8(0)); // \0-terminate profile name
+
+ TRY(chunk.add_u8(0)); // compression method deflate
+ TRY(chunk.compress_and_add(icc_data));
+
+ TRY(add_chunk(chunk));
+ return {};
+}
+
+ErrorOr<void> PNGWriter::add_IEND_chunk()
+{
+ PNGChunk png_chunk { "IEND"_short_string };
+ TRY(add_chunk(png_chunk));
+ return {};
+}
+
+union [[gnu::packed]] Pixel {
+ ARGB32 rgba { 0 };
+ struct {
+ u8 red;
+ u8 green;
+ u8 blue;
+ u8 alpha;
+ };
+ AK::SIMD::u8x4 simd;
+
+ ALWAYS_INLINE static AK::SIMD::u8x4 gfx_to_png(Pixel pixel)
+ {
+ swap(pixel.red, pixel.blue);
+ return pixel.simd;
+ }
+};
+static_assert(AssertSize<Pixel, 4>());
+
+ErrorOr<void> PNGWriter::add_IDAT_chunk(Gfx::Bitmap const& bitmap)
+{
+ PNGChunk png_chunk { "IDAT"_short_string };
+ TRY(png_chunk.reserve(bitmap.size_in_bytes()));
+
+ ByteBuffer uncompressed_block_data;
+ TRY(uncompressed_block_data.try_ensure_capacity(bitmap.size_in_bytes() + bitmap.height()));
+
+ auto dummy_scanline = TRY(FixedArray<Pixel>::create(bitmap.width()));
+ auto const* scanline_minus_1 = dummy_scanline.data();
+
+ for (int y = 0; y < bitmap.height(); ++y) {
+ auto* scanline = reinterpret_cast<Pixel const*>(bitmap.scanline(y));
+
+ struct Filter {
+ PNG::FilterType type;
+ ByteBuffer buffer {};
+ int sum = 0;
+
+ ErrorOr<void> append(u8 byte)
+ {
+ TRY(buffer.try_append(byte));
+ sum += static_cast<i8>(byte);
+ return {};
+ }
+
+ ErrorOr<void> append(AK::SIMD::u8x4 simd)
+ {
+ TRY(append(simd[0]));
+ TRY(append(simd[1]));
+ TRY(append(simd[2]));
+ TRY(append(simd[3]));
+ return {};
+ }
+ };
+
+ Filter none_filter { .type = PNG::FilterType::None };
+ TRY(none_filter.buffer.try_ensure_capacity(sizeof(Pixel) * bitmap.height()));
+
+ Filter sub_filter { .type = PNG::FilterType::Sub };
+ TRY(sub_filter.buffer.try_ensure_capacity(sizeof(Pixel) * bitmap.height()));
+
+ Filter up_filter { .type = PNG::FilterType::Up };
+ TRY(up_filter.buffer.try_ensure_capacity(sizeof(Pixel) * bitmap.height()));
+
+ Filter average_filter { .type = PNG::FilterType::Average };
+ TRY(average_filter.buffer.try_ensure_capacity(sizeof(ARGB32) * bitmap.height()));
+
+ Filter paeth_filter { .type = PNG::FilterType::Paeth };
+ TRY(paeth_filter.buffer.try_ensure_capacity(sizeof(ARGB32) * bitmap.height()));
+
+ auto pixel_x_minus_1 = Pixel::gfx_to_png(dummy_scanline[0]);
+ auto pixel_xy_minus_1 = Pixel::gfx_to_png(dummy_scanline[0]);
+
+ for (int x = 0; x < bitmap.width(); ++x) {
+ auto pixel = Pixel::gfx_to_png(scanline[x]);
+ auto pixel_y_minus_1 = Pixel::gfx_to_png(scanline_minus_1[x]);
+
+ TRY(none_filter.append(pixel));
+
+ TRY(sub_filter.append(pixel - pixel_x_minus_1));
+
+ TRY(up_filter.append(pixel - pixel_y_minus_1));
+
+ // The sum Orig(a) + Orig(b) shall be performed without overflow (using at least nine-bit arithmetic).
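+ // e.g. left = 200 and up = 100 widen to 16 bits, sum to 300 (which would wrap in u8)
+ // and average to 150 before being narrowed back to 8 bits.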
+ auto sum = AK::SIMD::to_u16x4(pixel_x_minus_1) + AK::SIMD::to_u16x4(pixel_y_minus_1);
+ auto average = AK::SIMD::to_u8x4(sum / 2);
+ TRY(average_filter.append(pixel - average));
+
+ TRY(paeth_filter.append(pixel - PNG::paeth_predictor(pixel_x_minus_1, pixel_y_minus_1, pixel_xy_minus_1)));
+
+ pixel_x_minus_1 = pixel;
+ pixel_xy_minus_1 = pixel_y_minus_1;
+ }
+
+ scanline_minus_1 = scanline;
+
+ // 12.8 Filter selection: https://www.w3.org/TR/PNG/#12Filter-selection
+ // For best compression of truecolour and greyscale images, the recommended approach
+ // is adaptive filtering in which a filter is chosen for each scanline.
+ // The following simple heuristic has performed well in early tests:
+ // compute the output scanline using all five filters, and select the filter that gives the smallest sum of absolute values of outputs.
+ // (Consider the output bytes as signed differences for this test.)
+ Filter& best_filter = none_filter;
+ if (abs(best_filter.sum) > abs(sub_filter.sum))
+ best_filter = sub_filter;
+ if (abs(best_filter.sum) > abs(up_filter.sum))
+ best_filter = up_filter;
+ if (abs(best_filter.sum) > abs(average_filter.sum))
+ best_filter = average_filter;
+ if (abs(best_filter.sum) > abs(paeth_filter.sum))
+ best_filter = paeth_filter;
+
+ TRY(uncompressed_block_data.try_append(to_underlying(best_filter.type)));
+ TRY(uncompressed_block_data.try_append(best_filter.buffer));
+ }
+
+ TRY(png_chunk.compress_and_add(uncompressed_block_data));
+ TRY(add_chunk(png_chunk));
+ return {};
+}
+
+ErrorOr<ByteBuffer> PNGWriter::encode(Gfx::Bitmap const& bitmap, Options options)
+{
+ PNGWriter writer;
+ TRY(writer.add_png_header());
+ TRY(writer.add_IHDR_chunk(bitmap.width(), bitmap.height(), 8, PNG::ColorType::TruecolorWithAlpha, 0, 0, 0));
+ if (options.icc_data.has_value())
+ TRY(writer.add_iCCP_chunk(options.icc_data.value()));
+ TRY(writer.add_IDAT_chunk(bitmap));
+ TRY(writer.add_IEND_chunk());
+ return ByteBuffer::copy(writer.m_data);
+}
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/PNGWriter.h b/Userland/Libraries/LibGfx/ImageFormats/PNGWriter.h
new file mode 100644
index 0000000000..0891261ed2
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/PNGWriter.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2021, Pierre Hoffmeister
+ * Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/Optional.h>
+#include <AK/Vector.h>
+#include <LibGfx/Forward.h>
+#include <LibGfx/ImageFormats/PNGShared.h>
+
+namespace Gfx {
+
+class PNGChunk;
+
+// This is not a nested struct to work around https://llvm.org/PR36684
+struct PNGWriterOptions {
+ // Data for the iCCP chunk.
+ // FIXME: Allow writing cICP, sRGB, or gAMA instead too.
+ Optional<ReadonlyBytes> icc_data;
+};
+
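+// Typical usage (a sketch; assumes the caller can propagate errors with TRY and already
+// holds a Gfx::Bitmap):
+//
+//     auto png_bytes = TRY(Gfx::PNGWriter::encode(bitmap));
+//
+// The returned buffer is a complete PNG stream: signature, IHDR, optional iCCP, IDAT, IEND.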
+class PNGWriter {
+public:
+ using Options = PNGWriterOptions;
+
+ static ErrorOr<ByteBuffer> encode(Gfx::Bitmap const&, Options options = Options {});
+
+private:
+ PNGWriter() = default;
+
+ Vector<u8> m_data;
+ ErrorOr<void> add_chunk(PNGChunk&);
+ ErrorOr<void> add_png_header();
+ ErrorOr<void> add_IHDR_chunk(u32 width, u32 height, u8 bit_depth, PNG::ColorType color_type, u8 compression_method, u8 filter_method, u8 interlace_method);
+ ErrorOr<void> add_iCCP_chunk(ReadonlyBytes icc_data);
+ ErrorOr<void> add_IDAT_chunk(Gfx::Bitmap const&);
+ ErrorOr<void> add_IEND_chunk();
+};
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/PPMLoader.cpp b/Userland/Libraries/LibGfx/ImageFormats/PPMLoader.cpp
new file mode 100644
index 0000000000..c662e76b87
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/PPMLoader.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2020, Hüseyin ASLITÜRK <asliturk@hotmail.com>
+ * Copyright (c) 2022, the SerenityOS developers.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include "PPMLoader.h"
+#include "PortableImageLoaderCommon.h"
+#include <AK/Endian.h>
+#include <AK/LexicalPath.h>
+#include <AK/ScopeGuard.h>
+#include <AK/StringBuilder.h>
+#include <LibGfx/Streamer.h>
+#include <string.h>
+
+namespace Gfx {
+
+bool read_image_data(PPMLoadingContext& context, Streamer& streamer)
+{
+ Vector<Gfx::Color> color_data;
+ color_data.ensure_capacity(context.width * context.height);
+
+ if (context.type == PPMLoadingContext::Type::ASCII) {
+ u16 red;
+ u16 green;
+ u16 blue;
+
+ while (true) {
+ if (!read_number(streamer, &red))
+ break;
+
+ if (!read_whitespace(context, streamer))
+ break;
+
+ if (!read_number(streamer, &green))
+ break;
+
+ if (!read_whitespace(context, streamer))
+ break;
+
+ if (!read_number(streamer, &blue))
+ break;
+
+ if (!read_whitespace(context, streamer))
+ break;
+
+ Color color { (u8)red, (u8)green, (u8)blue };
+ if (context.format_details.max_val < 255)
+ color = adjust_color(context.format_details.max_val, color);
+ color_data.append(color);
+ }
+ } else if (context.type == PPMLoadingContext::Type::RAWBITS) {
+ u8 pixel[3];
+ while (streamer.read_bytes(pixel, 3)) {
+ color_data.append({ pixel[0], pixel[1], pixel[2] });
+ }
+ }
+
+ if (context.width * context.height != color_data.size())
+ return false;
+
+ if (!create_bitmap(context)) {
+ return false;
+ }
+
+ set_pixels(context, color_data);
+
+ context.state = PPMLoadingContext::State::Bitmap;
+ return true;
+}
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/PPMLoader.h b/Userland/Libraries/LibGfx/ImageFormats/PPMLoader.h
new file mode 100644
index 0000000000..527bddbeae
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/PPMLoader.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2020, Hüseyin ASLITÜRK <asliturk@hotmail.com>
+ * Copyright (c) 2022, the SerenityOS developers.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/StringView.h>
+#include <LibGfx/ImageFormats/ImageDecoder.h>
+#include <LibGfx/ImageFormats/PortableImageMapLoader.h>
+
+namespace Gfx {
+
+struct PPM {
+ static constexpr auto ascii_magic_number = '3';
+ static constexpr auto binary_magic_number = '6';
+ static constexpr StringView image_type = "PPM"sv;
+ u16 max_val { 0 };
+};
+
+using PPMLoadingContext = PortableImageMapLoadingContext<PPM>;
+using PPMImageDecoderPlugin = PortableImageDecoderPlugin<PPMLoadingContext>;
+
+bool read_image_data(PPMLoadingContext& context, Streamer& streamer);
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/PortableFormatWriter.cpp b/Userland/Libraries/LibGfx/ImageFormats/PortableFormatWriter.cpp
new file mode 100644
index 0000000000..56bf8b0bd6
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/PortableFormatWriter.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2023, Lucas Chollet <lucas.chollet@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include "PortableFormatWriter.h"
+#include <AK/String.h>
+
+namespace Gfx {
+
+ErrorOr<ByteBuffer> PortableFormatWriter::encode(Bitmap const& bitmap, Options options)
+{
+ ByteBuffer buffer;
+
+ // FIXME: Add support for PBM and PGM
+
+ TRY(add_header(buffer, options, bitmap.width(), bitmap.height(), 255));
+ TRY(add_pixels(buffer, options, bitmap));
+
+ return buffer;
+}
+
+ErrorOr<void> PortableFormatWriter::add_header(ByteBuffer& buffer, Options const& options, u32 width, u32 height, u32 maximal_value)
+{
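+ // For example, a 640x480 raw (P6) image with the default comment produces:
+ //   P6
+ //   # Generated with SerenityOS - LibGfx.
+ //   640 480
+ //   255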
+ TRY(buffer.try_append(TRY(String::formatted("P{}\n", options.format == Options::Format::ASCII ? "3"sv : "6"sv)).bytes()));
+ TRY(buffer.try_append(TRY(String::formatted("# {}\n", options.comment)).bytes()));
+ TRY(buffer.try_append(TRY(String::formatted("{} {}\n", width, height)).bytes()));
+ TRY(buffer.try_append(TRY(String::formatted("{}\n", maximal_value)).bytes()));
+
+ return {};
+}
+
+ErrorOr<void> PortableFormatWriter::add_pixels(ByteBuffer& buffer, Options const& options, Bitmap const& bitmap)
+{
+ for (int i = 0; i < bitmap.height(); ++i) {
+ for (int j = 0; j < bitmap.width(); ++j) {
+ auto color = bitmap.get_pixel(j, i);
+ if (options.format == Options::Format::ASCII) {
+ TRY(buffer.try_append(TRY(String::formatted("{} {} {}\t", color.red(), color.green(), color.blue())).bytes()));
+ } else {
+ TRY(buffer.try_append(color.red()));
+ TRY(buffer.try_append(color.green()));
+ TRY(buffer.try_append(color.blue()));
+ }
+ }
+ if (options.format == Options::Format::ASCII)
+ TRY(buffer.try_append('\n'));
+ }
+
+ return {};
+}
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/PortableFormatWriter.h b/Userland/Libraries/LibGfx/ImageFormats/PortableFormatWriter.h
new file mode 100644
index 0000000000..30da31a703
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/PortableFormatWriter.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2023, Lucas Chollet <lucas.chollet@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/ByteBuffer.h>
+#include <LibGfx/Bitmap.h>
+
+namespace Gfx {
+
+// This is not a nested struct to work around https://llvm.org/PR36684
+struct PortableFormatWriterOptions {
+ enum class Format {
+ ASCII,
+ Raw,
+ };
+
+ Format format = Format::Raw;
+ StringView comment = "Generated with SerenityOS - LibGfx."sv;
+};
+
+class PortableFormatWriter {
+public:
+ using Options = PortableFormatWriterOptions;
+
+ static ErrorOr<ByteBuffer> encode(Bitmap const&, Options options = Options {});
+
+private:
+ PortableFormatWriter() = delete;
+
+ static ErrorOr<void> add_header(ByteBuffer&, Options const& options, u32 width, u32 height, u32 max_value);
+ static ErrorOr<void> add_pixels(ByteBuffer&, Options const& options, Bitmap const&);
+};
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/PortableImageLoaderCommon.h b/Userland/Libraries/LibGfx/ImageFormats/PortableImageLoaderCommon.h
new file mode 100644
index 0000000000..a17b6c1921
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/PortableImageLoaderCommon.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2020, Hüseyin Aslıtürk <asliturk@hotmail.com>
+ * Copyright (c) 2020-2022, the SerenityOS developers.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/Debug.h>
+#include <AK/DeprecatedString.h>
+#include <AK/Endian.h>
+#include <AK/ScopeGuard.h>
+#include <AK/StringBuilder.h>
+#include <AK/Types.h>
+#include <AK/Vector.h>
+#include <LibGfx/Bitmap.h>
+#include <LibGfx/Color.h>
+#include <LibGfx/ImageFormats/ImageDecoder.h>
+#include <LibGfx/Streamer.h>
+
+namespace Gfx {
+
+static constexpr Color adjust_color(u16 max_val, Color color)
+{
+ color.set_red((color.red() * 255) / max_val);
+ color.set_green((color.green() * 255) / max_val);
+ color.set_blue((color.blue() * 255) / max_val);
+
+ return color;
+}
+
+template<typename TValue>
+static bool read_number(Streamer& streamer, TValue* value)
+{
+ u8 byte {};
+ StringBuilder sb {};
+
+ while (streamer.read(byte)) {
+ if (byte == ' ' || byte == '\t' || byte == '\n' || byte == '\r') {
+ streamer.step_back();
+ break;
+ }
+
+ sb.append(byte);
+ }
+
+ auto const opt_value = sb.to_deprecated_string().to_uint();
+ if (!opt_value.has_value()) {
+ *value = 0;
+ return false;
+ }
+
+ *value = static_cast<u16>(opt_value.value());
+ return true;
+}
+
+template<typename TContext>
+static bool read_comment([[maybe_unused]] TContext& context, Streamer& streamer)
+{
+ bool exist = false;
+ u8 byte {};
+
+ while (streamer.read(byte)) {
+ if (byte == '#') {
+ exist = true;
+ } else if (byte == '\t' || byte == '\n') {
+ return exist;
+ }
+ }
+
+ return exist;
+}
+
+template<typename TContext>
+static bool read_magic_number(TContext& context, Streamer& streamer)
+{
+ if (context.state >= TContext::State::MagicNumber) {
+ return true;
+ }
+
+ if (!context.data || context.data_size < 2) {
+ context.state = TContext::State::Error;
+ dbgln_if(PORTABLE_IMAGE_LOADER_DEBUG, "There is not enough data for {}", TContext::FormatDetails::image_type);
+ return false;
+ }
+
+ u8 magic_number[2] {};
+ if (!streamer.read_bytes(magic_number, 2)) {
+ context.state = TContext::State::Error;
+ dbgln_if(PORTABLE_IMAGE_LOADER_DEBUG, "Can't read the magic number for {}", TContext::FormatDetails::image_type);
+ return false;
+ }
+
+ if (magic_number[0] == 'P' && magic_number[1] == TContext::FormatDetails::ascii_magic_number) {
+ context.type = TContext::Type::ASCII;
+ context.state = TContext::State::MagicNumber;
+ return true;
+ }
+
+ if (magic_number[0] == 'P' && magic_number[1] == TContext::FormatDetails::binary_magic_number) {
+ context.type = TContext::Type::RAWBITS;
+ context.state = TContext::State::MagicNumber;
+ return true;
+ }
+
+ context.state = TContext::State::Error;
+ dbgln_if(PORTABLE_IMAGE_LOADER_DEBUG, "Magic number {}{} is not valid for {}", magic_number[0], magic_number[1], TContext::FormatDetails::image_type);
+ return false;
+}
+
+template<typename TContext>
+static bool read_whitespace(TContext& context, Streamer& streamer)
+{
+ bool exist = false;
+ u8 byte {};
+
+ while (streamer.read(byte)) {
+ if (byte == ' ' || byte == '\t' || byte == '\n' || byte == '\r') {
+ exist = true;
+ } else if (byte == '#') {
+ streamer.step_back();
+ read_comment(context, streamer);
+ } else {
+ streamer.step_back();
+ return exist;
+ }
+ }
+
+ return exist;
+}
+
+template<typename TContext>
+static bool read_width(TContext& context, Streamer& streamer)
+{
+ if (bool const result = read_number(streamer, &context.width);
+ !result || context.width == 0) {
+ return false;
+ }
+
+ context.state = TContext::State::Width;
+ return true;
+}
+
+template<typename TContext>
+static bool read_height(TContext& context, Streamer& streamer)
+{
+ if (bool const result = read_number(streamer, &context.height);
+ !result || context.height == 0) {
+ return false;
+ }
+
+ context.state = TContext::State::Height;
+ return true;
+}
+
+template<typename TContext>
+static bool read_max_val(TContext& context, Streamer& streamer)
+{
+ if (bool const result = read_number(streamer, &context.format_details.max_val);
+ !result || context.format_details.max_val == 0) {
+ return false;
+ }
+
+ if (context.format_details.max_val > 255) {
+ dbgln_if(PORTABLE_IMAGE_LOADER_DEBUG, "Can't parse 2-byte color values for {}", TContext::FormatDetails::image_type);
+ context.state = TContext::State::Error;
+ return false;
+ }
+
+ context.state = TContext::State::Maxval;
+ return true;
+}
+
+template<typename TContext>
+static bool create_bitmap(TContext& context)
+{
+ auto bitmap_or_error = Bitmap::create(BitmapFormat::BGRx8888, { context.width, context.height });
+ if (bitmap_or_error.is_error()) {
+ context.state = TContext::State::Error;
+ return false;
+ }
+ context.bitmap = bitmap_or_error.release_value_but_fixme_should_propagate_errors();
+ return true;
+}
+
+template<typename TContext>
+static void set_pixels(TContext& context, Vector<Gfx::Color> const& color_data)
+{
+ size_t index = 0;
+ for (size_t y = 0; y < context.height; ++y) {
+ for (size_t x = 0; x < context.width; ++x) {
+ context.bitmap->set_pixel(x, y, color_data.at(index));
+ index++;
+ }
+ }
+}
+
+template<typename TContext>
+static bool decode(TContext& context)
+{
+ if (context.state >= TContext::State::Decoded)
+ return true;
+
+ auto error_guard = ArmedScopeGuard([&] {
+ context.state = TContext::State::Error;
+ });
+
+ Streamer streamer(context.data, context.data_size);
+
+ if (!read_magic_number(context, streamer))
+ return false;
+
+ if (!read_whitespace(context, streamer))
+ return false;
+
+ if (!read_width(context, streamer))
+ return false;
+
+ if (!read_whitespace(context, streamer))
+ return false;
+
+ if (!read_height(context, streamer))
+ return false;
+
+ if (context.width > maximum_width_for_decoded_images || context.height > maximum_height_for_decoded_images) {
+ dbgln("This portable image is too large for comfort: {}x{}", context.width, context.height);
+ return false;
+ }
+
+ if (!read_whitespace(context, streamer))
+ return false;
+
+ if constexpr (requires { context.format_details.max_val; }) {
+ if (!read_max_val(context, streamer))
+ return false;
+
+ if (!read_whitespace(context, streamer))
+ return false;
+ }
+
+ if (!read_image_data(context, streamer))
+ return false;
+
+ error_guard.disarm();
+ context.state = TContext::State::Decoded;
+ return true;
+}
+
+template<typename TContext>
+static RefPtr<Gfx::Bitmap> load_impl(u8 const* data, size_t data_size)
+{
+ TContext context {};
+ context.data = data;
+ context.data_size = data_size;
+
+ if (!decode(context)) {
+ return nullptr;
+ }
+ return context.bitmap;
+}
+
+template<typename TContext>
+static RefPtr<Gfx::Bitmap> load_from_memory(u8 const* data, size_t length, DeprecatedString const& mmap_name)
+{
+ auto bitmap = load_impl<TContext>(data, length);
+ if (bitmap)
+ bitmap->set_mmap_name(DeprecatedString::formatted("Gfx::Bitmap [{}] - Decoded {}: {}", bitmap->size(), TContext::FormatDetails::image_type, mmap_name));
+ return bitmap;
+}
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/PortableImageMapLoader.h b/Userland/Libraries/LibGfx/ImageFormats/PortableImageMapLoader.h
new file mode 100644
index 0000000000..9426bca36e
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/PortableImageMapLoader.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2020, Hüseyin ASLITÜRK <asliturk@hotmail.com>
+ * Copyright (c) 2022, the SerenityOS developers.
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/RefPtr.h>
+#include <AK/StringView.h>
+#include <AK/Types.h>
+#include <LibGfx/Bitmap.h>
+#include <LibGfx/ImageFormats/PortableImageLoaderCommon.h>
+
+namespace Gfx {
+
+template<class TFormatDetails>
+struct PortableImageMapLoadingContext {
+ using FormatDetails = TFormatDetails;
+
+ enum class Type {
+ Unknown,
+ ASCII,
+ RAWBITS
+ };
+
+ enum class State {
+ NotDecoded = 0,
+ Error,
+ MagicNumber,
+ Width,
+ Height,
+ Maxval,
+ Bitmap,
+ Decoded
+ };
+
+ Type type { Type::Unknown };
+ State state { State::NotDecoded };
+ u8 const* data { nullptr };
+ size_t data_size { 0 };
+ size_t width { 0 };
+ size_t height { 0 };
+ FormatDetails format_details {};
+ RefPtr<Gfx::Bitmap> bitmap;
+};
+
+template<typename TContext>
+class PortableImageDecoderPlugin final : public ImageDecoderPlugin {
+public:
+ static bool sniff(ReadonlyBytes);
+ static ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> create(ReadonlyBytes);
+
+ PortableImageDecoderPlugin(u8 const*, size_t);
+ virtual ~PortableImageDecoderPlugin() override = default;
+
+ virtual IntSize size() override;
+
+ virtual void set_volatile() override;
+ [[nodiscard]] virtual bool set_nonvolatile(bool& was_purged) override;
+
+ virtual bool initialize() override;
+ virtual bool is_animated() override;
+ virtual size_t loop_count() override;
+ virtual size_t frame_count() override;
+ virtual ErrorOr<ImageFrameDescriptor> frame(size_t index) override;
+ virtual ErrorOr<Optional<ReadonlyBytes>> icc_data() override;
+
+private:
+ OwnPtr<TContext> m_context;
+};
+
+template<typename TContext>
+PortableImageDecoderPlugin<TContext>::PortableImageDecoderPlugin(u8 const* data, size_t size)
+{
+ m_context = make<TContext>();
+ m_context->data = data;
+ m_context->data_size = size;
+}
+
+template<typename TContext>
+IntSize PortableImageDecoderPlugin<TContext>::size()
+{
+ if (m_context->state == TContext::State::Error)
+ return {};
+
+ if (m_context->state < TContext::State::Decoded) {
+ bool success = decode(*m_context);
+ if (!success)
+ return {};
+ }
+
+ return { m_context->width, m_context->height };
+}
+
+template<typename TContext>
+void PortableImageDecoderPlugin<TContext>::set_volatile()
+{
+ if (m_context->bitmap)
+ m_context->bitmap->set_volatile();
+}
+
+template<typename TContext>
+bool PortableImageDecoderPlugin<TContext>::set_nonvolatile(bool& was_purged)
+{
+ if (!m_context->bitmap)
+ return false;
+
+ return m_context->bitmap->set_nonvolatile(was_purged);
+}
+
+template<typename TContext>
+bool PortableImageDecoderPlugin<TContext>::initialize()
+{
+ using Context = TContext;
+ if (m_context->data_size < 2)
+ return false;
+
+ if (m_context->data[0] == 'P' && m_context->data[1] == Context::FormatDetails::ascii_magic_number)
+ return true;
+
+ if (m_context->data[0] == 'P' && m_context->data[1] == Context::FormatDetails::binary_magic_number)
+ return true;
+
+ return false;
+}
+
+template<typename TContext>
+ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> PortableImageDecoderPlugin<TContext>::create(ReadonlyBytes data)
+{
+ return adopt_nonnull_own_or_enomem(new (nothrow) PortableImageDecoderPlugin<TContext>(data.data(), data.size()));
+}
+
+template<typename TContext>
+bool PortableImageDecoderPlugin<TContext>::sniff(ReadonlyBytes data)
+{
+ using Context = TContext;
+ if (data.size() < 2)
+ return false;
+
+ if (data.data()[0] == 'P' && data.data()[1] == Context::FormatDetails::ascii_magic_number)
+ return true;
+
+ if (data.data()[0] == 'P' && data.data()[1] == Context::FormatDetails::binary_magic_number)
+ return true;
+
+ return false;
+}
+
+template<typename TContext>
+bool PortableImageDecoderPlugin<TContext>::is_animated()
+{
+ return false;
+}
+
+template<typename TContext>
+size_t PortableImageDecoderPlugin<TContext>::loop_count()
+{
+ return 0;
+}
+
+template<typename TContext>
+size_t PortableImageDecoderPlugin<TContext>::frame_count()
+{
+ return 1;
+}
+
+template<typename TContext>
+ErrorOr<ImageFrameDescriptor> PortableImageDecoderPlugin<TContext>::frame(size_t index)
+{
+ if (index > 0)
+ return Error::from_string_literal("PortableImageDecoderPlugin: Invalid frame index");
+
+ if (m_context->state == TContext::State::Error)
+ return Error::from_string_literal("PortableImageDecoderPlugin: Decoding failed");
+
+ if (m_context->state < TContext::State::Decoded) {
+ bool success = decode(*m_context);
+ if (!success)
+ return Error::from_string_literal("PortableImageDecoderPlugin: Decoding failed");
+ }
+
+ VERIFY(m_context->bitmap);
+ return ImageFrameDescriptor { m_context->bitmap, 0 };
+}
+
+template<typename TContext>
+ErrorOr<Optional<ReadonlyBytes>> PortableImageDecoderPlugin<TContext>::icc_data()
+{
+ return OptionalNone {};
+}
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/QOILoader.cpp b/Userland/Libraries/LibGfx/ImageFormats/QOILoader.cpp
new file mode 100644
index 0000000000..fcebf42f79
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/QOILoader.cpp
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2021, Linus Groh <linusg@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/Endian.h>
+#include <AK/MemoryStream.h>
+#include <LibGfx/Bitmap.h>
+#include <LibGfx/ImageFormats/QOILoader.h>
+
+namespace Gfx {
+
+static constexpr auto QOI_MAGIC = "qoif"sv;
+static constexpr u8 QOI_OP_RGB = 0b11111110;
+static constexpr u8 QOI_OP_RGBA = 0b11111111;
+static constexpr u8 QOI_OP_INDEX = 0b00000000;
+static constexpr u8 QOI_OP_DIFF = 0b01000000;
+static constexpr u8 QOI_OP_LUMA = 0b10000000;
+static constexpr u8 QOI_OP_RUN = 0b11000000;
+static constexpr u8 QOI_MASK_2 = 0b11000000;
+static constexpr u8 END_MARKER[] = { 0, 0, 0, 0, 0, 0, 0, 1 };
+
+static ErrorOr<QOIHeader> decode_qoi_header(Stream& stream)
+{
+ auto header = TRY(stream.read_value<QOIHeader>());
+ if (StringView { header.magic, array_size(header.magic) } != QOI_MAGIC)
+ return Error::from_string_literal("Invalid QOI image: incorrect header magic");
+ header.width = AK::convert_between_host_and_big_endian(header.width);
+ header.height = AK::convert_between_host_and_big_endian(header.height);
+ return header;
+}
+
+static ErrorOr<Color> decode_qoi_op_rgb(Stream& stream, u8 first_byte, Color pixel)
+{
+ VERIFY(first_byte == QOI_OP_RGB);
+ u8 bytes[3];
+ TRY(stream.read_until_filled({ &bytes, array_size(bytes) }));
+
+ // The alpha value remains unchanged from the previous pixel.
+ return Color { bytes[0], bytes[1], bytes[2], pixel.alpha() };
+}
+
+static ErrorOr<Color> decode_qoi_op_rgba(Stream& stream, u8 first_byte)
+{
+ VERIFY(first_byte == QOI_OP_RGBA);
+ u8 bytes[4];
+ TRY(stream.read_until_filled({ &bytes, array_size(bytes) }));
+ return Color { bytes[0], bytes[1], bytes[2], bytes[3] };
+}
+
+static ErrorOr<u8> decode_qoi_op_index(Stream&, u8 first_byte)
+{
+ VERIFY((first_byte & QOI_MASK_2) == QOI_OP_INDEX);
+ u8 index = first_byte & ~QOI_MASK_2;
+ VERIFY(index <= 63);
+ return index;
+}
+
+static ErrorOr<Color> decode_qoi_op_diff(Stream&, u8 first_byte, Color pixel)
+{
+ VERIFY((first_byte & QOI_MASK_2) == QOI_OP_DIFF);
+ u8 dr = (first_byte & 0b00110000) >> 4;
+ u8 dg = (first_byte & 0b00001100) >> 2;
+ u8 db = (first_byte & 0b00000011);
+ VERIFY(dr <= 3 && dg <= 3 && db <= 3);
+
+ // Values are stored as unsigned integers with a bias of 2.
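+ // For example, first_byte 0b01'10'01'11 encodes dr=2, dg=1, db=3, i.e. deltas of
+ // 0, -1 and +1 once the bias is removed.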
+ return Color {
+ static_cast<u8>(pixel.red() + static_cast<i8>(dr - 2)),
+ static_cast<u8>(pixel.green() + static_cast<i8>(dg - 2)),
+ static_cast<u8>(pixel.blue() + static_cast<i8>(db - 2)),
+ pixel.alpha(),
+ };
+}
+
+static ErrorOr<Color> decode_qoi_op_luma(Stream& stream, u8 first_byte, Color pixel)
+{
+ VERIFY((first_byte & QOI_MASK_2) == QOI_OP_LUMA);
+ auto byte = TRY(stream.read_value<u8>());
+ u8 diff_green = (first_byte & ~QOI_MASK_2);
+ u8 dr_dg = (byte & 0b11110000) >> 4;
+ u8 db_dg = (byte & 0b00001111);
+
+ // Values are stored as unsigned integers with a bias of 32 for the green channel and a bias of 8 for the red and blue channel.
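+ // For example, first_byte 0b10'100010 with a second byte of 0b1001'0111 gives
+ // green +2, red +3 and blue +1 once the biases are removed.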
+ return Color {
+ static_cast<u8>(pixel.red() + static_cast<i8>((diff_green - 32) + (dr_dg - 8))),
+ static_cast<u8>(pixel.green() + static_cast<i8>(diff_green - 32)),
+ static_cast<u8>(pixel.blue() + static_cast<i8>((diff_green - 32) + (db_dg - 8))),
+ pixel.alpha(),
+ };
+}
+
+static ErrorOr<u8> decode_qoi_op_run(Stream&, u8 first_byte)
+{
+ VERIFY((first_byte & QOI_MASK_2) == QOI_OP_RUN);
+ u8 run = first_byte & ~QOI_MASK_2;
+
+ // The run-length is stored with a bias of -1.
+ run += 1;
+
+ // Note that the run-lengths 63 and 64 (b111110 and b111111) are illegal as they are occupied by the QOI_OP_RGB and QOI_OP_RGBA tags.
+ if (run == QOI_OP_RGB || run == QOI_OP_RGBA)
+ return Error::from_string_literal("Invalid QOI image: illegal run length");
+
+ VERIFY(run >= 1 && run <= 62);
+ return run;
+}
+
+static ErrorOr<void> decode_qoi_end_marker(Stream& stream)
+{
+ u8 bytes[array_size(END_MARKER)];
+ TRY(stream.read_until_filled({ &bytes, array_size(bytes) }));
+ if (!stream.is_eof())
+ return Error::from_string_literal("Invalid QOI image: expected end of stream but more bytes are available");
+ if (memcmp(&END_MARKER, &bytes, array_size(bytes)) != 0)
+ return Error::from_string_literal("Invalid QOI image: incorrect end marker");
+ return {};
+}
+
+static ErrorOr<NonnullRefPtr<Bitmap>> decode_qoi_image(Stream& stream, u32 width, u32 height)
+{
+ // FIXME: Why is Gfx::Bitmap's size signed? Makes no sense whatsoever.
+ if (width > NumericLimits<int>::max())
+ return Error::from_string_literal("Cannot create bitmap for QOI image of valid size, width exceeds maximum Gfx::Bitmap width");
+ if (height > NumericLimits<int>::max())
+ return Error::from_string_literal("Cannot create bitmap for QOI image of valid size, height exceeds maximum Gfx::Bitmap height");
+
+ auto bitmap = TRY(Bitmap::create(BitmapFormat::BGRA8888, { width, height }));
+
+ u8 run = 0;
+ Color pixel = { 0, 0, 0, 255 };
+ Color previous_pixels[64] {};
+
+ for (u32 y = 0; y < height; ++y) {
+ for (u32 x = 0; x < width; ++x) {
+ if (run > 0)
+ --run;
+ if (run == 0) {
+ auto first_byte = TRY(stream.read_value<u8>());
+ if (first_byte == QOI_OP_RGB)
+ pixel = TRY(decode_qoi_op_rgb(stream, first_byte, pixel));
+ else if (first_byte == QOI_OP_RGBA)
+ pixel = TRY(decode_qoi_op_rgba(stream, first_byte));
+ else if ((first_byte & QOI_MASK_2) == QOI_OP_INDEX)
+ pixel = previous_pixels[TRY(decode_qoi_op_index(stream, first_byte))];
+ else if ((first_byte & QOI_MASK_2) == QOI_OP_DIFF)
+ pixel = TRY(decode_qoi_op_diff(stream, first_byte, pixel));
+ else if ((first_byte & QOI_MASK_2) == QOI_OP_LUMA)
+ pixel = TRY(decode_qoi_op_luma(stream, first_byte, pixel));
+ else if ((first_byte & QOI_MASK_2) == QOI_OP_RUN)
+ run = TRY(decode_qoi_op_run(stream, first_byte));
+ else
+ return Error::from_string_literal("Invalid QOI image: unknown chunk tag");
+ }
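+ // Index into the 64-entry color cache; e.g. an opaque black pixel (0, 0, 0, 255)
+ // hashes to (255 * 11) % 64 == 53.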
+ auto index_position = (pixel.red() * 3 + pixel.green() * 5 + pixel.blue() * 7 + pixel.alpha() * 11) % 64;
+ previous_pixels[index_position] = pixel;
+ bitmap->set_pixel(x, y, pixel);
+ }
+ }
+ TRY(decode_qoi_end_marker(stream));
+ return { move(bitmap) };
+}
+
+QOIImageDecoderPlugin::QOIImageDecoderPlugin(NonnullOwnPtr<Stream> stream)
+{
+ m_context = make<QOILoadingContext>();
+ m_context->stream = move(stream);
+}
+
+IntSize QOIImageDecoderPlugin::size()
+{
+ if (m_context->state < QOILoadingContext::State::HeaderDecoded) {
+ // FIXME: This is a weird API (inherited from ImageDecoderPlugin), should probably propagate errors by returning ErrorOr<IntSize>.
+ // For the time being, ignore the result and rely on the context's state.
+ (void)decode_header_and_update_context(*m_context->stream);
+ }
+
+ if (m_context->state == QOILoadingContext::State::Error)
+ return {};
+
+ return { m_context->header.width, m_context->header.height };
+}
+
+void QOIImageDecoderPlugin::set_volatile()
+{
+ if (m_context->bitmap)
+ m_context->bitmap->set_volatile();
+}
+
+bool QOIImageDecoderPlugin::set_nonvolatile(bool& was_purged)
+{
+ if (!m_context->bitmap)
+ return false;
+ return m_context->bitmap->set_nonvolatile(was_purged);
+}
+
+bool QOIImageDecoderPlugin::initialize()
+{
+ return !decode_header_and_update_context(*m_context->stream).is_error();
+}
+
+bool QOIImageDecoderPlugin::sniff(ReadonlyBytes data)
+{
+ FixedMemoryStream stream { { data.data(), data.size() } };
+ return !decode_qoi_header(stream).is_error();
+}
+
+ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> QOIImageDecoderPlugin::create(ReadonlyBytes data)
+{
+ auto stream = TRY(try_make<FixedMemoryStream>(data));
+ return adopt_nonnull_own_or_enomem(new (nothrow) QOIImageDecoderPlugin(move(stream)));
+}
+
+ErrorOr<ImageFrameDescriptor> QOIImageDecoderPlugin::frame(size_t index)
+{
+ if (index > 0)
+ return Error::from_string_literal("Invalid frame index");
+
+ // No one should try to decode the frame again after an error was already returned.
+ VERIFY(m_context->state != QOILoadingContext::State::Error);
+
+ if (m_context->state == QOILoadingContext::State::NotDecoded) {
+ TRY(decode_header_and_update_context(*m_context->stream));
+ TRY(decode_image_and_update_context(*m_context->stream));
+ } else if (m_context->state == QOILoadingContext::State::HeaderDecoded) {
+ TRY(decode_image_and_update_context(*m_context->stream));
+ }
+
+ VERIFY(m_context->state == QOILoadingContext::State::ImageDecoded);
+ VERIFY(m_context->bitmap);
+ return ImageFrameDescriptor { m_context->bitmap, 0 };
+}
+
+ErrorOr<void> QOIImageDecoderPlugin::decode_header_and_update_context(Stream& stream)
+{
+ VERIFY(m_context->state < QOILoadingContext::State::HeaderDecoded);
+ auto error_or_header = decode_qoi_header(stream);
+ if (error_or_header.is_error()) {
+ m_context->state = QOILoadingContext::State::Error;
+ return error_or_header.release_error();
+ }
+ m_context->state = QOILoadingContext::State::HeaderDecoded;
+ m_context->header = error_or_header.release_value();
+ return {};
+}
+
+ErrorOr<void> QOIImageDecoderPlugin::decode_image_and_update_context(Stream& stream)
+{
+ VERIFY(m_context->state < QOILoadingContext::State::ImageDecoded);
+ auto error_or_bitmap = decode_qoi_image(stream, m_context->header.width, m_context->header.height);
+ if (error_or_bitmap.is_error()) {
+ m_context->state = QOILoadingContext::State::Error;
+ return error_or_bitmap.release_error();
+ }
+ m_context->state = QOILoadingContext::State::ImageDecoded;
+ m_context->bitmap = error_or_bitmap.release_value();
+ return {};
+}
+
+ErrorOr<Optional<ReadonlyBytes>> QOIImageDecoderPlugin::icc_data()
+{
+ return OptionalNone {};
+}
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/QOILoader.h b/Userland/Libraries/LibGfx/ImageFormats/QOILoader.h
new file mode 100644
index 0000000000..e072828018
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/QOILoader.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2021, Linus Groh <linusg@serenityos.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/Forward.h>
+#include <LibGfx/Forward.h>
+#include <LibGfx/ImageFormats/ImageDecoder.h>
+
+namespace Gfx {
+
+// Decoder for the "Quite OK Image" format (v1.0).
+// https://qoiformat.org/qoi-specification.pdf
+
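+// NOTE: width and height are stored big-endian in the file and are converted to host
+// byte order after the header has been read (see decode_qoi_header() in QOILoader.cpp).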
+struct [[gnu::packed]] QOIHeader {
+ char magic[4];
+ u32 width;
+ u32 height;
+ u8 channels;
+ u8 colorspace;
+};
+
+struct QOILoadingContext {
+ enum class State {
+ NotDecoded = 0,
+ HeaderDecoded,
+ ImageDecoded,
+ Error,
+ };
+ State state { State::NotDecoded };
+ OwnPtr<Stream> stream {};
+ QOIHeader header {};
+ RefPtr<Bitmap> bitmap;
+};
+
+class QOIImageDecoderPlugin final : public ImageDecoderPlugin {
+public:
+ static bool sniff(ReadonlyBytes);
+ static ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> create(ReadonlyBytes);
+
+ virtual ~QOIImageDecoderPlugin() override = default;
+
+ virtual IntSize size() override;
+ virtual void set_volatile() override;
+ [[nodiscard]] virtual bool set_nonvolatile(bool& was_purged) override;
+ virtual bool initialize() override;
+ virtual bool is_animated() override { return false; }
+ virtual size_t loop_count() override { return 0; }
+ virtual size_t frame_count() override { return 1; }
+ virtual ErrorOr<ImageFrameDescriptor> frame(size_t index) override;
+ virtual ErrorOr<Optional<ReadonlyBytes>> icc_data() override;
+
+private:
+ ErrorOr<void> decode_header_and_update_context(Stream&);
+ ErrorOr<void> decode_image_and_update_context(Stream&);
+
+ QOIImageDecoderPlugin(NonnullOwnPtr<Stream>);
+
+ OwnPtr<QOILoadingContext> m_context;
+};
+
+}
+
+template<>
+struct AK::Traits<Gfx::QOIHeader> : public GenericTraits<Gfx::QOIHeader> {
+ static constexpr bool is_trivially_serializable() { return true; }
+};
diff --git a/Userland/Libraries/LibGfx/ImageFormats/QOIWriter.cpp b/Userland/Libraries/LibGfx/ImageFormats/QOIWriter.cpp
new file mode 100644
index 0000000000..4abd119dd4
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/QOIWriter.cpp
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2022, Olivier De Cannière <olivier.decanniere96@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include "QOIWriter.h"
+#include <AK/DeprecatedString.h>
+#include <AK/Endian.h>
+
+namespace Gfx {
+
+static constexpr Array<u8, 4> qoi_magic_bytes = { 'q', 'o', 'i', 'f' };
+static constexpr Array<u8, 8> qoi_end_marker = { 0, 0, 0, 0, 0, 0, 0, 1 };
+
+enum class Colorspace {
+ sRGB,
+ Linear,
+};
+
+enum class Channels {
+ RGB,
+ RGBA,
+};
+
+ErrorOr<ByteBuffer> QOIWriter::encode(Bitmap const& bitmap)
+{
+ QOIWriter writer;
+ TRY(writer.add_header(bitmap.width(), bitmap.height(), Channels::RGBA, Colorspace::sRGB));
+
+ Color previous_pixel = { 0, 0, 0, 255 };
+
+ bool creating_run = false;
+ int run_length = 0;
+
+ for (auto y = 0; y < bitmap.height(); y++) {
+ for (auto x = 0; x < bitmap.width(); x++) {
+ auto pixel = bitmap.get_pixel(x, y);
+
+ // Check for at most 62 consecutive identical pixels.
+ if (pixel == previous_pixel) {
+ if (!creating_run) {
+ creating_run = true;
+ run_length = 0;
+ writer.insert_into_running_array(pixel);
+ }
+
+ run_length++;
+
+ // If the run reaches the maximum length of 62, or if this is the last pixel, create the chunk.
+ if (run_length == 62 || (y == bitmap.height() - 1 && x == bitmap.width() - 1)) {
+ TRY(writer.add_run_chunk(run_length));
+ creating_run = false;
+ }
+
+ continue;
+ }
+
+ // Run ended with the previous pixel. Create a chunk for it and continue processing this pixel.
+ if (creating_run) {
+ TRY(writer.add_run_chunk(run_length));
+ creating_run = false;
+ }
+
+ // Check if the pixel matches a pixel in the running array.
+ auto index = pixel_hash_function(pixel);
+ auto& array_pixel = writer.running_array[index];
+ if (array_pixel == pixel) {
+ TRY(writer.add_index_chunk(index));
+ previous_pixel = pixel;
+ continue;
+ }
+
+ writer.running_array[index] = pixel;
+
+ // Check if pixel can be expressed as a difference of the previous pixel.
+ if (pixel.alpha() == previous_pixel.alpha()) {
+ int red_difference = pixel.red() - previous_pixel.red();
+ int green_difference = pixel.green() - previous_pixel.green();
+ int blue_difference = pixel.blue() - previous_pixel.blue();
+ int relative_red_difference = red_difference - green_difference;
+ int relative_blue_difference = blue_difference - green_difference;
+
+ if (red_difference > -3 && red_difference < 2
+ && green_difference > -3 && green_difference < 2
+ && blue_difference > -3 && blue_difference < 2) {
+ TRY(writer.add_diff_chunk(red_difference, green_difference, blue_difference));
+ previous_pixel = pixel;
+ continue;
+ }
+ if (relative_red_difference > -9 && relative_red_difference < 8
+ && green_difference > -33 && green_difference < 32
+ && relative_blue_difference > -9 && relative_blue_difference < 8) {
+ TRY(writer.add_luma_chunk(relative_red_difference, green_difference, relative_blue_difference));
+ previous_pixel = pixel;
+ continue;
+ }
+
+ TRY(writer.add_rgb_chunk(pixel.red(), pixel.green(), pixel.blue()));
+ previous_pixel = pixel;
+ continue;
+ }
+
+ previous_pixel = pixel;
+
+ // Write full color values.
+ TRY(writer.add_rgba_chunk(pixel.red(), pixel.green(), pixel.blue(), pixel.alpha()));
+ }
+ }
+
+ TRY(writer.add_end_marker());
+
+ return ByteBuffer::copy(writer.m_data);
+}
+
+ErrorOr<void> QOIWriter::add_header(u32 width, u32 height, Channels channels = Channels::RGBA, Colorspace color_space = Colorspace::sRGB)
+{
+ // FIXME: Handle RGB and all linear channels.
+ if (channels == Channels::RGB || color_space == Colorspace::Linear)
+ TODO();
+
+ TRY(m_data.try_append(qoi_magic_bytes.data(), sizeof(qoi_magic_bytes)));
+
+ auto big_endian_width = AK::convert_between_host_and_big_endian(width);
+ TRY(m_data.try_append(bit_cast<u8*>(&big_endian_width), sizeof(width)));
+
+ auto big_endian_height = AK::convert_between_host_and_big_endian(height);
+ TRY(m_data.try_append(bit_cast<u8*>(&big_endian_height), sizeof(height)));
+
+ // Number of channels: 3 = RGB, 4 = RGBA.
+ TRY(m_data.try_append(4));
+
+ // Colorspace: 0 = sRGB, 1 = all linear channels.
+ TRY(m_data.try_append(color_space == Colorspace::sRGB ? 0 : 1));
+
+ return {};
+}
+
+ErrorOr<void> QOIWriter::add_rgb_chunk(u8 r, u8 g, u8 b)
+{
+ constexpr static u8 rgb_tag = 0b1111'1110;
+
+ TRY(m_data.try_append(rgb_tag));
+ TRY(m_data.try_append(r));
+ TRY(m_data.try_append(g));
+ TRY(m_data.try_append(b));
+ return {};
+}
+
+ErrorOr<void> QOIWriter::add_rgba_chunk(u8 r, u8 g, u8 b, u8 a)
+{
+ constexpr static u8 rgba_tag = 0b1111'1111;
+
+ TRY(m_data.try_append(rgba_tag));
+ TRY(m_data.try_append(r));
+ TRY(m_data.try_append(g));
+ TRY(m_data.try_append(b));
+ TRY(m_data.try_append(a));
+ return {};
+}
+
+ErrorOr<void> QOIWriter::add_index_chunk(unsigned int index)
+{
+ constexpr static u8 index_tag = 0b0000'0000;
+
+ u8 chunk = index_tag | index;
+ TRY(m_data.try_append(chunk));
+ return {};
+}
+
+ErrorOr<void> QOIWriter::add_diff_chunk(i8 red_difference, i8 green_difference, i8 blue_difference)
+{
+ constexpr static u8 diff_tag = 0b0100'0000;
+
+ u8 bias = 2;
+ u8 red = red_difference + bias;
+ u8 green = green_difference + bias;
+ u8 blue = blue_difference + bias;
+
+ u8 chunk = diff_tag | (red << 4) | (green << 2) | blue;
+ TRY(m_data.try_append(chunk));
+ return {};
+}
+
+ErrorOr<void> QOIWriter::add_luma_chunk(i8 relative_red_difference, i8 green_difference, i8 relative_blue_difference)
+{
+ constexpr static u8 luma_tag = 0b1000'0000;
+ u8 green_bias = 32;
+ u8 red_blue_bias = 8;
+
+ u8 chunk1 = luma_tag | (green_difference + green_bias);
+ u8 chunk2 = ((relative_red_difference + red_blue_bias) << 4) | (relative_blue_difference + red_blue_bias);
+ TRY(m_data.try_append(chunk1));
+ TRY(m_data.try_append(chunk2));
+ return {};
+}
+
+ErrorOr<void> QOIWriter::add_run_chunk(unsigned run_length)
+{
+ constexpr static u8 run_tag = 0b1100'0000;
+ int bias = -1;
+
+ u8 chunk = run_tag | (run_length + bias);
+ TRY(m_data.try_append(chunk));
+ return {};
+}
+
+ErrorOr<void> QOIWriter::add_end_marker()
+{
+ TRY(m_data.try_append(qoi_end_marker.data(), sizeof(qoi_end_marker)));
+ return {};
+}
+
+u32 QOIWriter::pixel_hash_function(Color pixel)
+{
+ return (pixel.red() * 3 + pixel.green() * 5 + pixel.blue() * 7 + pixel.alpha() * 11) % 64;
+}
+
+void QOIWriter::insert_into_running_array(Color pixel)
+{
+ auto index = pixel_hash_function(pixel);
+ running_array[index] = pixel;
+}
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/QOIWriter.h b/Userland/Libraries/LibGfx/ImageFormats/QOIWriter.h
new file mode 100644
index 0000000000..232aad5cd1
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/QOIWriter.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2022, Olivier De Cannière <olivier.decanniere96@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/Error.h>
+#include <AK/Vector.h>
+#include <LibGfx/Bitmap.h>
+
+namespace Gfx {
+
+enum class Colorspace;
+enum class Channels;
+
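+// Typical usage (a sketch; assumes the caller can propagate errors with TRY):
+//
+//     auto qoi_bytes = TRY(Gfx::QOIWriter::encode(bitmap));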
+class QOIWriter {
+public:
+ static ErrorOr<ByteBuffer> encode(Gfx::Bitmap const&);
+
+private:
+ QOIWriter() = default;
+
+ Vector<u8> m_data;
+ ErrorOr<void> add_header(u32 width, u32 height, Channels, Colorspace);
+ ErrorOr<void> add_rgb_chunk(u8, u8, u8);
+ ErrorOr<void> add_rgba_chunk(u8, u8, u8, u8);
+ ErrorOr<void> add_index_chunk(u32 index);
+ ErrorOr<void> add_diff_chunk(i8 red_difference, i8 green_difference, i8 blue_difference);
+ ErrorOr<void> add_luma_chunk(i8 relative_red_difference, i8 green_difference, i8 relative_blue_difference);
+ ErrorOr<void> add_run_chunk(u32 run_length);
+ ErrorOr<void> add_end_marker();
+
+ Array<Color, 64> running_array;
+ static u32 pixel_hash_function(Color pixel);
+ void insert_into_running_array(Color pixel);
+};
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/TGALoader.cpp b/Userland/Libraries/LibGfx/ImageFormats/TGALoader.cpp
new file mode 100644
index 0000000000..0d5a8e1fea
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/TGALoader.cpp
@@ -0,0 +1,370 @@
+/*
+ * Copyright (c) 2022, Tom Needham <06needhamt@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/Span.h>
+#include <AK/StdLibExtraDetails.h>
+#include <AK/String.h>
+#include <LibGfx/ImageFormats/TGALoader.h>
+
+namespace Gfx {
+
+enum TGADataType : u8 {
+ None = 0,
+ UncompressedColorMapped = 1,
+ UncompressedRGB = 2,
+ UncompressedBlackAndWhite = 3,
+ RunLengthEncodedColorMapped = 9,
+ RunLengthEncodedRGB = 10,
+ CompressedBlackAndWhite = 11,
+ CompressedColorMapped = 32,
+ CompressedColorMappedFourPass = 33
+};
+
+struct [[gnu::packed]] TGAHeader {
+ u8 id_length;
+ u8 color_map_type;
+ TGADataType data_type_code;
+ i16 color_map_origin;
+ i16 color_map_length;
+ u8 color_map_depth;
+ i16 x_origin;
+ i16 y_origin;
+ u16 width;
+ u16 height;
+ u8 bits_per_pixel;
+ u8 image_descriptor;
+};
+
+static_assert(sizeof(TGAHeader) == 18);
+
+union [[gnu::packed]] TGAPixel {
+ struct TGAColor {
+ u8 blue;
+ u8 green;
+ u8 red;
+ u8 alpha;
+ } components;
+
+ u32 data;
+};
+
+struct TGAPixelPacket {
+ bool raw;
+ u8 pixels_count;
+};
+
+static_assert(AssertSize<TGAPixel, 4>());
+
+class TGAReader {
+public:
+ TGAReader(ReadonlyBytes data)
+ : m_data(move(data))
+ {
+ }
+
+ TGAReader(ReadonlyBytes data, size_t index)
+ : m_data(move(data))
+ , m_index(index)
+ {
+ }
+
+ ALWAYS_INLINE u8 read_u8()
+ {
+ u8 value = m_data[m_index];
+ m_index++;
+ return value;
+ }
+
+ ALWAYS_INLINE i8 read_i8()
+ {
+ return static_cast<i8>(read_u8());
+ }
+
+ ALWAYS_INLINE u16 read_u16()
+ {
+ return read_u8() | read_u8() << 8;
+ }
+
+ ALWAYS_INLINE i16 read_i16()
+ {
+ return read_i8() | read_i8() << 8;
+ }
+
+ ALWAYS_INLINE u32 read_u32()
+ {
+ return read_u16() | read_u16() << 16;
+ }
+
+ ALWAYS_INLINE i32 read_i32()
+ {
+ return read_i16() | read_i16() << 16;
+ }
+
+ ALWAYS_INLINE TGAPixelPacket read_packet_type()
+ {
+ auto pixel_packet_type = read_u8();
+ auto pixel_packet = TGAPixelPacket();
+ pixel_packet.raw = !(pixel_packet_type & 0x80);
+ pixel_packet.pixels_count = (pixel_packet_type & 0x7f);
+
+ // NOTE: Run-length-encoded/Raw pixel packets cannot encode zero pixels,
+ // so value 0 stands for 1 pixel, 1 stands for 2, etc...
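+ // For example, a packet byte of 0x83 describes a run that repeats the next stored pixel
+ // 4 times, while 0x02 describes a raw packet followed by 3 literal pixel values.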
+ pixel_packet.pixels_count++;
+ return pixel_packet;
+ }
+
+ ALWAYS_INLINE TGAPixel read_pixel(u8 bits_per_pixel)
+ {
+ auto pixel = TGAPixel();
+
+ switch (bits_per_pixel) {
+ case 24:
+ pixel.components.blue = read_u8();
+ pixel.components.green = read_u8();
+ pixel.components.red = read_u8();
+ pixel.components.alpha = 0xFF;
+ return pixel;
+
+ case 32:
+ pixel.components.blue = read_u8();
+ pixel.components.green = read_u8();
+ pixel.components.red = read_u8();
+ pixel.components.alpha = read_u8();
+ return pixel;
+
+ default:
+ VERIFY_NOT_REACHED();
+ }
+ }
+
+ size_t index() const
+ {
+ return m_index;
+ }
+
+ ReadonlyBytes data() const
+ {
+ return m_data;
+ }
+
+private:
+ ReadonlyBytes m_data;
+ size_t m_index { 0 };
+};
+
+struct TGALoadingContext {
+ TGAHeader header;
+ OwnPtr<TGAReader> reader = { nullptr };
+ RefPtr<Gfx::Bitmap> bitmap;
+};
+
+TGAImageDecoderPlugin::TGAImageDecoderPlugin(u8 const* file_data, size_t file_size)
+{
+ m_context = make<TGALoadingContext>();
+ m_context->reader = make<TGAReader>(ReadonlyBytes { file_data, file_size });
+}
+
+TGAImageDecoderPlugin::~TGAImageDecoderPlugin() = default;
+
+IntSize TGAImageDecoderPlugin::size()
+{
+ return IntSize { m_context->header.width, m_context->header.height };
+}
+
+void TGAImageDecoderPlugin::set_volatile()
+{
+ if (m_context->bitmap)
+ m_context->bitmap->set_volatile();
+}
+
+bool TGAImageDecoderPlugin::set_nonvolatile(bool& was_purged)
+{
+ if (!m_context->bitmap)
+ return false;
+ return m_context->bitmap->set_nonvolatile(was_purged);
+}
+
+bool TGAImageDecoderPlugin::decode_tga_header()
+{
+ auto& reader = m_context->reader;
+ if (reader->data().size() < sizeof(TGAHeader))
+ return false;
+
+ m_context->header = TGAHeader();
+ m_context->header.id_length = reader->read_u8();
+ m_context->header.color_map_type = reader->read_u8();
+ m_context->header.data_type_code = static_cast<TGADataType>(reader->read_u8());
+ m_context->header.color_map_origin = reader->read_i16();
+ m_context->header.color_map_length = reader->read_i16();
+ m_context->header.color_map_depth = reader->read_u8();
+ m_context->header.x_origin = reader->read_i16();
+ m_context->header.y_origin = reader->read_i16();
+ m_context->header.width = reader->read_u16();
+ m_context->header.height = reader->read_u16();
+ m_context->header.bits_per_pixel = reader->read_u8();
+ m_context->header.image_descriptor = reader->read_u8();
+
+ auto bytes_remaining = reader->data().size() - reader->index();
+
+ // FIXME: Check for multiplication overflow!
+ if (m_context->header.data_type_code == TGADataType::UncompressedRGB && bytes_remaining < static_cast<size_t>(m_context->header.width * m_context->header.height * (m_context->header.bits_per_pixel / 8)))
+ return false;
+
+ if (m_context->header.bits_per_pixel < 8 || m_context->header.bits_per_pixel > 32)
+ return false;
+
+ return true;
+}
+
+bool TGAImageDecoderPlugin::initialize()
+{
+ return decode_tga_header();
+}
+
+ErrorOr<bool> TGAImageDecoderPlugin::validate_before_create(ReadonlyBytes data)
+{
+ if (data.size() < sizeof(TGAHeader))
+ return false;
+ TGAHeader const& header = *reinterpret_cast<TGAHeader const*>(data.data());
+ // FIXME: Check for multiplication overflow!
+ if (header.data_type_code == TGADataType::UncompressedRGB && data.size() < static_cast<size_t>(header.width * header.height * (header.bits_per_pixel / 8)))
+ return false;
+ if (header.bits_per_pixel < 8 || header.bits_per_pixel > 32)
+ return false;
+ return true;
+}
+
+ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> TGAImageDecoderPlugin::create(ReadonlyBytes data)
+{
+ return adopt_nonnull_own_or_enomem(new (nothrow) TGAImageDecoderPlugin(data.data(), data.size()));
+}
+
+bool TGAImageDecoderPlugin::is_animated()
+{
+ return false;
+}
+
+size_t TGAImageDecoderPlugin::loop_count()
+{
+ return 0;
+}
+
+size_t TGAImageDecoderPlugin::frame_count()
+{
+ return 1;
+}
+
+ErrorOr<ImageFrameDescriptor> TGAImageDecoderPlugin::frame(size_t index)
+{
+ auto bits_per_pixel = m_context->header.bits_per_pixel;
+ auto color_map = m_context->header.color_map_type;
+ auto data_type = m_context->header.data_type_code;
+ auto width = m_context->header.width;
+ auto height = m_context->header.height;
+ auto x_origin = m_context->header.x_origin;
+ auto y_origin = m_context->header.y_origin;
+
+ if (index != 0)
+ return Error::from_string_literal("TGAImageDecoderPlugin: frame index must be 0");
+
+ if (color_map > 1)
+ return Error::from_string_literal("TGAImageDecoderPlugin: Invalid color map type");
+
+ if (m_context->bitmap) {
+ return ImageFrameDescriptor { m_context->bitmap, 0 };
+ } else {
+ // NOTE: Just to be on the safe side, if m_context->bitmap is nullptr, re-construct
+ // the reader object. This ensures that if the bitmap was set as volatile and has
+ // since been purged, it can always be re-generated by another call to this method.
+ VERIFY(m_context->reader);
+ m_context->reader = make<TGAReader>(m_context->reader->data(), sizeof(TGAHeader));
+ }
+
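+ // 24 bpp sources decode into an opaque BGRx bitmap; 32 bpp sources keep their alpha channel in a BGRA bitmap.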
+ RefPtr<Gfx::Bitmap> bitmap;
+ switch (bits_per_pixel) {
+ case 24:
+ bitmap = TRY(Bitmap::create(BitmapFormat::BGRx8888, { m_context->header.width, m_context->header.height }));
+ break;
+
+ case 32:
+ bitmap = TRY(Bitmap::create(BitmapFormat::BGRA8888, { m_context->header.width, m_context->header.height }));
+ break;
+
+ default:
+ // FIXME: Implement other TGA bit depths
+ return Error::from_string_literal("TGAImageDecoderPlugin: Can only handle 24 and 32 bits per pixel");
+ }
+
+ // FIXME: Determine the image origin from the Image descriptor (Field 5.6, bits 4 and 5)
+ // instead of relying on the X and Y origin coordinates.
+
+ // NOTE: If the Y origin is negative, assume the generating software meant the origin
+ // to lie at the top of the picture, i.e. at the full image height.
+ // At least, this is the observed behavior for some pictures generated with GIMP.
+ if (y_origin < 0)
+ y_origin = height;
+ if (y_origin != 0 && y_origin != height)
+ return Error::from_string_literal("TGAImageDecoderPlugin: Can only handle Y origin which is 0 or the entire height");
+ if (x_origin != 0 && x_origin != width)
+ return Error::from_string_literal("TGAImageDecoderPlugin: Can only handle X origin which is 0 or the entire width");
+
+ switch (data_type) {
+ case TGADataType::UncompressedRGB: {
+ for (int row = 0; row < height; ++row) {
+ for (int col = 0; col < width; ++col) {
+ auto pixel = m_context->reader->read_pixel(bits_per_pixel);
+ auto actual_row = row;
+ if (y_origin < height)
+ actual_row = height - 1 - row;
+ auto actual_col = col;
+ if (x_origin > width)
+ actual_col = width - 1 - col;
+ bitmap->scanline(actual_row)[actual_col] = pixel.data;
+ }
+ }
+ break;
+ }
+ case TGADataType::RunLengthEncodedRGB: {
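+ // Each RLE packet starts with a one-byte header: with the high bit set it is a run-length
+ // packet (one pixel value repeated pixels_count times), with the high bit clear it is a raw
+ // packet (pixels_count literal pixels); the low seven bits store pixels_count - 1.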
+ size_t pixel_index = 0;
+ size_t pixel_count = height * width;
+ while (pixel_index < pixel_count) {
+ auto packet_type = m_context->reader->read_packet_type();
+ VERIFY(packet_type.pixels_count > 0);
+ TGAPixel pixel = m_context->reader->read_pixel(bits_per_pixel);
+ auto max_pixel_index = min(pixel_index + packet_type.pixels_count, pixel_count);
+ for (size_t current_pixel_index = pixel_index; current_pixel_index < max_pixel_index; ++current_pixel_index) {
+ int row = current_pixel_index / width;
+ int col = current_pixel_index % width;
+ auto actual_row = row;
+ if (y_origin < height)
+ actual_row = height - 1 - row;
+ auto actual_col = col;
+ if (x_origin > width)
+ actual_col = width - 1 - col;
+ bitmap->scanline(actual_row)[actual_col] = pixel.data;
+ if (packet_type.raw && (current_pixel_index + 1) < max_pixel_index)
+ pixel = m_context->reader->read_pixel(bits_per_pixel);
+ }
+ pixel_index += packet_type.pixels_count;
+ }
+ break;
+ }
+ default:
+ // FIXME: Implement other TGA data types
+ return Error::from_string_literal("TGAImageDecoderPlugin: Can currently only handle the UncompressedRGB or CompressedRGB data type");
+ }
+
+ m_context->bitmap = bitmap;
+ return ImageFrameDescriptor { m_context->bitmap, 0 };
+}
+
+ErrorOr<Optional<ReadonlyBytes>> TGAImageDecoderPlugin::icc_data()
+{
+ return OptionalNone {};
+}
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/TGALoader.h b/Userland/Libraries/LibGfx/ImageFormats/TGALoader.h
new file mode 100644
index 0000000000..70f2719aa1
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/TGALoader.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2022, Tom Needham <06needhamt@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <LibGfx/ImageFormats/ImageDecoder.h>
+
+namespace Gfx {
+
+struct TGALoadingContext;
+
+class TGAImageDecoderPlugin final : public ImageDecoderPlugin {
+public:
+ static ErrorOr<bool> validate_before_create(ReadonlyBytes);
+ static ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> create(ReadonlyBytes);
+
+ virtual ~TGAImageDecoderPlugin() override;
+ TGAImageDecoderPlugin(u8 const*, size_t);
+
+ virtual IntSize size() override;
+ virtual void set_volatile() override;
+ [[nodiscard]] virtual bool set_nonvolatile(bool& was_purged) override;
+ virtual bool initialize() override;
+ virtual bool is_animated() override;
+ virtual size_t loop_count() override;
+ virtual size_t frame_count() override;
+ virtual ErrorOr<ImageFrameDescriptor> frame(size_t index) override;
+ virtual ErrorOr<Optional<ReadonlyBytes>> icc_data() override;
+
+private:
+ bool decode_tga_header();
+ OwnPtr<TGALoadingContext> m_context;
+};
+
+}
diff --git a/Userland/Libraries/LibGfx/ImageFormats/WebPLoader.cpp b/Userland/Libraries/LibGfx/ImageFormats/WebPLoader.cpp
new file mode 100644
index 0000000000..3106e66088
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/WebPLoader.cpp
@@ -0,0 +1,612 @@
+/*
+ * Copyright (c) 2023, Nico Weber <thakis@chromium.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <AK/Debug.h>
+#include <AK/Endian.h>
+#include <AK/Format.h>
+#include <AK/Vector.h>
+#include <LibGfx/ImageFormats/WebPLoader.h>
+
+// Overview: https://developers.google.com/speed/webp/docs/compression
+// Container: https://developers.google.com/speed/webp/docs/riff_container
+// Lossless format: https://developers.google.com/speed/webp/docs/webp_lossless_bitstream_specification
+// Lossy format: https://datatracker.ietf.org/doc/html/rfc6386
+
+namespace Gfx {
+
+namespace {
+
+struct FourCC {
+ constexpr FourCC(char const* name)
+ {
+ cc[0] = name[0];
+ cc[1] = name[1];
+ cc[2] = name[2];
+ cc[3] = name[3];
+ }
+
+ bool operator==(FourCC const&) const = default;
+ bool operator!=(FourCC const&) const = default;
+
+ char cc[4];
+};
+
+// https://developers.google.com/speed/webp/docs/riff_container#webp_file_header
+struct WebPFileHeader {
+ FourCC riff;
+ LittleEndian<u32> file_size;
+ FourCC webp;
+};
+static_assert(AssertSize<WebPFileHeader, 12>());
+
+struct ChunkHeader {
+ FourCC chunk_type;
+ LittleEndian<u32> chunk_size;
+};
+static_assert(AssertSize<ChunkHeader, 8>());
+
+struct Chunk {
+ FourCC type;
+ ReadonlyBytes data;
+};
+
+struct VP8Header {
+ u8 version;
+ bool show_frame;
+ u32 size_of_first_partition;
+ u32 width;
+ u8 horizontal_scale;
+ u32 height;
+ u8 vertical_scale;
+};
+
+struct VP8LHeader {
+ u16 width;
+ u16 height;
+ bool is_alpha_used;
+};
+
+struct VP8XHeader {
+ bool has_icc;
+ bool has_alpha;
+ bool has_exif;
+ bool has_xmp;
+ bool has_animation;
+ u32 width;
+ u32 height;
+};
+
+struct ANIMChunk {
+ u32 background_color;
+ u16 loop_count;
+};
+
+}
+
+struct WebPLoadingContext {
+ enum State {
+ NotDecoded = 0,
+ Error,
+ HeaderDecoded,
+ FirstChunkRead,
+ FirstChunkDecoded,
+ ChunksDecoded,
+ BitmapDecoded,
+ };
+ State state { State::NotDecoded };
+ ReadonlyBytes data;
+
+ ReadonlyBytes chunks_cursor;
+
+ Optional<IntSize> size;
+
+ RefPtr<Gfx::Bitmap> bitmap;
+
+ // Either 'VP8 ' (simple lossy file), 'VP8L' (simple lossless file), or 'VP8X' (extended file).
+ Optional<Chunk> first_chunk;
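+ // Which union member is valid is determined by first_chunk->type.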
+ union {
+ VP8Header vp8_header;
+ VP8LHeader vp8l_header;
+ VP8XHeader vp8x_header;
+ };
+
+ // If first_chunk is not a VP8X chunk, only image_data_chunk is set; all the other optional chunks stay empty.
+
+ // "For a still image, the image data consists of a single frame, which is made up of:
+ // An optional alpha subchunk.
+ // A bitstream subchunk."
+ Optional<Chunk> alpha_chunk; // 'ALPH'
+ Optional<Chunk> image_data_chunk; // Either 'VP8 ' or 'VP8L'.
+
+ Optional<Chunk> animation_header_chunk; // 'ANIM'
+ Vector<Chunk> animation_frame_chunks; // 'ANMF'
+
+ Optional<Chunk> iccp_chunk; // 'ICCP'
+ Optional<Chunk> exif_chunk; // 'EXIF'
+ Optional<Chunk> xmp_chunk; // 'XMP '
+
+ template<size_t N>
+ [[nodiscard]] class Error error(char const (&string_literal)[N])
+ {
+ state = WebPLoadingContext::State::Error;
+ return Error::from_string_literal(string_literal);
+ }
+};
+
+// https://developers.google.com/speed/webp/docs/riff_container#webp_file_header
+static ErrorOr<void> decode_webp_header(WebPLoadingContext& context)
+{
+ if (context.state >= WebPLoadingContext::HeaderDecoded)
+ return {};
+
+ if (context.data.size() < sizeof(WebPFileHeader))
+ return context.error("Missing WebP header");
+
+ auto& header = *bit_cast<WebPFileHeader const*>(context.data.data());
+ if (header.riff != FourCC("RIFF") || header.webp != FourCC("WEBP"))
+ return context.error("Invalid WebP header");
+
+ // "File Size: [...] The size of the file in bytes starting at offset 8. The maximum value of this field is 2^32 minus 10 bytes."
+ u32 const maximum_webp_file_size = 0xffff'ffff - 9;
+ if (header.file_size > maximum_webp_file_size)
+ return context.error("WebP header file size over maximum");
+
+ // "The file size in the header is the total size of the chunks that follow plus 4 bytes for the 'WEBP' FourCC.
+ // The file SHOULD NOT contain any data after the data specified by File Size.
+ // Readers MAY parse such files, ignoring the trailing data."
+ if (context.data.size() - 8 < header.file_size)
+ return context.error("WebP data too small for size in header");
+ if (context.data.size() - 8 > header.file_size) {
+ dbgln_if(WEBP_DEBUG, "WebP has {} bytes of data, but header needs only {}. Trimming.", context.data.size(), header.file_size + 8);
+ context.data = context.data.trim(header.file_size + 8);
+ }
+
+ context.state = WebPLoadingContext::HeaderDecoded;
+ return {};
+}
+
+// https://developers.google.com/speed/webp/docs/riff_container#riff_file_format
+static ErrorOr<Chunk> decode_webp_chunk_header(WebPLoadingContext& context, ReadonlyBytes chunks)
+{
+ if (chunks.size() < sizeof(ChunkHeader))
+ return context.error("Not enough data for WebP chunk header");
+
+ auto const& header = *bit_cast<ChunkHeader const*>(chunks.data());
+ dbgln_if(WEBP_DEBUG, "chunk {} size {}", header.chunk_type, header.chunk_size);
+
+ if (chunks.size() < sizeof(ChunkHeader) + header.chunk_size)
+ return context.error("Not enough data for WebP chunk");
+
+ return Chunk { header.chunk_type, { chunks.data() + sizeof(ChunkHeader), header.chunk_size } };
+}
+
+// https://developers.google.com/speed/webp/docs/riff_container#riff_file_format
+static ErrorOr<Chunk> decode_webp_advance_chunk(WebPLoadingContext& context, ReadonlyBytes& chunks)
+{
+ auto chunk = TRY(decode_webp_chunk_header(context, chunks));
+
+ // "Chunk Size: 32 bits (uint32)
+ // The size of the chunk in bytes, not including this field, the chunk identifier or padding.
+ // Chunk Payload: Chunk Size bytes
+ // The data payload. If Chunk Size is odd, a single padding byte -- that MUST be 0 to conform with RIFF -- is added."
+ chunks = chunks.slice(sizeof(ChunkHeader) + chunk.data.size());
+
+ if (chunk.data.size() % 2 != 0) {
+ if (chunks.is_empty())
+ return context.error("Missing data for padding byte");
+ if (*chunks.data() != 0)
+ return context.error("Padding byte is not 0");
+ chunks = chunks.slice(1);
+ }
+
+ return chunk;
+}
+
+// https://developers.google.com/speed/webp/docs/riff_container#simple_file_format_lossy
+// https://datatracker.ietf.org/doc/html/rfc6386#section-19 "Annex A: Bitstream Syntax"
+static ErrorOr<VP8Header> decode_webp_chunk_VP8_header(WebPLoadingContext& context, Chunk const& vp8_chunk)
+{
+ VERIFY(vp8_chunk.type == FourCC("VP8 "));
+
+ if (vp8_chunk.data.size() < 10)
+ return context.error("WebPImageDecoderPlugin: 'VP8 ' chunk too small");
+
+ // FIXME: Eventually, this should probably call into LibVideo/VP8,
+ // and image decoders should move into LibImageDecoders which depends on both LibGfx and LibVideo.
+ // (LibVideo depends on LibGfx, so LibGfx can't depend on LibVideo itself.)
+
+ // https://datatracker.ietf.org/doc/html/rfc6386#section-4 "Overview of Compressed Data Format"
+ // "The decoder is simply presented with a sequence of compressed frames [...]
+ // The first frame presented to the decompressor is [...] a key frame. [...]
+ // [E]very compressed frame has three or more pieces. It begins with an uncompressed data chunk comprising 10 bytes in the case of key frames
+
+ u8 const* data = vp8_chunk.data.data();
+
+ // https://datatracker.ietf.org/doc/html/rfc6386#section-9.1 "Uncompressed Data Chunk"
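+ // The 3-byte frame tag packs, from LSB to MSB: 1 bit frame type (0 for key frames),
+ // 3 bits version, 1 bit show_frame, and 19 bits for the size of the first data partition.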
+ u32 frame_tag = data[0] | (data[1] << 8) | (data[2] << 16);
+ bool is_key_frame = (frame_tag & 1) == 0; // https://www.rfc-editor.org/errata/eid5534
+ u8 version = (frame_tag & 0xe) >> 1;
+ bool show_frame = (frame_tag & 0x10) != 0;
+ u32 size_of_first_partition = frame_tag >> 5;
+
+ if (!is_key_frame)
+ return context.error("WebPImageDecoderPlugin: 'VP8 ' chunk not a key frame");
+
+ // FIXME: A frame with !show_frame probably does not make sense in a WebP file either.
+
+ u32 start_code = data[3] | (data[4] << 8) | (data[5] << 16);
+ if (start_code != 0x2a019d) // https://www.rfc-editor.org/errata/eid7370
+ return context.error("WebPImageDecoderPlugin: 'VP8 ' chunk invalid start_code");
+
+ // "The scaling specifications for each dimension are encoded as follows.
+ // 0 | No upscaling (the most common case).
+ // 1 | Upscale by 5/4.
+ // 2 | Upscale by 5/3.
+ // 3 | Upscale by 2."
+ // This is a display-time operation and doesn't affect decoding.
+ u16 width_and_horizontal_scale = data[6] | (data[7] << 8);
+ u16 width = width_and_horizontal_scale & 0x3fff;
+ u8 horizontal_scale = width_and_horizontal_scale >> 14;
+
+ u16 height_and_vertical_scale = data[8] | (data[9] << 8);
+ u16 height = height_and_vertical_scale & 0x3fff;
+ u8 vertical_scale = height_and_vertical_scale >> 14;
+
+ dbgln_if(WEBP_DEBUG, "version {}, show_frame {}, size_of_first_partition {}, width {}, horizontal_scale {}, height {}, vertical_scale {}",
+ version, show_frame, size_of_first_partition, width, horizontal_scale, height, vertical_scale);
+
+ return VP8Header { version, show_frame, size_of_first_partition, width, horizontal_scale, height, vertical_scale };
+}
+
+// https://developers.google.com/speed/webp/docs/riff_container#simple_file_format_lossless
+// https://developers.google.com/speed/webp/docs/webp_lossless_bitstream_specification#7_overall_structure_of_the_format
+static ErrorOr<VP8LHeader> decode_webp_chunk_VP8L_header(WebPLoadingContext& context, Chunk const& vp8l_chunk)
+{
+ VERIFY(vp8l_chunk.type == FourCC("VP8L"));
+
+ // https://developers.google.com/speed/webp/docs/webp_lossless_bitstream_specification#3_riff_header
+ if (vp8l_chunk.data.size() < 5)
+ return context.error("WebPImageDecoderPlugin: VP8L chunk too small");
+
+ u8 const* data = vp8l_chunk.data.data();
+ u8 signature = data[0];
+ if (signature != 0x2f)
+ return context.error("WebPImageDecoderPlugin: VP8L chunk invalid signature");
+
+ // 14 bits width-1, 14 bits height-1, 1 bit alpha hint, 3 bit version_number.
+ u16 width = (data[1] | ((data[2] & 0x3f) << 8)) + 1;
+ u16 height = ((data[2] >> 6) | (data[3] << 2) | ((data[4] & 0xf) << 12)) + 1;
+ bool is_alpha_used = (data[4] & 0x10) != 0;
+ u8 version_number = (data[4] & 0xe0) >> 5;
+
+ dbgln_if(WEBP_DEBUG, "width {}, height {}, is_alpha_used {}, version_number {}",
+ width, height, is_alpha_used, version_number);
+
+ // "The version_number is a 3 bit code that must be set to 0. Any other value should be treated as an error. [AMENDED]"
+ if (version_number != 0)
+ return context.error("WebPImageDecoderPlugin: VP8L chunk invalid version_number");
+
+ return VP8LHeader { width, height, is_alpha_used };
+}
+
+static ErrorOr<VP8XHeader> decode_webp_chunk_VP8X(WebPLoadingContext& context, Chunk const& vp8x_chunk)
+{
+ VERIFY(vp8x_chunk.type == FourCC("VP8X"));
+
+ // The VP8X chunk is documented at "Extended WebP file header:" at the end of
+ // https://developers.google.com/speed/webp/docs/riff_container#extended_file_format
+ if (vp8x_chunk.data.size() < 10)
+ return context.error("WebPImageDecoderPlugin: VP8X chunk too small");
+
+ u8 const* data = vp8x_chunk.data.data();
+
+ // 1 byte flags
+ // "Reserved (Rsv): 2 bits MUST be 0. Readers MUST ignore this field.
+ // ICC profile (I): 1 bit Set if the file contains an ICC profile.
+ // Alpha (L): 1 bit Set if any of the frames of the image contain transparency information ("alpha").
+ // Exif metadata (E): 1 bit Set if the file contains Exif metadata.
+ // XMP metadata (X): 1 bit Set if the file contains XMP metadata.
+ // Animation (A): 1 bit Set if this is an animated image. Data in 'ANIM' and 'ANMF' chunks should be used to control the animation.
+ // Reserved (R): 1 bit MUST be 0. Readers MUST ignore this field."
+ u8 flags = data[0];
+ bool has_icc = flags & 0x20;
+ bool has_alpha = flags & 0x10;
+ bool has_exif = flags & 0x8;
+ bool has_xmp = flags & 0x4;
+ bool has_animation = flags & 0x2;
+
+ // 3 bytes reserved
+ // 3 bytes width minus one
+ u32 width = (data[4] | (data[5] << 8) | (data[6] << 16)) + 1;
+
+ // 3 bytes height minus one
+ u32 height = (data[7] | (data[8] << 8) | (data[9] << 16)) + 1;
+
+ dbgln_if(WEBP_DEBUG, "flags 0x{:x} --{}{}{}{}{}{}, width {}, height {}",
+ flags,
+ has_icc ? " icc" : "",
+ has_alpha ? " alpha" : "",
+ has_exif ? " exif" : "",
+ has_xmp ? " xmp" : "",
+ has_animation ? " anim" : "",
+ (flags & 0x3e) == 0 ? " none" : "",
+ width, height);
+
+ return VP8XHeader { has_icc, has_alpha, has_exif, has_xmp, has_animation, width, height };
+}
+
+// https://developers.google.com/speed/webp/docs/riff_container#animation
+static ErrorOr<ANIMChunk> decode_webp_chunk_ANIM(WebPLoadingContext& context, Chunk const& anim_chunk)
+{
+ VERIFY(anim_chunk.type == FourCC("ANIM"));
+ if (anim_chunk.data.size() < 6)
+ return context.error("WebPImageDecoderPlugin: ANIM chunk too small");
+
+ u8 const* data = anim_chunk.data.data();
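+ // The background color is stored in [Blue, Green, Red, Alpha] byte order; a loop count of 0 means the animation repeats forever.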
+ u32 background_color = (u32)data[0] | ((u32)data[1] << 8) | ((u32)data[2] << 16) | ((u32)data[3] << 24);
+ u16 loop_count = data[4] | (data[5] << 8);
+
+ return ANIMChunk { background_color, loop_count };
+}
+
+// https://developers.google.com/speed/webp/docs/riff_container#extended_file_format
+static ErrorOr<void> decode_webp_extended(WebPLoadingContext& context, ReadonlyBytes chunks)
+{
+ VERIFY(context.first_chunk->type == FourCC("VP8X"));
+
+ // FIXME: This isn't quite to spec, which says
+ // "All chunks SHOULD be placed in the same order as listed above.
+ // If a chunk appears in the wrong place, the file is invalid, but readers MAY parse the file, ignoring the chunks that are out of order."
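+ // Only the first occurrence of each optional chunk type is kept; later duplicates are ignored.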
+ auto store = [](auto& field, Chunk const& chunk) {
+ if (!field.has_value())
+ field = chunk;
+ };
+ while (!chunks.is_empty()) {
+ auto chunk = TRY(decode_webp_advance_chunk(context, chunks));
+
+ if (chunk.type == FourCC("ICCP"))
+ store(context.iccp_chunk, chunk);
+ else if (chunk.type == FourCC("ALPH"))
+ store(context.alpha_chunk, chunk);
+ else if (chunk.type == FourCC("ANIM"))
+ store(context.animation_header_chunk, chunk);
+ else if (chunk.type == FourCC("ANMF"))
+ TRY(context.animation_frame_chunks.try_append(chunk));
+ else if (chunk.type == FourCC("EXIF"))
+ store(context.exif_chunk, chunk);
+ else if (chunk.type == FourCC("XMP "))
+ store(context.xmp_chunk, chunk);
+ else if (chunk.type == FourCC("VP8 ") || chunk.type == FourCC("VP8L"))
+ store(context.image_data_chunk, chunk);
+ }
+
+ // Validate chunks.
+
+ // https://developers.google.com/speed/webp/docs/riff_container#animation
+ // "ANIM Chunk: [...] This chunk MUST appear if the Animation flag in the VP8X chunk is set. If the Animation flag is not set and this chunk is present, it MUST be ignored."
+ if (context.vp8x_header.has_animation && !context.animation_header_chunk.has_value())
+ return context.error("WebPImageDecoderPlugin: Header claims animation, but no ANIM chunk");
+ if (!context.vp8x_header.has_animation && context.animation_header_chunk.has_value()) {
+ dbgln_if(WEBP_DEBUG, "WebPImageDecoderPlugin: Header claims no animation, but ANIM chunk present. Ignoring ANIM chunk.");
+ context.animation_header_chunk.clear();
+ }
+
+ // "ANMF Chunk: [...] If the Animation flag is not set, then this chunk SHOULD NOT be present."
+ if (!context.vp8x_header.has_animation && !context.animation_frame_chunks.is_empty()) {
+ dbgln_if(WEBP_DEBUG, "WebPImageDecoderPlugin: Header claims no animation, but ANMF chunks present. Ignoring ANMF chunks.");
+ context.animation_frame_chunks.clear();
+ }
+
+ // https://developers.google.com/speed/webp/docs/riff_container#alpha
+ // "A frame containing a 'VP8L' chunk SHOULD NOT contain this chunk."
+ // FIXME: Also check in ANMF chunks.
+ if (context.alpha_chunk.has_value() && context.image_data_chunk.has_value() && context.image_data_chunk->type == FourCC("VP8L")) {
+ dbgln_if(WEBP_DEBUG, "WebPImageDecoderPlugin: VP8L frames should not have ALPH chunks. Ignoring ALPH chunk.");
+ context.alpha_chunk.clear();
+ }
+
+ // https://developers.google.com/speed/webp/docs/riff_container#color_profile
+ // "This chunk MUST appear before the image data."
+ // FIXME: Doesn't check animated files.
+ if (context.iccp_chunk.has_value() && context.image_data_chunk.has_value() && context.iccp_chunk->data.data() > context.image_data_chunk->data.data())
+ return context.error("WebPImageDecoderPlugin: ICCP chunk is after image data");
+
+ context.state = WebPLoadingContext::State::ChunksDecoded;
+ return {};
+}
+
+static ErrorOr<void> read_webp_first_chunk(WebPLoadingContext& context)
+{
+ if (context.state >= WebPLoadingContext::State::FirstChunkRead)
+ return {};
+
+ if (context.state < WebPLoadingContext::HeaderDecoded)
+ TRY(decode_webp_header(context));
+
+ context.chunks_cursor = context.data.slice(sizeof(WebPFileHeader));
+ auto first_chunk = TRY(decode_webp_advance_chunk(context, context.chunks_cursor));
+
+ if (first_chunk.type != FourCC("VP8 ") && first_chunk.type != FourCC("VP8L") && first_chunk.type != FourCC("VP8X"))
+ return context.error("WebPImageDecoderPlugin: Invalid first chunk type");
+
+ context.first_chunk = first_chunk;
+ context.state = WebPLoadingContext::State::FirstChunkRead;
+
+ if (first_chunk.type == FourCC("VP8 ") || first_chunk.type == FourCC("VP8L"))
+ context.image_data_chunk = first_chunk;
+
+ return {};
+}
+
+static ErrorOr<void> decode_webp_first_chunk(WebPLoadingContext& context)
+{
+ if (context.state >= WebPLoadingContext::State::FirstChunkDecoded)
+ return {};
+
+ if (context.state < WebPLoadingContext::FirstChunkRead)
+ TRY(read_webp_first_chunk(context));
+
+ if (context.first_chunk->type == FourCC("VP8 ")) {
+ context.vp8_header = TRY(decode_webp_chunk_VP8_header(context, context.first_chunk.value()));
+ context.size = IntSize { context.vp8_header.width, context.vp8_header.height };
+ context.state = WebPLoadingContext::State::FirstChunkDecoded;
+ return {};
+ }
+ if (context.first_chunk->type == FourCC("VP8L")) {
+ context.vp8l_header = TRY(decode_webp_chunk_VP8L_header(context, context.first_chunk.value()));
+ context.size = IntSize { context.vp8l_header.width, context.vp8l_header.height };
+ context.state = WebPLoadingContext::State::FirstChunkDecoded;
+ return {};
+ }
+ VERIFY(context.first_chunk->type == FourCC("VP8X"));
+ context.vp8x_header = TRY(decode_webp_chunk_VP8X(context, context.first_chunk.value()));
+ context.size = IntSize { context.vp8x_header.width, context.vp8x_header.height };
+ context.state = WebPLoadingContext::State::FirstChunkDecoded;
+ return {};
+}
+
+static ErrorOr<void> decode_webp_chunks(WebPLoadingContext& context)
+{
+ if (context.state >= WebPLoadingContext::State::ChunksDecoded)
+ return {};
+
+ if (context.state < WebPLoadingContext::FirstChunkDecoded)
+ TRY(decode_webp_first_chunk(context));
+
+ if (context.first_chunk->type == FourCC("VP8X"))
+ return decode_webp_extended(context, context.chunks_cursor);
+
+ context.state = WebPLoadingContext::State::ChunksDecoded;
+ return {};
+}
+
+WebPImageDecoderPlugin::WebPImageDecoderPlugin(ReadonlyBytes data, OwnPtr<WebPLoadingContext> context)
+ : m_context(move(context))
+{
+ m_context->data = data;
+}
+
+WebPImageDecoderPlugin::~WebPImageDecoderPlugin() = default;
+
+IntSize WebPImageDecoderPlugin::size()
+{
+ if (m_context->state == WebPLoadingContext::State::Error)
+ return {};
+
+ if (m_context->state < WebPLoadingContext::State::FirstChunkDecoded) {
+ if (decode_webp_first_chunk(*m_context).is_error())
+ return {};
+ }
+
+ return m_context->size.value();
+}
+
+void WebPImageDecoderPlugin::set_volatile()
+{
+ if (m_context->bitmap)
+ m_context->bitmap->set_volatile();
+}
+
+bool WebPImageDecoderPlugin::set_nonvolatile(bool& was_purged)
+{
+ if (!m_context->bitmap)
+ return false;
+ return m_context->bitmap->set_nonvolatile(was_purged);
+}
+
+bool WebPImageDecoderPlugin::initialize()
+{
+ return !decode_webp_header(*m_context).is_error();
+}
+
+bool WebPImageDecoderPlugin::sniff(ReadonlyBytes data)
+{
+ WebPLoadingContext context;
+ context.data = data;
+ return !decode_webp_header(context).is_error();
+}
+
+ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> WebPImageDecoderPlugin::create(ReadonlyBytes data)
+{
+ auto context = TRY(try_make<WebPLoadingContext>());
+ return adopt_nonnull_own_or_enomem(new (nothrow) WebPImageDecoderPlugin(data, move(context)));
+}
+
+bool WebPImageDecoderPlugin::is_animated()
+{
+ if (m_context->state == WebPLoadingContext::State::Error)
+ return false;
+
+ if (m_context->state < WebPLoadingContext::State::FirstChunkDecoded) {
+ if (decode_webp_first_chunk(*m_context).is_error())
+ return false;
+ }
+
+ return m_context->first_chunk->type == FourCC("VP8X") && m_context->vp8x_header.has_animation;
+}
+
+size_t WebPImageDecoderPlugin::loop_count()
+{
+ if (!is_animated())
+ return 0;
+
+ if (m_context->state < WebPLoadingContext::State::ChunksDecoded) {
+ if (decode_webp_chunks(*m_context).is_error())
+ return 0;
+ }
+
+ auto anim_or_error = decode_webp_chunk_ANIM(*m_context, m_context->animation_header_chunk.value());
+ if (anim_or_error.is_error())
+ return 0;
+
+ return anim_or_error.value().loop_count;
+}
+
+size_t WebPImageDecoderPlugin::frame_count()
+{
+ if (!is_animated())
+ return 1;
+
+ if (m_context->state < WebPLoadingContext::State::ChunksDecoded) {
+ if (decode_webp_chunks(*m_context).is_error())
+ return 1;
+ }
+
+ return m_context->animation_frame_chunks.size();
+}
+
+ErrorOr<ImageFrameDescriptor> WebPImageDecoderPlugin::frame(size_t index)
+{
+ if (index >= frame_count())
+ return Error::from_string_literal("WebPImageDecoderPlugin: Invalid frame index");
+
+ return Error::from_string_literal("WebPImageDecoderPlugin: decoding not yet implemented");
+}
+
+ErrorOr<Optional<ReadonlyBytes>> WebPImageDecoderPlugin::icc_data()
+{
+ TRY(decode_webp_chunks(*m_context));
+
+ // FIXME: "If this chunk is not present, sRGB SHOULD be assumed."
+
+ return m_context->iccp_chunk.map([](auto iccp_chunk) { return iccp_chunk.data; });
+}
+
+}
+
+template<>
+struct AK::Formatter<Gfx::FourCC> : StandardFormatter {
+ ErrorOr<void> format(FormatBuilder& builder, Gfx::FourCC const& four_cc)
+ {
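+ // Formats the four characters wrapped in single quotes, e.g. 'VP8L'.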
+ TRY(builder.put_padding('\'', 1));
+ TRY(builder.put_padding(four_cc.cc[0], 1));
+ TRY(builder.put_padding(four_cc.cc[1], 1));
+ TRY(builder.put_padding(four_cc.cc[2], 1));
+ TRY(builder.put_padding(four_cc.cc[3], 1));
+ TRY(builder.put_padding('\'', 1));
+ return {};
+ }
+};
diff --git a/Userland/Libraries/LibGfx/ImageFormats/WebPLoader.h b/Userland/Libraries/LibGfx/ImageFormats/WebPLoader.h
new file mode 100644
index 0000000000..f7f07a458c
--- /dev/null
+++ b/Userland/Libraries/LibGfx/ImageFormats/WebPLoader.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2023, Nico Weber <thakis@chromium.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <LibGfx/ImageFormats/ImageDecoder.h>
+
+namespace Gfx {
+
+struct WebPLoadingContext;
+
+class WebPImageDecoderPlugin final : public ImageDecoderPlugin {
+public:
+ static bool sniff(ReadonlyBytes);
+ static ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> create(ReadonlyBytes);
+
+ virtual ~WebPImageDecoderPlugin() override;
+
+ virtual IntSize size() override;
+ virtual void set_volatile() override;
+ [[nodiscard]] virtual bool set_nonvolatile(bool& was_purged) override;
+ virtual bool initialize() override;
+ virtual bool is_animated() override;
+ virtual size_t loop_count() override;
+ virtual size_t frame_count() override;
+ virtual ErrorOr<ImageFrameDescriptor> frame(size_t index) override;
+ virtual ErrorOr<Optional<ReadonlyBytes>> icc_data() override;
+
+private:
+ WebPImageDecoderPlugin(ReadonlyBytes, OwnPtr<WebPLoadingContext>);
+
+ OwnPtr<WebPLoadingContext> m_context;
+};
+
+}