path: root/Userland/Libraries/LibGL/ContextParameter.cpp
author     Jelle Raaijmakers <jelle@gmta.nl>       2022-08-24 23:47:49 +0200
committer  Andreas Kling <kling@serenityos.org>    2022-08-27 12:28:05 +0200
commit     eb7c3d16fbfd805f9fbb3b819a661db10088fb56 (patch)
tree       51e65bff9fead51d7c8f367d5e522d2f24deec31 /Userland/Libraries/LibGL/ContextParameter.cpp
parent     d7cfdfe6335de83f25d205cd9863fc18e2854763 (diff)
download   serenity-eb7c3d16fbfd805f9fbb3b819a661db10088fb56.zip
LibGL+LibGPU+LibSoftGPU: Implement flexible pixel format conversion
A GPU (driver) is now responsible for reading and writing pixels from and to user data. The client (LibGL) is responsible for specifying how the user data must be interpreted or written to.

This allows us to centralize all pixel format conversion in one class, `LibSoftGPU::PixelConverter`. For both the input and output image, it takes a specification containing the image dimensions, the pixel type and the selection (basically a clipping rect), and converts the pixels from the input image to the output image.

Effectively this means we now support almost all OpenGL 1.5 formats, and all custom logic has disappeared from:

  - `glDrawPixels`
  - `glReadPixels`
  - `glTexImage2D`
  - `glTexSubImage2D`

The new logic is still unoptimized, but on my machine I experienced no noticeable slowdown. :^)
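To make the described flow concrete, here is a minimal, standalone sketch of the centralized-conversion idea. Every name in it (PixelType, ImageSpecification, convert_pixels) is invented for illustration and does not mirror the actual GPU::ImageDataLayout or LibSoftGPU::PixelConverter declarations; the real converter also handles the many OpenGL 1.5 component layouts rather than just two byte orders.

    // Toy model of the commit's approach: each image carries a specification
    // (dimensions, pixel type, selection rect) and a single routine converts
    // between any two of them. Hypothetical names; not the LibSoftGPU API.
    #include <cstdint>

    enum class PixelType { RGBA8888, BGRA8888 };

    struct ImageSpecification {
        uint32_t width { 0 };
        uint32_t height { 0 };
        PixelType pixel_type { PixelType::RGBA8888 };
        // The selection is the sub-rectangle to read from or write to.
        uint32_t selection_x { 0 };
        uint32_t selection_y { 0 };
        uint32_t selection_width { 0 };
        uint32_t selection_height { 0 };
    };

    // Convert the input selection into the output selection pixel by pixel,
    // normalizing to RGBA in the middle so the two formats stay decoupled.
    void convert_pixels(ImageSpecification const& in_spec, uint8_t const* in_data,
        ImageSpecification const& out_spec, uint8_t* out_data)
    {
        for (uint32_t y = 0; y < in_spec.selection_height; ++y) {
            for (uint32_t x = 0; x < in_spec.selection_width; ++x) {
                auto const* in = &in_data[4 * ((in_spec.selection_y + y) * in_spec.width + (in_spec.selection_x + x))];
                auto* out = &out_data[4 * ((out_spec.selection_y + y) * out_spec.width + (out_spec.selection_x + x))];
                uint8_t r, g, b, a;
                if (in_spec.pixel_type == PixelType::RGBA8888) {
                    r = in[0]; g = in[1]; b = in[2]; a = in[3];
                } else {
                    b = in[0]; g = in[1]; r = in[2]; a = in[3];
                }
                if (out_spec.pixel_type == PixelType::RGBA8888) {
                    out[0] = r; out[1] = g; out[2] = b; out[3] = a;
                } else {
                    out[0] = b; out[1] = g; out[2] = r; out[3] = a;
                }
            }
        }
    }

Funneling glDrawPixels, glReadPixels, glTexImage2D and glTexSubImage2D through one such routine is what lets their per-function format handling disappear.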
Diffstat (limited to 'Userland/Libraries/LibGL/ContextParameter.cpp')
-rw-r--r--  Userland/Libraries/LibGL/ContextParameter.cpp  24
1 file changed, 24 insertions, 0 deletions
diff --git a/Userland/Libraries/LibGL/ContextParameter.cpp b/Userland/Libraries/LibGL/ContextParameter.cpp
index 83a0326f29..c35048c7bd 100644
--- a/Userland/Libraries/LibGL/ContextParameter.cpp
+++ b/Userland/Libraries/LibGL/ContextParameter.cpp
@@ -584,4 +584,28 @@ GLboolean GLContext::gl_is_enabled(GLenum capability)
     return parameter.value.boolean_value;
 }
 
+GPU::PackingSpecification GLContext::get_packing_specification(PackingType packing_type)
+{
+    // Make use of the fact that the GL_PACK_* and GL_UNPACK_* enum constants are in the exact same order
+    auto const offset = (packing_type == PackingType::Unpack) ? 0 : (GL_PACK_SWAP_BYTES - GL_UNPACK_SWAP_BYTES);
+    auto get_packing_value = [&](GLenum packing_parameter) -> GLint {
+        GLint value;
+        gl_get_integerv(packing_parameter + offset, &value);
+        return value;
+    };
+
+    // FIXME: add support for GL_UNPACK_SKIP_PIXELS, GL_UNPACK_SKIP_ROWS and GL_UNPACK_LSB_FIRST
+    GLint byte_alignment { get_packing_value(GL_UNPACK_ALIGNMENT) };
+    GLint swap_bytes { get_packing_value(GL_UNPACK_SWAP_BYTES) };
+    GLint depth_stride { get_packing_value(GL_UNPACK_IMAGE_HEIGHT) };
+    GLint row_stride { get_packing_value(GL_UNPACK_ROW_LENGTH) };
+
+    return {
+        .depth_stride = static_cast<u32>(depth_stride),
+        .row_stride = static_cast<u32>(row_stride),
+        .byte_alignment = static_cast<u8>(byte_alignment),
+        .component_bytes_order = swap_bytes == GL_TRUE ? GPU::ComponentBytesOrder::Reversed : GPU::ComponentBytesOrder::Normal,
+    };
+}
+
 }
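The new helper leans on the layout of the GL enum space: each GL_PACK_* parameter sits at a fixed distance above its GL_UNPACK_* twin, so adding one offset turns an unpack query into the matching pack query. The standalone sketch below checks that property using the Khronos-standard constant values; I am assuming (not verifying here) that LibGL's GL/gl.h uses the same values.

    // Hypothetical check of the ordering trick (C++17). The hex values are the
    // Khronos-standard ones; LibGL is assumed to match them.
    #include <cstdio>

    enum : unsigned {
        UNPACK_SWAP_BYTES = 0x0CF0,
        UNPACK_ROW_LENGTH = 0x0CF2,
        UNPACK_ALIGNMENT = 0x0CF5,
        PACK_SWAP_BYTES = 0x0D00,
        PACK_ROW_LENGTH = 0x0D02,
        PACK_ALIGNMENT = 0x0D05,
    };

    int main()
    {
        // Same offset as the patch computes: GL_PACK_SWAP_BYTES - GL_UNPACK_SWAP_BYTES.
        constexpr unsigned offset = PACK_SWAP_BYTES - UNPACK_SWAP_BYTES; // 0x10
        static_assert(UNPACK_ROW_LENGTH + offset == PACK_ROW_LENGTH);
        static_assert(UNPACK_ALIGNMENT + offset == PACK_ALIGNMENT);
        std::printf("GL_PACK_*/GL_UNPACK_* offset: 0x%X\n", offset);
        return 0;
    }

One caveat: under those standard values the fixed offset covers the original GL_*_SWAP_BYTES through GL_*_ALIGNMENT block, but GL_UNPACK_IMAGE_HEIGHT (0x806E) and GL_PACK_IMAGE_HEIGHT (0x806C), added later with EXT_texture3D, are only 2 apart in the other direction, so the depth_stride query relies on LibGL keeping that pair equidistant as well. Callers presumably pass PackingType::Unpack when sourcing pixels (glTexImage2D, glDrawPixels) and PackingType::Pack when writing them back out (glReadPixels).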