author    Jelle Raaijmakers <jelle@gmta.nl>       2022-08-24 23:47:49 +0200
committer Andreas Kling <kling@serenityos.org>    2022-08-27 12:28:05 +0200
commit    eb7c3d16fbfd805f9fbb3b819a661db10088fb56 (patch)
tree      51e65bff9fead51d7c8f367d5e522d2f24deec31 /Userland/Libraries/LibGL/Image.h
parent    d7cfdfe6335de83f25d205cd9863fc18e2854763 (diff)
LibGL+LibGPU+LibSoftGPU: Implement flexible pixel format conversion
A GPU (driver) is now responsible for reading and writing pixels from
and to user data. The client (LibGL) is responsible for specifying how
the user data must be interpreted or written to.

This allows us to centralize all pixel format conversion in one class,
`LibSoftGPU::PixelConverter`. For both the input and output image, it
takes a specification containing the image dimensions, the pixel type
and the selection (basically a clipping rect), and converts the pixels
from the input image to the output image.

Effectively this means we now support almost all OpenGL 1.5 formats,
and all custom logic has disappeared from:

  - `glDrawPixels`
  - `glReadPixels`
  - `glTexImage2D`
  - `glTexSubImage2D`

The new logic is still unoptimized, but on my machine I experienced no
noticeable slowdown. :^)
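To illustrate the conversion model the message describes, here is a
minimal, self-contained C++ sketch. All names (Selection,
ImageSpecification, convert) are illustrative stand-ins rather than the
actual LibSoftGPU::PixelConverter API, and the pixel type description
is reduced to a bytes-per-pixel count, with a fixed RGBA8-to-BGRA8
swizzle standing in for real format conversion.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // A rectangular selection of pixels inside an image (a clipping rect).
    struct Selection {
        int x { 0 };
        int y { 0 };
        int width { 0 };
        int height { 0 };
    };

    // Stand-in for an image specification: dimensions, a pixel type
    // description (reduced to bytes-per-pixel here) and a selection.
    struct ImageSpecification {
        int width { 0 };
        int height { 0 };
        int bytes_per_pixel { 0 };
        Selection selection;
    };

    // Copy the input selection to the output selection, converting each
    // pixel. A real converter would dispatch on the two pixel types; a
    // fixed 4-byte RGBA8 -> BGRA8 swizzle stands in for that here.
    void convert(ImageSpecification const& input, uint8_t const* input_data,
        ImageSpecification const& output, uint8_t* output_data)
    {
        for (int row = 0; row < input.selection.height; ++row) {
            for (int column = 0; column < input.selection.width; ++column) {
                auto const* src = input_data
                    + ((input.selection.y + row) * input.width + input.selection.x + column) * input.bytes_per_pixel;
                auto* dst = output_data
                    + ((output.selection.y + row) * output.width + output.selection.x + column) * output.bytes_per_pixel;
                dst[0] = src[2];
                dst[1] = src[1];
                dst[2] = src[0];
                dst[3] = src[3];
            }
        }
    }

    int main()
    {
        ImageSpecification spec { 2, 2, 4, { 0, 0, 2, 2 } };
        std::vector<uint8_t> in { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
        std::vector<uint8_t> out(in.size());
        convert(spec, in.data(), spec, out.data());
        std::printf("first output pixel: %d %d %d %d\n", out[0], out[1], out[2], out[3]);
    }

The real converter generalizes this loop by dispatching on the input
and output pixel types, which is what lets a single class replace the
per-call conversion logic in the four GL entry points listed above.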
Diffstat (limited to 'Userland/Libraries/LibGL/Image.h')
 Userland/Libraries/LibGL/Image.h | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/Userland/Libraries/LibGL/Image.h b/Userland/Libraries/LibGL/Image.h
new file mode 100644
index 0000000000..aed64d30c0
--- /dev/null
+++ b/Userland/Libraries/LibGL/Image.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2022, Jelle Raaijmakers <jelle@gmta.nl>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/Error.h>
+#include <LibGL/GL/gl.h>
+#include <LibGL/GLContext.h>
+#include <LibGPU/ImageDataLayout.h>
+#include <LibGPU/ImageFormat.h>
+
+namespace GL {
+
+GPU::PixelType get_format_specification(GLenum format, GLenum type);
+ErrorOr<GPU::PixelType> get_validated_pixel_type(GLenum target, GLenum internal_format, GLenum format, GLenum type);
+
+}
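For context, the two declarations above are meant to be called from
LibGL entry points such as glTexImage2D: first obtain a validated
GPU::PixelType for the caller's format/type combination, then hand the
pixel data to the GPU driver. Below is a standalone sketch of that
validation pattern; the enum values and PixelType fields are
placeholders rather than LibGPU's actual definitions, the signature is
reduced to the (format, type) pair, and std::optional stands in for
AK::ErrorOr.

    #include <cstdio>
    #include <optional>

    // Placeholder enums; the real code uses GLenum values such as GL_RGBA.
    enum class Format { RGBA, BGRA, DepthComponent };
    enum class Type { UnsignedByte, Float };

    // Placeholder for GPU::PixelType, which describes how user data is
    // laid out.
    struct PixelType {
        Format format;
        Type type;
    };

    // Returns a pixel type when the (format, type) combination is
    // acceptable; an empty optional stands in for ErrorOr's error case.
    std::optional<PixelType> get_validated_pixel_type(Format format, Type type)
    {
        // Reject combinations an implementation might not support, e.g. a
        // non-float depth component readback. (Illustrative rule only.)
        if (format == Format::DepthComponent && type != Type::Float)
            return {};
        return PixelType { format, type };
    }

    int main()
    {
        if (auto pixel_type = get_validated_pixel_type(Format::RGBA, Type::UnsignedByte))
            std::printf("valid combination\n");
    }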