diff options
author | Jelle Raaijmakers <jelle@gmta.nl> | 2022-09-04 21:38:39 +0200 |
---|---|---|
committer | Linus Groh <mail@linusgroh.de> | 2022-09-11 22:37:07 +0100 |
commit | dda5987684227e31e1d7b2fca749d43f734bfc47 (patch) | |
tree | 0815e577932a70dff260f46adcfc8586314d1850 /Userland/Libraries/LibSoftGPU/Device.cpp | |
parent | 44953a430159117fcfbbee6f3e47bfac89f5e215 (diff) | |
download | serenity-dda5987684227e31e1d7b2fca749d43f734bfc47.zip |
LibGL+LibGPU+LibSoftGPU: Remove concept of `layer` in favor of `depth`
Looking at how Khronos defines layers:
https://www.khronos.org/opengl/wiki/Array_Texture
We have both 3D textures and layers of 2D textures, which can both be
encoded in our existing `Typed3DBuffer` as depth. Since we already support
depth in the GPU API, remove layer everywhere.
Also pass in `Texture2D::LOG2_MAX_TEXTURE_SIZE` as the maximum number
of mipmap levels, so we do not allocate 999 levels on each Image
instantiation.
Diffstat (limited to 'Userland/Libraries/LibSoftGPU/Device.cpp')
-rw-r--r-- | Userland/Libraries/LibSoftGPU/Device.cpp | 11 |
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/Userland/Libraries/LibSoftGPU/Device.cpp b/Userland/Libraries/LibSoftGPU/Device.cpp
index 0033ba6d87..d0a44f9bad 100644
--- a/Userland/Libraries/LibSoftGPU/Device.cpp
+++ b/Userland/Libraries/LibSoftGPU/Device.cpp
@@ -1473,7 +1473,7 @@ void Device::blit_from_color_buffer(NonnullRefPtr<GPU::Image> image, u32 level,
 
     auto const& softgpu_image = reinterpret_cast<Image*>(image.ptr());
     auto output_layout = softgpu_image->image_data_layout(level, output_offset);
-    auto* output_data = softgpu_image->texel_pointer(0, level, 0, 0, 0);
+    auto* output_data = softgpu_image->texel_pointer(level, 0, 0, 0);
 
     PixelConverter converter { input_layout, output_layout };
     auto conversion_result = converter.convert(input_data, output_data, {});
@@ -1512,7 +1512,7 @@ void Device::blit_from_depth_buffer(NonnullRefPtr<GPU::Image> image, u32 level,
 
     auto const& softgpu_image = reinterpret_cast<Image*>(image.ptr());
     auto output_layout = softgpu_image->image_data_layout(level, output_offset);
-    auto* output_data = softgpu_image->texel_pointer(0, level, 0, 0, 0);
+    auto* output_data = softgpu_image->texel_pointer(level, 0, 0, 0);
 
     PixelConverter converter { input_layout, output_layout };
     auto conversion_result = converter.convert(input_data, output_data, {});
@@ -1629,15 +1629,14 @@ void Device::set_light_model_params(GPU::LightModelParameters const& lighting_model)
     m_lighting_model = lighting_model;
 }
 
-NonnullRefPtr<GPU::Image> Device::create_image(GPU::PixelFormat const& pixel_format, u32 width, u32 height, u32 depth, u32 levels, u32 layers)
+NonnullRefPtr<GPU::Image> Device::create_image(GPU::PixelFormat const& pixel_format, u32 width, u32 height, u32 depth, u32 max_levels)
 {
     VERIFY(width > 0);
     VERIFY(height > 0);
     VERIFY(depth > 0);
-    VERIFY(levels > 0);
-    VERIFY(layers > 0);
+    VERIFY(max_levels > 0);
 
-    return adopt_ref(*new Image(this, pixel_format, width, height, depth, levels, layers));
+    return adopt_ref(*new Image(this, pixel_format, width, height, depth, max_levels));
 }
 
 void Device::set_sampler_config(unsigned sampler, GPU::SamplerConfig const& config)