-rw-r--r--   Userland/Libraries/LibVideo/VP9/Context.h   | 62
-rw-r--r--   Userland/Libraries/LibVideo/VP9/Decoder.cpp | 13
-rw-r--r--   Userland/Libraries/LibVideo/VP9/Parser.cpp  | 18
-rw-r--r--   Userland/Libraries/LibVideo/VP9/Parser.h    |  2
4 files changed, 78 insertions, 17 deletions
diff --git a/Userland/Libraries/LibVideo/VP9/Context.h b/Userland/Libraries/LibVideo/VP9/Context.h
index 38548485b0..4168977690 100644
--- a/Userland/Libraries/LibVideo/VP9/Context.h
+++ b/Userland/Libraries/LibVideo/VP9/Context.h
@@ -8,6 +8,11 @@
 #pragma once
 
 #include <AK/Array.h>
+#include <AK/Error.h>
+#include <LibGfx/Size.h>
+#include <LibVideo/Color/CodingIndependentCodePoints.h>
+
+#include <AK/Format.h>
 
 #include "Enums.h"
 #include "MotionVector.h"
@@ -37,6 +42,63 @@ struct Pair {
 typedef Pair<ReferenceFrameType> ReferenceFramePair;
 typedef Pair<MotionVector> MotionVectorPair;
 
+template<typename T>
+class Vector2D {
+public:
+    ~Vector2D()
+    {
+        if (m_storage)
+            free(m_storage);
+        m_storage = nullptr;
+        m_width = 0;
+        m_height = 0;
+    }
+
+    ErrorOr<void> try_resize(u32 height, u32 width)
+    {
+        if (height != m_height && width != m_width) {
+            this->~Vector2D();
+            size_t size = height * width;
+            auto* new_storage = static_cast<T*>(malloc(size * sizeof(T)));
+            if (!new_storage)
+                return Error::from_errno(ENOMEM);
+            m_storage = new_storage;
+            m_height = height;
+            m_width = width;
+        }
+
+        return {};
+    }
+
+    u32 height() const { return m_height; }
+    u32 width() const { return m_width; }
+
+    size_t index_at(u32 row, u32 column) const
+    {
+        VERIFY(row < height());
+        VERIFY(column < width());
+        return row * width() + column;
+    }
+
+    T& operator[](size_t index) { return m_storage[index]; }
+    T const& operator[](size_t index) const { return m_storage[index]; }
+    size_t size() const { return m_height * m_width; }
+
+    T& at(u32 row, u32 column)
+    {
+        return m_storage[index_at(row, column)];
+    }
+    T const& at(u32 row, u32 column) const
+    {
+        return m_storage[index_at(row, column)];
+    }
+
+private:
+    u32 m_height { 0 };
+    u32 m_width { 0 };
+    T* m_storage { nullptr };
+};
+
 struct TokensContext {
     TXSize m_tx_size;
     bool m_is_uv_plane;
diff --git a/Userland/Libraries/LibVideo/VP9/Decoder.cpp b/Userland/Libraries/LibVideo/VP9/Decoder.cpp
index 6e004c02ef..b2dd0176b2 100644
--- a/Userland/Libraries/LibVideo/VP9/Decoder.cpp
+++ b/Userland/Libraries/LibVideo/VP9/Decoder.cpp
@@ -1817,13 +1817,16 @@ DecoderErrorOr<void> Decoder::update_reference_frames()
         // for col = 0..MiCols-1, for list = 0..1.
         // − PrevMvs[ row ][ col ][ list ][ comp ] is set equal to Mvs[ row ][ col ][ list ][ comp ] for row = 0..MiRows-1,
         // for col = 0..MiCols-1, for list = 0..1, for comp = 0..1.
-        size_t size = m_parser->m_frame_block_contexts.size();
+        size_t size = m_parser->m_frame_block_contexts.width() * m_parser->m_frame_block_contexts.height();
         m_parser->m_prev_ref_frames.resize_and_keep_capacity(size);
         m_parser->m_prev_mvs.resize_and_keep_capacity(size);
-        for (size_t i = 0; i < size; i++) {
-            auto context = m_parser->m_frame_block_contexts[i];
-            m_parser->m_prev_ref_frames[i] = context.ref_frames;
-            m_parser->m_prev_mvs[i] = context.primary_motion_vector_pair();
+        for (u32 row = 0; row < m_parser->m_frame_block_contexts.height(); row++) {
+            for (u32 column = 0; column < m_parser->m_frame_block_contexts.width(); column++) {
+                auto index = m_parser->m_frame_block_contexts.index_at(row, column);
+                auto context = m_parser->m_frame_block_contexts[index];
+                m_parser->m_prev_ref_frames[index] = context.ref_frames;
+                m_parser->m_prev_mvs[index] = context.primary_motion_vector_pair();
+            }
         }
     }
 }
diff --git a/Userland/Libraries/LibVideo/VP9/Parser.cpp b/Userland/Libraries/LibVideo/VP9/Parser.cpp
index 4d013e9507..1dbfb417df 100644
--- a/Userland/Libraries/LibVideo/VP9/Parser.cpp
+++ b/Userland/Libraries/LibVideo/VP9/Parser.cpp
@@ -805,8 +805,7 @@ void Parser::setup_compound_reference_mode()
 
 DecoderErrorOr<void> Parser::allocate_tile_data()
 {
-    auto dimensions = m_mi_rows * m_mi_cols;
-    DECODER_TRY_ALLOC(m_frame_block_contexts.try_resize_and_keep_capacity(dimensions));
+    DECODER_TRY_ALLOC(m_frame_block_contexts.try_resize(m_mi_rows, m_mi_cols));
     return {};
 }
 
@@ -952,10 +951,8 @@ DecoderErrorOr<void> Parser::decode_block(u32 row, u32 col, BlockSubsize subsize
     auto maximum_block_x = min<u32>(num_8x8_blocks_wide_lookup[subsize], m_mi_cols - col);
 
     for (size_t y = 0; y < maximum_block_y; y++) {
-        for (size_t x = 0; x < maximum_block_x; x++) {
-            auto pos = get_image_index(row + y, col + x);
-            m_frame_block_contexts[pos] = FrameBlockContext { true, m_skip, m_tx_size, m_y_mode, m_block_sub_modes, m_interp_filter, m_ref_frame, m_block_mvs, m_segment_id };
-        }
+        for (size_t x = 0; x < maximum_block_x; x++)
+            m_frame_block_contexts.at(row + y, col + x) = FrameBlockContext { true, m_skip, m_tx_size, m_y_mode, m_block_sub_modes, m_interp_filter, m_ref_frame, m_block_mvs, m_segment_id };
     }
     return {};
 }
@@ -1019,14 +1016,14 @@ FrameBlockContext Parser::get_above_context() const
 {
     if (!m_available_u)
         return FrameBlockContext { .is_available = false };
-    return m_frame_block_contexts[get_image_index(m_mi_row - 1, m_mi_col)];
+    return m_frame_block_contexts.at(m_mi_row - 1, m_mi_col);
 }
 
 FrameBlockContext Parser::get_left_context() const
 {
     if (!m_available_l)
         return FrameBlockContext { .is_available = false };
-    return m_frame_block_contexts[get_image_index(m_mi_row, m_mi_col - 1)];
+    return m_frame_block_contexts.at(m_mi_row, m_mi_col - 1);
 }
 
 DecoderErrorOr<void> Parser::read_skip()
@@ -1506,7 +1503,7 @@ void Parser::get_block_mv(u32 candidate_row, u32 candidate_column, u8 ref_list,
         m_candidate_mv[ref_list] = m_prev_mvs[index][ref_list];
         m_candidate_frame[ref_list] = m_prev_ref_frames[index][ref_list];
     } else {
-        auto current_context = m_frame_block_contexts[index];
+        auto const& current_context = m_frame_block_contexts.at(candidate_row, candidate_column);
         m_candidate_mv[ref_list] = current_context.primary_motion_vector_pair()[ref_list];
         m_candidate_frame[ref_list] = current_context.ref_frames[ref_list];
     }
@@ -1585,9 +1582,8 @@ void Parser::find_mv_refs(ReferenceFrameType reference_frame, i32 block)
 
         auto candidate = base_coordinates + offset_vector;
         if (is_inside(candidate.row(), candidate.column())) {
-            auto candidate_index = get_image_index(candidate.row(), candidate.column());
             different_ref_found = true;
-            auto context = m_frame_block_contexts[candidate_index];
+            auto context = m_frame_block_contexts.at(candidate.row(), candidate.column());
             context_counter += mode_2_counter[to_underlying(context.y_mode)];
 
             for (auto ref_list = 0u; ref_list < 2; ref_list++) {
diff --git a/Userland/Libraries/LibVideo/VP9/Parser.h b/Userland/Libraries/LibVideo/VP9/Parser.h
index 57a9cf4e77..a6e040de1d 100644
--- a/Userland/Libraries/LibVideo/VP9/Parser.h
+++ b/Userland/Libraries/LibVideo/VP9/Parser.h
@@ -267,7 +267,7 @@ private:
     // arrays instead.
     // I think should also apply to other fields that are only accessed relative to the current block. Worth looking
     // into how much of this context needs to be stored for the whole frame vs a row or column from the current tile.
-    Vector<FrameBlockContext> m_frame_block_contexts;
+    Vector2D<FrameBlockContext> m_frame_block_contexts;
     MotionVectorPair m_candidate_mv;
     ReferenceFramePair m_candidate_frame;
 
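
The core of this change is the new Vector2D<T> container in Context.h, which replaces the flat Vector<FrameBlockContext> plus the get_image_index() arithmetic that was scattered through Parser.cpp. Below is a rough usage sketch, not part of the commit: it assumes Vector2D ends up in the Video::VP9 namespace alongside the rest of Context.h, and the ExampleElement type and fill_example_grid() function are invented purely for illustration.

    #include <AK/Try.h>
    #include <AK/Types.h>
    #include <LibVideo/VP9/Context.h>

    // Stand-in element type; the parser itself stores FrameBlockContext entries.
    struct ExampleElement {
        int value { 0 };
    };

    ErrorOr<void> fill_example_grid(u32 rows, u32 columns)
    {
        Video::VP9::Vector2D<ExampleElement> grid;

        // One allocation of rows * columns elements; try_resize() only
        // reallocates when the dimensions change, as in Parser::allocate_tile_data().
        TRY(grid.try_resize(rows, columns));

        // Address an element by (row, column), the way Parser::decode_block()
        // and get_above_context()/get_left_context() now do.
        grid.at(0, 0).value = 42;

        // Or compute the flat index once and reuse it with operator[], the way
        // Decoder::update_reference_frames() does when copying a frame's contexts.
        auto index = grid.index_at(0, 0);
        ExampleElement copy = grid[index];
        (void)copy;

        return {};
    }

Keeping both at(row, column) and index_at()/operator[] lets code that touches a single neighbour use the 2D accessor directly, while loops over the whole grid can compute each flat index once instead of repeating the row * width() multiplication on every access.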