author     Zaggy1024 <zaggy1024@gmail.com>        2022-11-26 11:15:58 -0600
committer  Andreas Kling <kling@serenityos.org>   2022-11-30 08:28:30 +0100
commit     a4f14f220d4ebba8a72b239d83eb34c841c1e18f (patch)
tree       4f3e60195e2b0699f194e6ace74f7e4d093c9f05 /Userland
parent     db9f1a18f892819a139b8b7f75ad46edffdf4033 (diff)
download   serenity-a4f14f220d4ebba8a72b239d83eb34c841c1e18f.zip
LibVideo/VP9: Fully qualify all reference frame type enum values
Since the enum is used as an index into arrays, it unfortunately can't
be converted to an enum class, but at least we can make sure to use it
with the qualified enum name to make things a bit clearer.
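
To illustrate the trade-off the message describes, here is a minimal standalone sketch (plain C++; std::array stands in for AK's Array, and only the enumerator values mirror ReferenceFrameType — everything else is illustrative, not code from this patch):

    #include <array>
    #include <cstdio>

    // An unscoped enum converts implicitly to an integer, so its values can size
    // and index arrays (e.g. LastFrame + 3) without casts. An enum class would
    // require an explicit cast at every such use; qualifying the enumerators
    // (ReferenceFrameType::LastFrame) keeps call sites readable while retaining
    // the implicit conversion.
    enum ReferenceFrameType : unsigned char {
        None = 0,
        LastFrame = 1,
        GoldenFrame = 2,
        AltRefFrame = 3,
    };

    int main()
    {
        // Sized and indexed directly with enum values, the same pattern the patch
        // uses for reference_frame_sign_biases.
        std::array<bool, ReferenceFrameType::LastFrame + 3> sign_biases {};
        sign_biases[ReferenceFrameType::GoldenFrame] = true;

        // With `enum class ReferenceFrameType`, both lines above would need
        // static_cast<std::size_t>(...) around each enumerator.
        std::printf("golden frame sign bias: %d\n", sign_biases[ReferenceFrameType::GoldenFrame]);
        return 0;
    }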
Diffstat (limited to 'Userland')
-rw-r--r--  Userland/Libraries/LibVideo/VP9/Context.h      |  2
-rw-r--r--  Userland/Libraries/LibVideo/VP9/Decoder.cpp    |  2
-rw-r--r--  Userland/Libraries/LibVideo/VP9/Enums.h        |  4
-rw-r--r--  Userland/Libraries/LibVideo/VP9/Parser.cpp     | 34
-rw-r--r--  Userland/Libraries/LibVideo/VP9/TreeParser.cpp | 68
5 files changed, 55 insertions, 55 deletions
diff --git a/Userland/Libraries/LibVideo/VP9/Context.h b/Userland/Libraries/LibVideo/VP9/Context.h
index 69be034ec1..4a8186d9f8 100644
--- a/Userland/Libraries/LibVideo/VP9/Context.h
+++ b/Userland/Libraries/LibVideo/VP9/Context.h
@@ -82,7 +82,7 @@ public:
 
     // This group of fields is only needed for inter-predicted frames.
     Array<u8, 3> reference_frame_indices;
-    Array<bool, LastFrame + 3> reference_frame_sign_biases;
+    Array<bool, ReferenceFrameType::LastFrame + 3> reference_frame_sign_biases;
     bool high_precision_motion_vectors_allowed { false };
     InterpolationFilter interpolation_filter { InterpolationFilter::Switchable };
diff --git a/Userland/Libraries/LibVideo/VP9/Decoder.cpp b/Userland/Libraries/LibVideo/VP9/Decoder.cpp
index 9238fadab8..745b391d35 100644
--- a/Userland/Libraries/LibVideo/VP9/Decoder.cpp
+++ b/Userland/Libraries/LibVideo/VP9/Decoder.cpp
@@ -790,7 +790,7 @@ DecoderErrorOr<void> Decoder::predict_inter_block(u8 plane, BlockContext const&
 
     // A variable refIdx specifying which reference frame is being used is set equal to
    // ref_frame_idx[ ref_frame[ refList ] - LAST_FRAME ].
-    auto reference_frame_index = block_context.frame_context.reference_frame_indices[block_context.reference_frame_types[reference_index] - LastFrame];
+    auto reference_frame_index = block_context.frame_context.reference_frame_indices[block_context.reference_frame_types[reference_index] - ReferenceFrameType::LastFrame];
 
     // It is a requirement of bitstream conformance that all the following conditions are satisfied:
     // − 2 * FrameWidth >= RefFrameWidth[ refIdx ]
diff --git a/Userland/Libraries/LibVideo/VP9/Enums.h b/Userland/Libraries/LibVideo/VP9/Enums.h
index 17a588499e..5c57513986 100644
--- a/Userland/Libraries/LibVideo/VP9/Enums.h
+++ b/Userland/Libraries/LibVideo/VP9/Enums.h
@@ -37,9 +37,9 @@ enum InterpolationFilter : u8 {
 };
 
 enum ReferenceFrameType : u8 {
-    // 0 is both INTRA_FRAME and NONE because the value's meaning changes depending on which index they're in on the ref_frame array
+    // None represents both INTRA_FRAME and NONE in the spec. When the primary reference
+    // frame type is None, that means that the frame/block is not inter-predicted.
     None = 0,
-    IntraFrame = 0,
     LastFrame = 1,
     GoldenFrame = 2,
     AltRefFrame = 3,
diff --git a/Userland/Libraries/LibVideo/VP9/Parser.cpp b/Userland/Libraries/LibVideo/VP9/Parser.cpp
index 4d33687d31..801f5b475b 100644
--- a/Userland/Libraries/LibVideo/VP9/Parser.cpp
+++ b/Userland/Libraries/LibVideo/VP9/Parser.cpp
@@ -224,7 +224,7 @@ DecoderErrorOr<FrameContext> Parser::uncompressed_header()
         reference_frames_to_update_flags = TRY_READ(m_bit_stream->read_f8());
         for (auto i = 0; i < 3; i++) {
             frame_context.reference_frame_indices[i] = TRY_READ(m_bit_stream->read_bits(3));
-            frame_context.reference_frame_sign_biases[LastFrame + i] = TRY_READ(m_bit_stream->read_bit());
+            frame_context.reference_frame_sign_biases[ReferenceFrameType::LastFrame + i] = TRY_READ(m_bit_stream->read_bit());
         }
         frame_size = TRY(parse_frame_size_with_refs(frame_context.reference_frame_indices));
         render_size = TRY(parse_render_size(frame_size));
@@ -535,10 +535,10 @@ DecoderErrorOr<void> Parser::parse_tile_counts(FrameContext& frame_context)
 void Parser::setup_past_independence()
 {
     m_previous_block_contexts.reset();
-    m_previous_loop_filter_ref_deltas[IntraFrame] = 1;
-    m_previous_loop_filter_ref_deltas[LastFrame] = 0;
-    m_previous_loop_filter_ref_deltas[GoldenFrame] = -1;
-    m_previous_loop_filter_ref_deltas[AltRefFrame] = -1;
+    m_previous_loop_filter_ref_deltas[ReferenceFrameType::None] = 1;
+    m_previous_loop_filter_ref_deltas[ReferenceFrameType::LastFrame] = 0;
+    m_previous_loop_filter_ref_deltas[ReferenceFrameType::GoldenFrame] = -1;
+    m_previous_loop_filter_ref_deltas[ReferenceFrameType::AltRefFrame] = -1;
     m_previous_loop_filter_mode_deltas.fill(0);
     m_previous_should_use_absolute_segment_base_quantizer = false;
     for (auto& segment_levels : m_previous_segmentation_features)
@@ -700,15 +700,15 @@ static void setup_compound_reference_mode(FrameContext& frame_context)
 {
     ReferenceFrameType fixed_reference;
     ReferenceFramePair variable_references;
-    if (frame_context.reference_frame_sign_biases[LastFrame] == frame_context.reference_frame_sign_biases[GoldenFrame]) {
-        fixed_reference = AltRefFrame;
-        variable_references = { LastFrame, GoldenFrame };
-    } else if (frame_context.reference_frame_sign_biases[LastFrame] == frame_context.reference_frame_sign_biases[AltRefFrame]) {
-        fixed_reference = GoldenFrame;
-        variable_references = { LastFrame, AltRefFrame };
+    if (frame_context.reference_frame_sign_biases[ReferenceFrameType::LastFrame] == frame_context.reference_frame_sign_biases[ReferenceFrameType::GoldenFrame]) {
+        fixed_reference = ReferenceFrameType::AltRefFrame;
+        variable_references = { ReferenceFrameType::LastFrame, ReferenceFrameType::GoldenFrame };
+    } else if (frame_context.reference_frame_sign_biases[ReferenceFrameType::LastFrame] == frame_context.reference_frame_sign_biases[ReferenceFrameType::AltRefFrame]) {
+        fixed_reference = ReferenceFrameType::GoldenFrame;
+        variable_references = { ReferenceFrameType::LastFrame, ReferenceFrameType::AltRefFrame };
     } else {
-        fixed_reference = LastFrame;
-        variable_references = { GoldenFrame, AltRefFrame };
+        fixed_reference = ReferenceFrameType::LastFrame;
+        variable_references = { ReferenceFrameType::GoldenFrame, ReferenceFrameType::AltRefFrame };
     }
     frame_context.fixed_reference_type = fixed_reference;
     frame_context.variable_reference_types = variable_references;
@@ -1128,7 +1128,7 @@ u8 Parser::get_segment_id(BlockContext const& block_context)
 DecoderErrorOr<bool> Parser::read_is_inter(BlockContext& block_context, FrameBlockContext above_context, FrameBlockContext left_context)
 {
     if (seg_feature_active(block_context, SEG_LVL_REF_FRAME))
-        return block_context.frame_context.segmentation_features[block_context.segment_id][SEG_LVL_REF_FRAME].value != IntraFrame;
+        return block_context.frame_context.segmentation_features[block_context.segment_id][SEG_LVL_REF_FRAME].value != ReferenceFrameType::None;
     return TRY_READ(TreeParser::parse_block_is_inter_predicted(*m_bit_stream, *m_probability_tables, *m_syntax_element_counter, above_context, left_context));
 }
 
@@ -1211,7 +1211,7 @@ DecoderErrorOr<void> Parser::inter_block_mode_info(BlockContext& block_context,
 DecoderErrorOr<void> Parser::read_ref_frames(BlockContext& block_context, FrameBlockContext above_context, FrameBlockContext left_context)
 {
     if (seg_feature_active(block_context, SEG_LVL_REF_FRAME)) {
-        block_context.reference_frame_types = { static_cast<ReferenceFrameType>(block_context.frame_context.segmentation_features[block_context.segment_id][SEG_LVL_REF_FRAME].value), None };
+        block_context.reference_frame_types = { static_cast<ReferenceFrameType>(block_context.frame_context.segmentation_features[block_context.segment_id][SEG_LVL_REF_FRAME].value), ReferenceFrameType::None };
         return {};
     }
 
@@ -1552,14 +1552,14 @@ static void apply_sign_bias_to_motion_vector(FrameContext const& frame_context,
 void Parser::add_motion_vector_if_reference_frame_type_is_different(BlockContext const& block_context, MotionVector candidate_vector, ReferenceFrameType ref_frame, Vector<MotionVector, 2>& list, bool use_prev)
 {
     auto first_candidate = get_motion_vector_from_current_or_previous_frame(block_context, candidate_vector, ReferenceIndex::Primary, use_prev);
-    if (first_candidate.type > ReferenceFrameType::IntraFrame && first_candidate.type != ref_frame) {
+    if (first_candidate.type > ReferenceFrameType::None && first_candidate.type != ref_frame) {
         apply_sign_bias_to_motion_vector(block_context.frame_context, first_candidate, ref_frame);
         add_motion_vector_to_list_deduped(first_candidate.vector, list);
     }
 
     auto second_candidate = get_motion_vector_from_current_or_previous_frame(block_context, candidate_vector, ReferenceIndex::Secondary, use_prev);
     auto mvs_are_same = first_candidate.vector == second_candidate.vector;
-    if (second_candidate.type > ReferenceFrameType::IntraFrame && second_candidate.type != ref_frame && !mvs_are_same) {
+    if (second_candidate.type > ReferenceFrameType::None && second_candidate.type != ref_frame && !mvs_are_same) {
         apply_sign_bias_to_motion_vector(block_context.frame_context, second_candidate, ref_frame);
         add_motion_vector_to_list_deduped(second_candidate.vector, list);
     }
diff --git a/Userland/Libraries/LibVideo/VP9/TreeParser.cpp b/Userland/Libraries/LibVideo/VP9/TreeParser.cpp
index 5814e66d15..a236c61a21 100644
--- a/Userland/Libraries/LibVideo/VP9/TreeParser.cpp
+++ b/Userland/Libraries/LibVideo/VP9/TreeParser.cpp
@@ -411,29 +411,29 @@ ErrorOr<bool> TreeParser::parse_single_ref_part_1(BitStream& bit_stream, Probabi
             context = 2;
         } else if (left.is_intra_predicted()) {
             if (above.is_single_reference()) {
-                context = 4 * (above.ref_frames.primary == LastFrame);
+                context = 4 * (above.ref_frames.primary == ReferenceFrameType::LastFrame);
             } else {
-                context = 1 + (above.ref_frames.primary == LastFrame || above.ref_frames.secondary == LastFrame);
+                context = 1 + (above.ref_frames.primary == ReferenceFrameType::LastFrame || above.ref_frames.secondary == ReferenceFrameType::LastFrame);
             }
         } else if (above.is_intra_predicted()) {
             if (left.is_single_reference()) {
-                context = 4 * (left.ref_frames.primary == LastFrame);
+                context = 4 * (left.ref_frames.primary == ReferenceFrameType::LastFrame);
             } else {
-                context = 1 + (left.ref_frames.primary == LastFrame || left.ref_frames.secondary == LastFrame);
+                context = 1 + (left.ref_frames.primary == ReferenceFrameType::LastFrame || left.ref_frames.secondary == ReferenceFrameType::LastFrame);
             }
         } else {
             if (left.is_single_reference() && above.is_single_reference()) {
-                context = 2 * (above.ref_frames.primary == LastFrame) + 2 * (left.ref_frames.primary == LastFrame);
+                context = 2 * (above.ref_frames.primary == ReferenceFrameType::LastFrame) + 2 * (left.ref_frames.primary == ReferenceFrameType::LastFrame);
             } else if (!left.is_single_reference() && !above.is_single_reference()) {
-                auto above_used_last_frame = above.ref_frames.primary == LastFrame || above.ref_frames.secondary == LastFrame;
-                auto left_used_last_frame = left.ref_frames.primary == LastFrame || left.ref_frames.secondary == LastFrame;
+                auto above_used_last_frame = above.ref_frames.primary == ReferenceFrameType::LastFrame || above.ref_frames.secondary == ReferenceFrameType::LastFrame;
+                auto left_used_last_frame = left.ref_frames.primary == ReferenceFrameType::LastFrame || left.ref_frames.secondary == ReferenceFrameType::LastFrame;
                 context = 1 + (above_used_last_frame || left_used_last_frame);
             } else {
                 auto single_reference_type = above.is_single_reference() ? above.ref_frames.primary : left.ref_frames.primary;
                 auto compound_reference_a_type = above.is_single_reference() ? left.ref_frames.primary : above.ref_frames.primary;
                 auto compound_reference_b_type = above.is_single_reference() ? left.ref_frames.secondary : above.ref_frames.secondary;
-                context = compound_reference_a_type == LastFrame || compound_reference_b_type == LastFrame;
-                if (single_reference_type == LastFrame)
+                context = compound_reference_a_type == ReferenceFrameType::LastFrame || compound_reference_b_type == ReferenceFrameType::LastFrame;
+                if (single_reference_type == ReferenceFrameType::LastFrame)
                     context += 3;
             }
         }
@@ -442,9 +442,9 @@ ErrorOr<bool> TreeParser::parse_single_ref_part_1(BitStream& bit_stream, Probabi
             context = 2;
         } else {
             if (above.is_single_reference()) {
-                context = 4 * (above.ref_frames.primary == LastFrame);
+                context = 4 * (above.ref_frames.primary == ReferenceFrameType::LastFrame);
             } else {
-                context = 1 + (above.ref_frames.primary == LastFrame || above.ref_frames.secondary == LastFrame);
+                context = 1 + (above.ref_frames.primary == ReferenceFrameType::LastFrame || above.ref_frames.secondary == ReferenceFrameType::LastFrame);
             }
         }
     } else if (left.is_available) {
@@ -452,9 +452,9 @@
             context = 2;
         } else {
             if (left.is_single_reference()) {
-                context = 4 * (left.ref_frames.primary == LastFrame);
+                context = 4 * (left.ref_frames.primary == ReferenceFrameType::LastFrame);
             } else {
-                context = 1 + (left.ref_frames.primary == LastFrame || left.ref_frames.secondary == LastFrame);
+                context = 1 + (left.ref_frames.primary == ReferenceFrameType::LastFrame || left.ref_frames.secondary == ReferenceFrameType::LastFrame);
             }
         }
     } else {
@@ -478,40 +478,40 @@ ErrorOr<bool> TreeParser::parse_single_ref_part_2(BitStream& bit_stream, Probabi
             context = 2;
         } else if (left.is_intra_predicted()) {
             if (above.is_single_reference()) {
-                if (above.ref_frames.primary == LastFrame) {
+                if (above.ref_frames.primary == ReferenceFrameType::LastFrame) {
                     context = 3;
                 } else {
-                    context = 4 * (above.ref_frames.primary == GoldenFrame);
+                    context = 4 * (above.ref_frames.primary == ReferenceFrameType::GoldenFrame);
                 }
             } else {
-                context = 1 + 2 * (above.ref_frames.primary == GoldenFrame || above.ref_frames.secondary == GoldenFrame);
+                context = 1 + 2 * (above.ref_frames.primary == ReferenceFrameType::GoldenFrame || above.ref_frames.secondary == ReferenceFrameType::GoldenFrame);
             }
         } else if (above.is_intra_predicted()) {
             if (left.is_single_reference()) {
-                if (left.ref_frames.primary == LastFrame) {
+                if (left.ref_frames.primary == ReferenceFrameType::LastFrame) {
                     context = 3;
                 } else {
-                    context = 4 * (left.ref_frames.primary == GoldenFrame);
+                    context = 4 * (left.ref_frames.primary == ReferenceFrameType::GoldenFrame);
                 }
             } else {
-                context = 1 + 2 * (left.ref_frames.primary == GoldenFrame || left.ref_frames.secondary == GoldenFrame);
+                context = 1 + 2 * (left.ref_frames.primary == ReferenceFrameType::GoldenFrame || left.ref_frames.secondary == ReferenceFrameType::GoldenFrame);
             }
         } else {
             if (left.is_single_reference() && above.is_single_reference()) {
-                auto above_last = above.ref_frames.primary == LastFrame;
-                auto left_last = left.ref_frames.primary == LastFrame;
+                auto above_last = above.ref_frames.primary == ReferenceFrameType::LastFrame;
+                auto left_last = left.ref_frames.primary == ReferenceFrameType::LastFrame;
                 if (above_last && left_last) {
                     context = 3;
                 } else if (above_last) {
-                    context = 4 * (left.ref_frames.primary == GoldenFrame);
+                    context = 4 * (left.ref_frames.primary == ReferenceFrameType::GoldenFrame);
                 } else if (left_last) {
-                    context = 4 * (above.ref_frames.primary == GoldenFrame);
+                    context = 4 * (above.ref_frames.primary == ReferenceFrameType::GoldenFrame);
                 } else {
-                    context = 2 * (above.ref_frames.primary == GoldenFrame) + 2 * (left.ref_frames.primary == GoldenFrame);
+                    context = 2 * (above.ref_frames.primary == ReferenceFrameType::GoldenFrame) + 2 * (left.ref_frames.primary == ReferenceFrameType::GoldenFrame);
                 }
             } else if (!left.is_single_reference() && !above.is_single_reference()) {
                 if (above.ref_frames.primary == left.ref_frames.primary && above.ref_frames.secondary == left.ref_frames.secondary) {
-                    context = 3 * (above.ref_frames.primary == GoldenFrame || above.ref_frames.secondary == GoldenFrame);
+                    context = 3 * (above.ref_frames.primary == ReferenceFrameType::GoldenFrame || above.ref_frames.secondary == ReferenceFrameType::GoldenFrame);
                 } else {
                     context = 2;
                 }
@@ -519,29 +519,29 @@ ErrorOr<bool> TreeParser::parse_single_ref_part_2(BitStream& bit_stream, Probabi
                 auto single_reference_type = above.is_single_reference() ? above.ref_frames.primary : left.ref_frames.primary;
                 auto compound_reference_a_type = above.is_single_reference() ? left.ref_frames.primary : above.ref_frames.primary;
                 auto compound_reference_b_type = above.is_single_reference() ? left.ref_frames.secondary : above.ref_frames.secondary;
-                context = compound_reference_a_type == GoldenFrame || compound_reference_b_type == GoldenFrame;
-                if (single_reference_type == GoldenFrame) {
+                context = compound_reference_a_type == ReferenceFrameType::GoldenFrame || compound_reference_b_type == ReferenceFrameType::GoldenFrame;
+                if (single_reference_type == ReferenceFrameType::GoldenFrame) {
                     context += 3;
-                } else if (single_reference_type != AltRefFrame) {
+                } else if (single_reference_type != ReferenceFrameType::AltRefFrame) {
                     context = 1 + (2 * context);
                 }
             }
         }
     } else if (above.is_available) {
-        if (above.is_intra_predicted() || (above.ref_frames.primary == LastFrame && above.is_single_reference())) {
+        if (above.is_intra_predicted() || (above.ref_frames.primary == ReferenceFrameType::LastFrame && above.is_single_reference())) {
             context = 2;
         } else if (above.is_single_reference()) {
-            context = 4 * (above.ref_frames.primary == GoldenFrame);
+            context = 4 * (above.ref_frames.primary == ReferenceFrameType::GoldenFrame);
         } else {
-            context = 3 * (above.ref_frames.primary == GoldenFrame || above.ref_frames.secondary == GoldenFrame);
+            context = 3 * (above.ref_frames.primary == ReferenceFrameType::GoldenFrame || above.ref_frames.secondary == ReferenceFrameType::GoldenFrame);
        }
     } else if (left.is_available) {
-        if (left.is_intra_predicted() || (left.ref_frames.primary == LastFrame && left.is_single_reference())) {
+        if (left.is_intra_predicted() || (left.ref_frames.primary == ReferenceFrameType::LastFrame && left.is_single_reference())) {
            context = 2;
         } else if (left.is_single_reference()) {
-            context = 4 * (left.ref_frames.primary == GoldenFrame);
+            context = 4 * (left.ref_frames.primary == ReferenceFrameType::GoldenFrame);
         } else {
-            context = 3 * (left.ref_frames.primary == GoldenFrame || left.ref_frames.secondary == GoldenFrame);
+            context = 3 * (left.ref_frames.primary == ReferenceFrameType::GoldenFrame || left.ref_frames.secondary == ReferenceFrameType::GoldenFrame);
         }
     } else {
         context = 2;