author      Zaggy1024 <zaggy1024@gmail.com>          2022-11-25 20:45:56 -0600
committer   Andreas Kling <kling@serenityos.org>     2022-11-30 08:28:30 +0100
commit      facb779b995c36cf47a310851f54d55b08e63834 (patch)
tree        c8265f3af57316f5f480aa2cedd03d05de182c24 /Userland/Libraries
parent      062da60443b64803921dcd23ac8c169737e8ed93 (diff)
download    serenity-facb779b995c36cf47a310851f54d55b08e63834.zip
LibVideo/VP9: Replace (DCT|ADST)_(DCT|ADST) with struct TransformSet
Those previous constants were only set and used to select the first and second transforms done by the Decoder class. By turning them into a struct, we can make the code a bit more legible while keeping those transform modes the same size as before or smaller.
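For context, here is a minimal standalone sketch (not part of this commit) of the idea behind TransformSet: two 1-bit bit-fields of the TransformType enum replace the old combined DCT_DCT/ADST_DCT/DCT_ADST/ADST_ADST constants, so the pair of selections still fits in a single byte on the compilers Serenity builds with (GCC/Clang). The main() function and the size check are illustrative assumptions, not code from the commit.

#include <cstdint>

// Same shape as the declarations this commit adds to Enums.h below.
enum class TransformType : uint8_t {
    DCT = 0,
    ADST = 1,
};

struct TransformSet {
    TransformType first_transform : 1;  // applied to columns by the decoder
    TransformType second_transform : 1; // applied to rows by the decoder
};

// The old constants were plain u8 values; with GCC/Clang the two 1-bit
// fields pack into one byte, matching "the same size as before or smaller".
static_assert(sizeof(TransformSet) == 1, "TransformSet should fit in a single byte");

int main()
{
    // Equivalent of the old ADST_DCT constant: ADST first, DCT second.
    TransformSet set { TransformType::ADST, TransformType::DCT };
    return set.first_transform == TransformType::ADST ? 0 : 1;
}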
Diffstat (limited to 'Userland/Libraries')
-rw-r--r--   Userland/Libraries/LibVideo/VP9/Decoder.cpp      | 22
-rw-r--r--   Userland/Libraries/LibVideo/VP9/Decoder.h        |  4
-rw-r--r--   Userland/Libraries/LibVideo/VP9/Enums.h          | 15
-rw-r--r--   Userland/Libraries/LibVideo/VP9/LookupTables.h   | 30
-rw-r--r--   Userland/Libraries/LibVideo/VP9/Parser.cpp       | 35
-rw-r--r--   Userland/Libraries/LibVideo/VP9/Parser.h         |  2
-rw-r--r--   Userland/Libraries/LibVideo/VP9/Symbols.h        |  4
-rw-r--r--   Userland/Libraries/LibVideo/VP9/TreeParser.cpp   |  6
-rw-r--r--   Userland/Libraries/LibVideo/VP9/TreeParser.h     |  2
9 files changed, 65 insertions(+), 55 deletions(-)
diff --git a/Userland/Libraries/LibVideo/VP9/Decoder.cpp b/Userland/Libraries/LibVideo/VP9/Decoder.cpp
index b4eafbf941..252618254b 100644
--- a/Userland/Libraries/LibVideo/VP9/Decoder.cpp
+++ b/Userland/Libraries/LibVideo/VP9/Decoder.cpp
@@ -1066,7 +1066,7 @@ u16 Decoder::get_ac_quantizer(BlockContext const& block_context, u8 plane)
return ac_q(block_context.frame_context.color_config.bit_depth, static_cast<u8>(get_base_quantizer_index(block_context) + offset));
}
-DecoderErrorOr<void> Decoder::reconstruct(u8 plane, BlockContext const& block_context, u32 transform_block_x, u32 transform_block_y, TXSize transform_block_size, u8 transform_type)
+DecoderErrorOr<void> Decoder::reconstruct(u8 plane, BlockContext const& block_context, u32 transform_block_x, u32 transform_block_y, TXSize transform_block_size, TransformSet transform_set)
{
// 8.6.2 Reconstruct process
@@ -1100,7 +1100,7 @@ DecoderErrorOr<void> Decoder::reconstruct(u8 plane, BlockContext const& block_co
// 3. Invoke the 2D inverse transform block process defined in section 8.7.2 with the variable n as input.
// The inverse transform outputs are stored back to the Dequant buffer.
- TRY(inverse_transform_2d(block_context, dequantized, log2_of_block_size, transform_type));
+ TRY(inverse_transform_2d(block_context, dequantized, log2_of_block_size, transform_set));
// 4. CurrFrame[ plane ][ y + i ][ x + j ] is set equal to Clip1( CurrFrame[ plane ][ y + i ][ x + j ] + Dequant[ i ][ j ] )
// for i = 0..(n0-1) and j = 0..(n0-1).
@@ -1653,7 +1653,7 @@ inline DecoderErrorOr<void> Decoder::inverse_asymmetric_discrete_sine_transform(
return inverse_asymmetric_discrete_sine_transform_16(bit_depth, data);
}
-DecoderErrorOr<void> Decoder::inverse_transform_2d(BlockContext const& block_context, Span<Intermediate> dequantized, u8 log2_of_block_size, u8 transform_type)
+DecoderErrorOr<void> Decoder::inverse_transform_2d(BlockContext const& block_context, Span<Intermediate> dequantized, u8 log2_of_block_size, TransformSet transform_set)
{
// This process performs a 2D inverse transform for an array of size 2^n by 2^n stored in the 2D array Dequant.
// The input to this process is a variable n (log2_of_block_size) that specifies the base 2 logarithm of the width of the transform.
@@ -1676,9 +1676,8 @@ DecoderErrorOr<void> Decoder::inverse_transform_2d(BlockContext const& block_con
TRY(inverse_walsh_hadamard_transform(row, log2_of_block_size, 2));
continue;
}
- switch (transform_type) {
- case DCT_DCT:
- case ADST_DCT:
+ switch (transform_set.second_transform) {
+ case TransformType::DCT:
// Otherwise, if TxType is equal to DCT_DCT or TxType is equal to ADST_DCT, apply an inverse DCT as
// follows:
// 1. Invoke the inverse DCT permutation process as specified in section 8.7.1.2 with the input variable n.
@@ -1686,8 +1685,7 @@ DecoderErrorOr<void> Decoder::inverse_transform_2d(BlockContext const& block_con
// 2. Invoke the inverse DCT process as specified in section 8.7.1.3 with the input variable n.
TRY(inverse_discrete_cosine_transform(block_context.frame_context.color_config.bit_depth, row, log2_of_block_size));
break;
- case DCT_ADST:
- case ADST_ADST:
+ case TransformType::ADST:
// 4. Otherwise (TxType is equal to DCT_ADST or TxType is equal to ADST_ADST), invoke the inverse ADST
// process as specified in section 8.7.1.9 with input variable n.
TRY(inverse_asymmetric_discrete_sine_transform(block_context.frame_context.color_config.bit_depth, row, log2_of_block_size));
@@ -1716,9 +1714,8 @@ DecoderErrorOr<void> Decoder::inverse_transform_2d(BlockContext const& block_con
TRY(inverse_walsh_hadamard_transform(column, log2_of_block_size, 2));
continue;
}
- switch (transform_type) {
- case DCT_DCT:
- case DCT_ADST:
+ switch (transform_set.first_transform) {
+ case TransformType::DCT:
// Otherwise, if TxType is equal to DCT_DCT or TxType is equal to DCT_ADST, apply an inverse DCT as
// follows:
// 1. Invoke the inverse DCT permutation process as specified in section 8.7.1.2 with the input variable n.
@@ -1726,8 +1723,7 @@ DecoderErrorOr<void> Decoder::inverse_transform_2d(BlockContext const& block_con
// 2. Invoke the inverse DCT process as specified in section 8.7.1.3 with the input variable n.
TRY(inverse_discrete_cosine_transform(block_context.frame_context.color_config.bit_depth, column, log2_of_block_size));
break;
- case ADST_DCT:
- case ADST_ADST:
+ case TransformType::ADST:
// 4. Otherwise (TxType is equal to ADST_DCT or TxType is equal to ADST_ADST), invoke the inverse ADST
// process as specified in section 8.7.1.9 with input variable n.
TRY(inverse_asymmetric_discrete_sine_transform(block_context.frame_context.color_config.bit_depth, column, log2_of_block_size));
diff --git a/Userland/Libraries/LibVideo/VP9/Decoder.h b/Userland/Libraries/LibVideo/VP9/Decoder.h
index fddcb569cb..f980ae194a 100644
--- a/Userland/Libraries/LibVideo/VP9/Decoder.h
+++ b/Userland/Libraries/LibVideo/VP9/Decoder.h
@@ -81,10 +81,10 @@ private:
static u16 get_ac_quantizer(BlockContext const&, u8 plane);
// (8.6.2) Reconstruct process
- DecoderErrorOr<void> reconstruct(u8 plane, BlockContext const&, u32 transform_block_x, u32 transform_block_y, TXSize transform_block_size, u8 transform_type);
+ DecoderErrorOr<void> reconstruct(u8 plane, BlockContext const&, u32 transform_block_x, u32 transform_block_y, TXSize transform_block_size, TransformSet);
// (8.7) Inverse transform process
- DecoderErrorOr<void> inverse_transform_2d(BlockContext const&, Span<Intermediate> dequantized, u8 log2_of_block_size, u8 transform_type);
+ DecoderErrorOr<void> inverse_transform_2d(BlockContext const&, Span<Intermediate> dequantized, u8 log2_of_block_size, TransformSet);
// (8.7.1) 1D Transforms
// (8.7.1.1) Butterfly functions
diff --git a/Userland/Libraries/LibVideo/VP9/Enums.h b/Userland/Libraries/LibVideo/VP9/Enums.h
index a46c4c41bc..99e01601ae 100644
--- a/Userland/Libraries/LibVideo/VP9/Enums.h
+++ b/Userland/Libraries/LibVideo/VP9/Enums.h
@@ -60,6 +60,21 @@ enum TXSize : u8 {
TX_32x32 = 3,
};
+enum class TransformType : u8 {
+ DCT = 0,
+ ADST = 1,
+};
+
+struct TransformSet {
+ TransformType first_transform : 1;
+ TransformType second_transform : 1;
+
+ bool operator==(TransformSet const& other) const
+ {
+ return first_transform == other.first_transform && second_transform == other.second_transform;
+ }
+};
+
enum ReferenceMode : u8 {
SingleReference = 0,
CompoundReference = 1,
diff --git a/Userland/Libraries/LibVideo/VP9/LookupTables.h b/Userland/Libraries/LibVideo/VP9/LookupTables.h
index f6f3eaea6e..370544cf10 100644
--- a/Userland/Libraries/LibVideo/VP9/LookupTables.h
+++ b/Userland/Libraries/LibVideo/VP9/LookupTables.h
@@ -244,21 +244,21 @@ static constexpr u16 row_scan_16x16[256] = { 0, 1, 2, 16, 3, 17, 4, 18, 32, 5, 3
static constexpr u16 default_scan_32x32[1024] = { 0, 32, 1, 64, 33, 2, 96, 65, 34, 128, 3, 97, 66, 160, 129, 35, 98, 4, 67, 130, 161, 192, 36, 99, 224, 5, 162, 193, 68, 131, 37, 100, 225, 194, 256, 163, 69, 132, 6, 226, 257, 288, 195, 101, 164, 38, 258, 7, 227, 289, 133, 320, 70, 196, 165, 290, 259, 228, 39, 321, 102, 352, 8, 197, 71, 134, 322, 291, 260, 353, 384, 229, 166, 103, 40, 354, 323, 292, 135, 385, 198, 261, 72, 9, 416, 167, 386, 355, 230, 324, 104, 293, 41, 417, 199, 136, 262, 387, 448, 325, 356, 10, 73, 418, 231, 168, 449, 294, 388, 105, 419, 263, 42, 200, 357, 450, 137, 480, 74, 326, 232, 11, 389, 169, 295, 420, 106, 451, 481, 358, 264, 327, 201, 43, 138, 512, 482, 390, 296, 233, 170, 421, 75, 452, 359, 12, 513, 265, 483, 328, 107, 202, 514, 544, 422, 391, 453, 139, 44, 234, 484, 297, 360, 171, 76, 515, 545, 266, 329, 454, 13, 423, 203, 108, 546, 485, 576, 298, 235, 140, 361, 330, 172, 547, 45, 455, 267, 577, 486, 77, 204, 362, 608, 14, 299, 578, 109, 236, 487, 609, 331, 141, 579, 46, 15, 173, 610, 363, 78, 205, 16, 110, 237, 611, 142, 47, 174, 79, 206, 17, 111, 238, 48, 143, 80, 175, 112, 207, 49, 18, 239, 81, 113, 19, 50, 82, 114, 51, 83, 115, 640, 516, 392, 268, 144, 20, 672, 641, 548, 517, 424, 393, 300, 269, 176, 145, 52, 21, 704, 673, 642, 580, 549, 518, 456, 425, 394, 332, 301, 270, 208, 177, 146, 84, 53, 22, 736, 705, 674, 643, 612, 581, 550, 519, 488, 457, 426, 395, 364, 333, 302, 271, 240, 209, 178, 147, 116, 85, 54, 23, 737, 706, 675, 613, 582, 551, 489, 458, 427, 365, 334, 303, 241, 210, 179, 117, 86, 55, 738, 707, 614, 583, 490, 459, 366, 335, 242, 211, 118, 87, 739, 615, 491, 367, 243, 119, 768, 644, 520, 396, 272, 148, 24, 800, 769, 676, 645, 552, 521, 428, 397, 304, 273, 180, 149, 56, 25, 832, 801, 770, 708, 677, 646, 584, 553, 522, 460, 429, 398, 336, 305, 274, 212, 181, 150, 88, 57, 26, 864, 833, 802, 771, 740, 709, 678, 647, 616, 585, 554, 523, 492, 461, 430, 399, 368, 337, 306, 275, 244, 213, 182, 151, 120, 89, 58, 27, 865, 834, 803, 741, 710, 679, 617, 586, 555, 493, 462, 431, 369, 338, 307, 245, 214, 183, 121, 90, 59, 866, 835, 742, 711, 618, 587, 494, 463, 370, 339, 246, 215, 122, 91, 867, 743, 619, 495, 371, 247, 123, 896, 772, 648, 524, 400, 276, 152, 28, 928, 897, 804, 773, 680, 649, 556, 525, 432, 401, 308, 277, 184, 153, 60, 29, 960, 929, 898, 836, 805, 774, 712, 681, 650, 588, 557, 526, 464, 433, 402, 340, 309, 278, 216, 185, 154, 92, 61, 30, 992, 961, 930, 899, 868, 837, 806, 775, 744, 713, 682, 651, 620, 589, 558, 527, 496, 465, 434, 403, 372, 341, 310, 279, 248, 217, 186, 155, 124, 93, 62, 31, 993, 962, 931, 869, 838, 807, 745, 714, 683, 621, 590, 559, 497, 466, 435, 373, 342, 311, 249, 218, 187, 125, 94, 63, 994, 963, 870, 839, 746, 715, 622, 591, 498, 467, 374, 343, 250, 219, 126, 95, 995, 871, 747, 623, 499, 375, 251, 127, 900, 776, 652, 528, 404, 280, 156, 932, 901, 808, 777, 684, 653, 560, 529, 436, 405, 312, 281, 188, 157, 964, 933, 902, 840, 809, 778, 716, 685, 654, 592, 561, 530, 468, 437, 406, 344, 313, 282, 220, 189, 158, 996, 965, 934, 903, 872, 841, 810, 779, 748, 717, 686, 655, 624, 593, 562, 531, 500, 469, 438, 407, 376, 345, 314, 283, 252, 221, 190, 159, 997, 966, 935, 873, 842, 811, 749, 718, 687, 625, 594, 563, 501, 470, 439, 377, 346, 315, 253, 222, 191, 998, 967, 874, 843, 750, 719, 626, 595, 502, 471, 378, 347, 254, 223, 999, 875, 751, 627, 503, 379, 255, 904, 780, 656, 532, 408, 284, 936, 905, 812, 781, 688, 657, 564, 533, 440, 409, 316, 285, 968, 937, 906, 844, 813, 782, 720, 689, 658, 596, 565, 534, 472, 441, 410, 348, 317, 
286, 1000, 969, 938, 907, 876, 845, 814, 783, 752, 721, 690, 659, 628, 597, 566, 535, 504, 473, 442, 411, 380, 349, 318, 287, 1001, 970, 939, 877, 846, 815, 753, 722, 691, 629, 598, 567, 505, 474, 443, 381, 350, 319, 1002, 971, 878, 847, 754, 723, 630, 599, 506, 475, 382, 351, 1003, 879, 755, 631, 507, 383, 908, 784, 660, 536, 412, 940, 909, 816, 785, 692, 661, 568, 537, 444, 413, 972, 941, 910, 848, 817, 786, 724, 693, 662, 600, 569, 538, 476, 445, 414, 1004, 973, 942, 911, 880, 849, 818, 787, 756, 725, 694, 663, 632, 601, 570, 539, 508, 477, 446, 415, 1005, 974, 943, 881, 850, 819, 757, 726, 695, 633, 602, 571, 509, 478, 447, 1006, 975, 882, 851, 758, 727, 634, 603, 510, 479, 1007, 883, 759, 635, 511, 912, 788, 664, 540, 944, 913, 820, 789, 696, 665, 572, 541, 976, 945, 914, 852, 821, 790, 728, 697, 666, 604, 573, 542, 1008, 977, 946, 915, 884, 853, 822, 791, 760, 729, 698, 667, 636, 605, 574, 543, 1009, 978, 947, 885, 854, 823, 761, 730, 699, 637, 606, 575, 1010, 979, 886, 855, 762, 731, 638, 607, 1011, 887, 763, 639, 916, 792, 668, 948, 917, 824, 793, 700, 669, 980, 949, 918, 856, 825, 794, 732, 701, 670, 1012, 981, 950, 919, 888, 857, 826, 795, 764, 733, 702, 671, 1013, 982, 951, 889, 858, 827, 765, 734, 703, 1014, 983, 890, 859, 766, 735, 1015, 891, 767, 920, 796, 952, 921, 828, 797, 984, 953, 922, 860, 829, 798, 1016, 985, 954, 923, 892, 861, 830, 799, 1017, 986, 955, 893, 862, 831, 1018, 987, 894, 863, 1019, 895, 924, 956, 925, 988, 957, 926, 1020, 989, 958, 927, 1021, 990, 959, 1022, 991, 1023 };
-static constexpr u8 mode_to_txfm_map[MB_MODE_COUNT] = {
- DCT_DCT, // DC
- ADST_DCT, // V
- DCT_ADST, // H
- DCT_DCT, // D45
- ADST_ADST, // D135
- ADST_DCT, // D117
- DCT_ADST, // D153
- DCT_ADST, // D207
- ADST_DCT, // D63
- ADST_ADST, // TM
- DCT_DCT, // NEARESTMV
- DCT_DCT, // NEARMV
- DCT_DCT, // ZEROMV
- DCT_DCT // NEWMV
+static constexpr TransformSet mode_to_txfm_map[MB_MODE_COUNT] = {
+ { TransformType::DCT, TransformType::DCT }, // DC
+ { TransformType::ADST, TransformType::DCT }, // V
+ { TransformType::DCT, TransformType::ADST }, // H
+ { TransformType::DCT, TransformType::DCT }, // D45
+ { TransformType::ADST, TransformType::ADST }, // D135
+ { TransformType::ADST, TransformType::DCT }, // D117
+ { TransformType::DCT, TransformType::ADST }, // D153
+ { TransformType::DCT, TransformType::ADST }, // D207
+ { TransformType::ADST, TransformType::DCT }, // D63
+ { TransformType::ADST, TransformType::ADST }, // TM
+ { TransformType::DCT, TransformType::DCT }, // NEARESTMV
+ { TransformType::DCT, TransformType::DCT }, // NEARMV
+ { TransformType::DCT, TransformType::DCT }, // ZEROMV
+ { TransformType::DCT, TransformType::DCT } // NEWMV
};
static constexpr u8 coefband_4x4[16] = { 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5 };
diff --git a/Userland/Libraries/LibVideo/VP9/Parser.cpp b/Userland/Libraries/LibVideo/VP9/Parser.cpp
index ebdc7e5e71..ca31c6731a 100644
--- a/Userland/Libraries/LibVideo/VP9/Parser.cpp
+++ b/Userland/Libraries/LibVideo/VP9/Parser.cpp
@@ -1344,13 +1344,13 @@ static TXSize get_uv_tx_size(bool subsampling_x, bool subsampling_y, TXSize tx_s
return min(tx_size, max_txsize_lookup[get_plane_block_size(subsampling_x, subsampling_y, size, 1)]);
}
-static u8 select_transform_type(BlockContext const& block_context, u8 plane, TXSize tx_size, u32 block_index)
+static TransformSet select_transform_type(BlockContext const& block_context, u8 plane, TXSize tx_size, u32 block_index)
{
if (plane > 0 || tx_size == TX_32x32)
- return DCT_DCT;
+ return TransformSet { TransformType::DCT, TransformType::DCT };
if (tx_size == TX_4x4) {
if (block_context.frame_context.is_lossless() || block_context.is_inter_predicted())
- return DCT_DCT;
+ return TransformSet { TransformType::DCT, TransformType::DCT };
return mode_to_txfm_map[to_underlying(block_context.size < Block_8x8 ? block_context.sub_block_prediction_modes[block_index] : block_context.y_prediction_mode())];
}
@@ -1395,10 +1395,10 @@ DecoderErrorOr<bool> Parser::residual(BlockContext& block_context, bool has_bloc
if (!block_context.is_inter_predicted())
TRY(m_decoder.predict_intra(plane, block_context, start_x, start_y, has_block_left || x > 0, has_block_above || y > 0, (x + step) < num_4x4_w, tx_size, block_index));
if (!block_context.should_skip_residuals) {
- auto transform_type = select_transform_type(block_context, plane, tx_size, block_index);
- non_zero = TRY(tokens(block_context, plane, start_x, start_y, tx_size, transform_type));
+ auto transform_set = select_transform_type(block_context, plane, tx_size, block_index);
+ non_zero = TRY(tokens(block_context, plane, start_x, start_y, tx_size, transform_set));
had_residual_tokens = had_residual_tokens || non_zero;
- TRY(m_decoder.reconstruct(plane, block_context, start_x, start_y, tx_size, transform_type));
+ TRY(m_decoder.reconstruct(plane, block_context, start_x, start_y, tx_size, transform_set));
}
}
@@ -1421,42 +1421,45 @@ DecoderErrorOr<bool> Parser::residual(BlockContext& block_context, bool has_bloc
return had_residual_tokens;
}
-static u16 const* get_scan(TXSize tx_size, u8 transform_type)
+static u16 const* get_scan(TXSize tx_size, TransformSet transform_set)
{
+ constexpr TransformSet adst_dct { TransformType::ADST, TransformType::DCT };
+ constexpr TransformSet dct_adst { TransformType::DCT, TransformType::ADST };
+
if (tx_size == TX_4x4) {
- if (transform_type == ADST_DCT)
+ if (transform_set == adst_dct)
return row_scan_4x4;
- if (transform_type == DCT_ADST)
+ if (transform_set == dct_adst)
return col_scan_4x4;
return default_scan_4x4;
}
if (tx_size == TX_8x8) {
- if (transform_type == ADST_DCT)
+ if (transform_set == adst_dct)
return row_scan_8x8;
- if (transform_type == DCT_ADST)
+ if (transform_set == dct_adst)
return col_scan_8x8;
return default_scan_8x8;
}
if (tx_size == TX_16x16) {
- if (transform_type == ADST_DCT)
+ if (transform_set == adst_dct)
return row_scan_16x16;
- if (transform_type == DCT_ADST)
+ if (transform_set == dct_adst)
return col_scan_16x16;
return default_scan_16x16;
}
return default_scan_32x32;
}
-DecoderErrorOr<bool> Parser::tokens(BlockContext& block_context, size_t plane, u32 start_x, u32 start_y, TXSize tx_size, u8 transform_type)
+DecoderErrorOr<bool> Parser::tokens(BlockContext& block_context, size_t plane, u32 start_x, u32 start_y, TXSize tx_size, TransformSet transform_set)
{
u16 segment_eob = 16 << (tx_size << 1);
- auto const* scan = get_scan(tx_size, transform_type);
+ auto const* scan = get_scan(tx_size, transform_set);
auto check_eob = true;
u16 coef_index = 0;
for (; coef_index < segment_eob; coef_index++) {
auto pos = scan[coef_index];
auto band = (tx_size == TX_4x4) ? coefband_4x4[coef_index] : coefband_8x8plus[coef_index];
- auto tokens_context = TreeParser::get_tokens_context(block_context.frame_context.color_config.subsampling_x, block_context.frame_context.color_config.subsampling_y, block_context.frame_context.rows(), block_context.frame_context.columns(), m_above_nonzero_context, m_left_nonzero_context, m_token_cache, tx_size, transform_type, plane, start_x, start_y, pos, block_context.is_inter_predicted(), band, coef_index);
+ auto tokens_context = TreeParser::get_tokens_context(block_context.frame_context.color_config.subsampling_x, block_context.frame_context.color_config.subsampling_y, block_context.frame_context.rows(), block_context.frame_context.columns(), m_above_nonzero_context, m_left_nonzero_context, m_token_cache, tx_size, transform_set, plane, start_x, start_y, pos, block_context.is_inter_predicted(), band, coef_index);
if (check_eob) {
auto more_coefs = TRY_READ(TreeParser::parse_more_coefficients(*m_bit_stream, *m_probability_tables, *m_syntax_element_counter, tokens_context));
if (!more_coefs)
diff --git a/Userland/Libraries/LibVideo/VP9/Parser.h b/Userland/Libraries/LibVideo/VP9/Parser.h
index 5be850589e..3dc86578d1 100644
--- a/Userland/Libraries/LibVideo/VP9/Parser.h
+++ b/Userland/Libraries/LibVideo/VP9/Parser.h
@@ -117,7 +117,7 @@ private:
DecoderErrorOr<MotionVector> read_motion_vector(BlockContext const&, BlockMotionVectorCandidates const&, ReferenceIndex);
DecoderErrorOr<i32> read_single_motion_vector_component(u8 component);
DecoderErrorOr<bool> residual(BlockContext&, bool has_block_above, bool has_block_left);
- DecoderErrorOr<bool> tokens(BlockContext&, size_t plane, u32 x, u32 y, TXSize tx_size, u8 transform_type);
+ DecoderErrorOr<bool> tokens(BlockContext&, size_t plane, u32 x, u32 y, TXSize tx_size, TransformSet);
DecoderErrorOr<i32> read_coef(u8 bit_depth, Token token);
/* (6.5) Motion Vector Prediction */
diff --git a/Userland/Libraries/LibVideo/VP9/Symbols.h b/Userland/Libraries/LibVideo/VP9/Symbols.h
index 6a8cfcbff8..2f2d6af4ad 100644
--- a/Userland/Libraries/LibVideo/VP9/Symbols.h
+++ b/Userland/Libraries/LibVideo/VP9/Symbols.h
@@ -43,10 +43,6 @@ namespace Video::VP9 {
#define PARTITION_TYPES 4
#define TX_SIZES 4
#define TX_MODES 5
-#define DCT_DCT 0
-#define ADST_DCT 1
-#define DCT_ADST 2
-#define ADST_ADST 3
#define MB_MODE_COUNT 14
#define INTRA_MODES 10
#define INTER_MODES 4
diff --git a/Userland/Libraries/LibVideo/VP9/TreeParser.cpp b/Userland/Libraries/LibVideo/VP9/TreeParser.cpp
index 1f45b981e7..495bd3a831 100644
--- a/Userland/Libraries/LibVideo/VP9/TreeParser.cpp
+++ b/Userland/Libraries/LibVideo/VP9/TreeParser.cpp
@@ -624,7 +624,7 @@ ErrorOr<bool> TreeParser::parse_motion_vector_hp(BitStream& bit_stream, Probabil
return value;
}
-TokensContext TreeParser::get_tokens_context(bool subsampling_x, bool subsampling_y, u32 rows, u32 columns, Array<Vector<bool>, 3> const& above_nonzero_context, Array<Vector<bool>, 3> const& left_nonzero_context, u8 token_cache[1024], TXSize tx_size, u8 tx_type, u8 plane, u32 start_x, u32 start_y, u16 position, bool is_inter, u8 band, u16 coef_index)
+TokensContext TreeParser::get_tokens_context(bool subsampling_x, bool subsampling_y, u32 rows, u32 columns, Array<Vector<bool>, 3> const& above_nonzero_context, Array<Vector<bool>, 3> const& left_nonzero_context, u8 token_cache[1024], TXSize tx_size, TransformSet transform_set, u8 plane, u32 start_x, u32 start_y, u16 position, bool is_inter, u8 band, u16 coef_index)
{
u8 context;
if (coef_index == 0) {
@@ -652,10 +652,10 @@ TokensContext TreeParser::get_tokens_context(bool subsampling_x, bool subsamplin
auto a = i > 0 ? (i - 1) * n + j : 0;
auto a2 = i * n + j - 1;
if (i > 0 && j > 0) {
- if (tx_type == DCT_ADST) {
+ if (transform_set == TransformSet { TransformType::DCT, TransformType::ADST }) {
neighbor_0 = a;
neighbor_1 = a;
- } else if (tx_type == ADST_DCT) {
+ } else if (transform_set == TransformSet { TransformType::ADST, TransformType::DCT }) {
neighbor_0 = a2;
neighbor_1 = a2;
} else {
diff --git a/Userland/Libraries/LibVideo/VP9/TreeParser.h b/Userland/Libraries/LibVideo/VP9/TreeParser.h
index 05770122ea..4002402a69 100644
--- a/Userland/Libraries/LibVideo/VP9/TreeParser.h
+++ b/Userland/Libraries/LibVideo/VP9/TreeParser.h
@@ -76,7 +76,7 @@ public:
static ErrorOr<u8> parse_motion_vector_fr(BitStream&, ProbabilityTables const&, SyntaxElementCounter&, u8 component);
static ErrorOr<bool> parse_motion_vector_hp(BitStream&, ProbabilityTables const&, SyntaxElementCounter&, u8 component, bool use_hp);
- static TokensContext get_tokens_context(bool subsampling_x, bool subsampling_y, u32 rows, u32 columns, Array<Vector<bool>, 3> const& above_nonzero_context, Array<Vector<bool>, 3> const& left_nonzero_context, u8 token_cache[1024], TXSize tx_size, u8 tx_type, u8 plane, u32 start_x, u32 start_y, u16 position, bool is_inter, u8 band, u16 coef_index);
+ static TokensContext get_tokens_context(bool subsampling_x, bool subsampling_y, u32 rows, u32 columns, Array<Vector<bool>, 3> const& above_nonzero_context, Array<Vector<bool>, 3> const& left_nonzero_context, u8 token_cache[1024], TXSize, TransformSet, u8 plane, u32 start_x, u32 start_y, u16 position, bool is_inter, u8 band, u16 coef_index);
static ErrorOr<bool> parse_more_coefficients(BitStream&, ProbabilityTables const&, SyntaxElementCounter&, TokensContext const& context);
static ErrorOr<Token> parse_token(BitStream&, ProbabilityTables const&, SyntaxElementCounter&, TokensContext const& context);
};