/*
 * Copyright (c) 2021-2022, Matthew Olsson
 * Copyright (c) 2022, Julian Offenhäuser
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/BitStream.h>
#include <AK/MemoryStream.h>
#include <AK/Tuple.h>
#include <LibPDF/CommonNames.h>
#include <LibPDF/Document.h>
#include <LibPDF/DocumentParser.h>
#include <LibPDF/ObjectDerivatives.h>

namespace PDF {

DocumentParser::DocumentParser(Document* document, ReadonlyBytes bytes)
    : Parser(document, bytes)
{
}

PDFErrorOr<void> DocumentParser::initialize()
{
    TRY(parse_header());

    auto const linearization_result = TRY(initialize_linearization_dict());

    if (linearization_result == LinearizationResult::NotLinearized)
        return initialize_non_linearized_xref_table();

    bool is_linearized = m_linearization_dictionary.has_value();
    if (is_linearized) {
        // The file may have been linearized at one point, but could have been updated afterwards,
        // which means it is no longer a linearized PDF file.
        is_linearized = m_linearization_dictionary.value().length_of_file == m_reader.bytes().size();

        if (!is_linearized) {
            // FIXME: The file shouldn't be treated as linearized, yet the xref tables are still
            //        split. This might take some tweaking to ensure correct behavior, which can be
            //        implemented later.
            TODO();
        }
    }

    if (is_linearized)
        return initialize_linearized_xref_table();

    return initialize_non_linearized_xref_table();
}

PDFErrorOr<Value> DocumentParser::parse_object_with_index(u32 index)
{
    VERIFY(m_xref_table->has_object(index));

    if (m_xref_table->is_object_compressed(index))
        // The object can be found in an object stream
        return parse_compressed_object_with_index(index);

    auto byte_offset = m_xref_table->byte_offset_for_object(index);
    m_reader.move_to(byte_offset);
    auto indirect_value = TRY(parse_indirect_value());
    VERIFY(indirect_value->index() == index);
    return indirect_value->value();
}

PDFErrorOr<void> DocumentParser::parse_header()
{
    // FIXME: Do something with the version?
    m_reader.set_reading_forwards();
    if (m_reader.remaining() == 0)
        return error("Empty PDF document");

    m_reader.move_to(0);
    if (m_reader.remaining() < 8 || !m_reader.matches("%PDF-"))
        return error("Not a PDF document");

    m_reader.move_by(5);

    char major_ver = m_reader.read();
    if (major_ver != '1' && major_ver != '2')
        return error(DeprecatedString::formatted("Unknown major version \"{}\"", major_ver));

    if (m_reader.read() != '.')
        return error("Malformed PDF version");

    char minor_ver = m_reader.read();
    if (minor_ver < '0' || minor_ver > '7')
        return error(DeprecatedString::formatted("Unknown minor version \"{}\"", minor_ver));

    m_reader.consume_eol();

    // Parse optional high-byte comment, which signifies a binary file
    // FIXME: Do something with this?
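    // For illustration only (not taken from any particular document), the first two lines
    // of a binary PDF typically look like:
    //   %PDF-1.7
    //   %âãÏÓ
    // where every byte of the second comment line lies outside the ASCII range.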
    auto comment = parse_comment();
    if (!comment.is_empty()) {
        auto binary = comment.length() >= 4;
        if (binary) {
            for (size_t i = 0; i < comment.length() && binary; i++)
                binary = static_cast<u8>(comment[i]) > 128;
        }
    }

    return {};
}

PDFErrorOr<DocumentParser::LinearizationResult> DocumentParser::initialize_linearization_dict()
{
    // parse_header() is called immediately before this, so we are at the right location
    auto indirect_value = Value(*TRY(parse_indirect_value()));
    auto dict_value = TRY(m_document->resolve(indirect_value));
    if (!dict_value.has<NonnullRefPtr<Object>>())
        return error("Expected linearization object to be a dictionary");

    auto dict_object = dict_value.get<NonnullRefPtr<Object>>();
    if (!dict_object->is<DictObject>())
        return LinearizationResult::NotLinearized;

    auto dict = dict_object->cast<DictObject>();

    if (!dict->contains(CommonNames::Linearized))
        return LinearizationResult::NotLinearized;

    if (!dict->contains(CommonNames::L, CommonNames::H, CommonNames::O, CommonNames::E, CommonNames::N, CommonNames::T))
        return error("Malformed linearization dictionary");

    auto length_of_file = dict->get_value(CommonNames::L);
    auto hint_table = dict->get_value(CommonNames::H);
    auto first_page_object_number = dict->get_value(CommonNames::O);
    auto offset_of_first_page_end = dict->get_value(CommonNames::E);
    auto number_of_pages = dict->get_value(CommonNames::N);
    auto offset_of_main_xref_table = dict->get_value(CommonNames::T);
    auto first_page = dict->get(CommonNames::P).value_or({});

    // Validation
    if (!length_of_file.has_u32()
        || !hint_table.has<NonnullRefPtr<Object>>()
        || !first_page_object_number.has_u32()
        || !number_of_pages.has_u16()
        || !offset_of_main_xref_table.has_u32()
        || (!first_page.has<Empty>() && !first_page.has_u32())) {
        return error("Malformed linearization dictionary parameters");
    }

    auto hint_table_array = hint_table.get<NonnullRefPtr<Object>>()->cast<ArrayObject>();

    auto hint_table_size = hint_table_array->size();
    if (hint_table_size != 2 && hint_table_size != 4)
        return error("Expected hint table to be of length 2 or 4");

    auto primary_hint_stream_offset = hint_table_array->at(0);
    auto primary_hint_stream_length = hint_table_array->at(1);
    Value overflow_hint_stream_offset;
    Value overflow_hint_stream_length;

    if (hint_table_size == 4) {
        overflow_hint_stream_offset = hint_table_array->at(2);
        overflow_hint_stream_length = hint_table_array->at(3);
    }

    if (!primary_hint_stream_offset.has_u32()
        || !primary_hint_stream_length.has_u32()
        || (!overflow_hint_stream_offset.has<Empty>() && !overflow_hint_stream_offset.has_u32())
        || (!overflow_hint_stream_length.has<Empty>() && !overflow_hint_stream_length.has_u32())) {
        return error("Malformed hint stream");
    }

    m_linearization_dictionary = LinearizationDictionary {
        length_of_file.get_u32(),
        primary_hint_stream_offset.get_u32(),
        primary_hint_stream_length.get_u32(),
        overflow_hint_stream_offset.has<Empty>() ? NumericLimits<u32>::max() : overflow_hint_stream_offset.get_u32(),
        overflow_hint_stream_length.has<Empty>() ? NumericLimits<u32>::max() : overflow_hint_stream_length.get_u32(),
        first_page_object_number.get_u32(),
        offset_of_first_page_end.get_u32(),
        number_of_pages.get_u16(),
        offset_of_main_xref_table.get_u32(),
        first_page.has<Empty>() ? NumericLimits<u32>::max() : first_page.get_u32(),
    };

    return LinearizationResult::Linearized;
}
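// For reference, the linearization parameter dictionary parsed above typically looks
// something like this (all values are illustrative only):
//
//   1 0 obj
//   << /Linearized 1 /L 132456 /H [ 876 234 ] /O 5 /E 1234 /N 12 /T 131856 >>
//   endobj
//
// /L is the total file length, /H the hint stream offset(s) and length(s), /O the object
// number of the first page, /E its end offset, /N the page count, and /T the offset of
// the main xref table.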
PDFErrorOr<void> DocumentParser::initialize_linearized_xref_table()
{
    // The linearization parameter dictionary has just been parsed, and the xref table
    // comes immediately after it. We are in the correct spot.
    m_xref_table = TRY(parse_xref_table());
    if (!m_trailer)
        m_trailer = TRY(parse_file_trailer());

    // Also parse the main xref table and merge it into the first-page xref table. Note
    // that we don't use the main xref table offset from the linearization dict, because
    // for some reason it points at the whitespace after the object index start and
    // length, so it is much easier to do it this way.
    auto main_xref_table_offset = m_trailer->get_value(CommonNames::Prev).to_int();
    m_reader.move_to(main_xref_table_offset);
    auto main_xref_table = TRY(parse_xref_table());
    TRY(m_xref_table->merge(move(*main_xref_table)));

    return validate_xref_table_and_fix_if_necessary();
}

PDFErrorOr<void> DocumentParser::initialize_hint_tables()
{
    auto linearization_dict = m_linearization_dictionary.value();
    auto primary_offset = linearization_dict.primary_hint_stream_offset;
    auto overflow_offset = linearization_dict.overflow_hint_stream_offset;

    auto parse_hint_table = [&](size_t offset) -> RefPtr<StreamObject> {
        m_reader.move_to(offset);
        auto stream_indirect_value = parse_indirect_value();
        if (stream_indirect_value.is_error())
            return {};

        auto stream_value = stream_indirect_value.value()->value();
        if (!stream_value.has<NonnullRefPtr<Object>>())
            return {};

        auto stream_object = stream_value.get<NonnullRefPtr<Object>>();
        if (!stream_object->is<StreamObject>())
            return {};

        return stream_object->cast<StreamObject>();
    };

    auto primary_hint_stream = parse_hint_table(primary_offset);
    if (!primary_hint_stream)
        return error("Invalid primary hint stream");

    RefPtr<StreamObject> overflow_hint_stream;
    if (overflow_offset != NumericLimits<u32>::max())
        overflow_hint_stream = parse_hint_table(overflow_offset);

    ByteBuffer possible_merged_stream_buffer;
    ReadonlyBytes hint_stream_bytes;

    if (overflow_hint_stream) {
        auto primary_size = primary_hint_stream->bytes().size();
        auto overflow_size = overflow_hint_stream->bytes().size();
        auto total_size = primary_size + overflow_size;

        auto buffer_result = ByteBuffer::create_uninitialized(total_size);
        if (buffer_result.is_error())
            return Error { Error::Type::Internal, "Failed to allocate hint stream buffer" };
        possible_merged_stream_buffer = buffer_result.release_value();
        MUST(possible_merged_stream_buffer.try_append(primary_hint_stream->bytes()));
        MUST(possible_merged_stream_buffer.try_append(overflow_hint_stream->bytes()));
        hint_stream_bytes = possible_merged_stream_buffer.bytes();
    } else {
        hint_stream_bytes = primary_hint_stream->bytes();
    }

    auto hint_table = TRY(parse_page_offset_hint_table(hint_stream_bytes));
    auto hint_table_entries = TRY(parse_all_page_offset_hint_table_entries(hint_table, hint_stream_bytes));

    // FIXME: Do something with the hint tables
    return {};
}
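// For reference, the tail of a non-linearized document, which the function below walks
// backwards through, typically looks something like this (offsets illustrative only):
//
//   trailer
//   << /Size 64 /Root 1 0 R /Info 2 0 R >>
//   startxref
//   116218
//   %%EOF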
PDFErrorOr<void> DocumentParser::initialize_non_linearized_xref_table()
{
    m_reader.move_to(m_reader.bytes().size() - 1);
    if (!navigate_to_before_eof_marker())
        return error("No EOF marker");
    if (!navigate_to_after_startxref())
        return error("No xref");

    m_reader.set_reading_forwards();
    auto xref_offset_value = parse_number();
    if (xref_offset_value.is_error() || !xref_offset_value.value().has<int>())
        return error("Invalid xref offset");
    auto xref_offset = xref_offset_value.value().get<int>();

    m_reader.move_to(xref_offset);
    m_xref_table = TRY(parse_xref_table());
    if (!m_trailer)
        m_trailer = TRY(parse_file_trailer());
    return validate_xref_table_and_fix_if_necessary();
}

PDFErrorOr<void> DocumentParser::validate_xref_table_and_fix_if_necessary()
{
    /* While an xref table may start with an object number other than zero, this is
       very uncommon and likely a sign of a document with broken indices.
       Like most other PDF parsers seem to do, we still try to salvage the situation.
       NOTE: This is probably not spec-compliant behavior. */
    size_t first_valid_index = 0;
    while (m_xref_table->byte_offset_for_object(first_valid_index) == invalid_byte_offset)
        first_valid_index++;

    if (first_valid_index) {
        auto& entries = m_xref_table->entries();

        bool need_to_rebuild_table = true;
        for (size_t i = first_valid_index; i < entries.size(); ++i) {
            if (!entries[i].in_use)
                continue;

            size_t actual_object_number = 0;
            if (entries[i].compressed) {
                auto object_stream_index = m_xref_table->object_stream_for_object(i);
                auto stream_offset = m_xref_table->byte_offset_for_object(object_stream_index);
                m_reader.move_to(stream_offset);
                auto first_number = TRY(parse_number());
                actual_object_number = first_number.get_u32();
            } else {
                auto byte_offset = m_xref_table->byte_offset_for_object(i);
                m_reader.move_to(byte_offset);
                auto indirect_value = TRY(parse_indirect_value());
                actual_object_number = indirect_value->index();
            }

            if (actual_object_number != i - first_valid_index) {
                /* Our suspicion was wrong, not all object numbers are shifted equally.
                   This could mean that the document is hopelessly broken, or it just
                   starts at a non-zero object index for some reason. */
                need_to_rebuild_table = false;
                break;
            }
        }

        if (need_to_rebuild_table) {
            warnln("Broken xref table detected, trying to fix it.");
            entries.remove(0, first_valid_index);
        }
    }

    return {};
}
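// For reference, the dictionary of a cross-reference stream as handled below typically
// looks something like this (values illustrative only):
//
//   << /Type /XRef /Size 64 /W [ 1 2 1 ] /Index [ 0 64 ] /Filter /FlateDecode /Length 210 >>
//
// /W gives the byte width of the three fields of each entry (entry type, byte offset or
// object stream number, and generation number or index within the object stream), and
// /Index lists pairs of first object number and object count per subsection.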
PDFErrorOr<NonnullRefPtr<XRefTable>> DocumentParser::parse_xref_stream()
{
    auto first_number = TRY(parse_number());
    auto second_number = TRY(parse_number());

    if (!m_reader.matches("obj"))
        return error("Malformed xref object");
    m_reader.move_by(3);
    if (m_reader.matches_eol())
        m_reader.consume_eol();

    auto dict = TRY(parse_dict());
    auto type = TRY(dict->get_name(m_document, CommonNames::Type))->name();
    if (type != "XRef")
        return error("Malformed xref dictionary");

    auto field_sizes = TRY(dict->get_array(m_document, "W"));
    if (field_sizes->size() != 3)
        return error("Malformed xref dictionary");

    auto highest_object_number = dict->get_value("Size").get<int>() - 1;

    Vector<Tuple<int, int>> subsections;
    if (dict->contains(CommonNames::Index)) {
        auto index_array = TRY(dict->get_array(m_document, CommonNames::Index));
        if (index_array->size() % 2 != 0)
            return error("Malformed xref dictionary");

        for (size_t i = 0; i < index_array->size(); i += 2)
            subsections.append({ index_array->at(i).get<int>(), index_array->at(i + 1).get<int>() - 1 });
    } else {
        subsections.append({ 0, highest_object_number });
    }

    auto stream = TRY(parse_stream(dict));
    auto table = adopt_ref(*new XRefTable());

    auto field_to_long = [](Span<u8 const> field) -> long {
        long value = 0;
        const u8 max = (field.size() - 1) * 8;
        for (size_t i = 0; i < field.size(); ++i) {
            value |= static_cast<long>(field[i]) << (max - (i * 8));
        }
        return value;
    };

    size_t byte_index = 0;
    size_t subsection_index = 0;

    Vector<XRefEntry> entries;

    for (int entry_index = 0; subsection_index < subsections.size(); ++entry_index) {
        Array<long, 3> fields;
        for (size_t field_index = 0; field_index < 3; ++field_index) {
            auto field_size = field_sizes->at(field_index).get_u32();

            if (byte_index + field_size > stream->bytes().size())
                return error("The xref stream data cut off early");

            auto field = stream->bytes().slice(byte_index, field_size);
            fields[field_index] = field_to_long(field);
            byte_index += field_size;
        }

        u8 type = fields[0];
        if (!field_sizes->at(0).get_u32())
            type = 1;

        entries.append({ fields[1], static_cast<u16>(fields[2]), type != 0, type == 2 });

        auto subsection = subsections[subsection_index];
        if (entry_index >= subsection.get<1>()) {
            table->add_section({ subsection.get<0>(), subsection.get<1>(), entries });
            entries.clear();
            subsection_index++;
        }
    }

    m_trailer = dict;
    return table;
}

PDFErrorOr<NonnullRefPtr<XRefTable>> DocumentParser::parse_xref_table()
{
    if (!m_reader.matches("xref")) {
        // Since version 1.5, there may be a cross-reference stream instead
        return parse_xref_stream();
    }

    m_reader.move_by(4);
    if (!m_reader.consume_eol())
        return error("Expected newline after \"xref\"");

    auto table = adopt_ref(*new XRefTable());

    do {
        if (m_reader.matches("trailer"))
            return table;

        Vector<XRefEntry> entries;

        auto starting_index_value = TRY(parse_number());
        auto starting_index = starting_index_value.get<int>();
        auto object_count_value = TRY(parse_number());
        auto object_count = object_count_value.get<int>();

        for (int i = 0; i < object_count; i++) {
            // Each entry is exactly 20 bytes: a 10-digit byte offset, a space, a 5-digit
            // generation number, a space, an 'n' (in use) or 'f' (free) keyword, and a
            // two-character line ending.
            auto offset_string = DeprecatedString(m_reader.bytes().slice(m_reader.offset(), 10));
            m_reader.move_by(10);
            if (!m_reader.consume(' '))
                return error("Malformed xref entry");

            auto generation_string = DeprecatedString(m_reader.bytes().slice(m_reader.offset(), 5));
            m_reader.move_by(5);
            if (!m_reader.consume(' '))
                return error("Malformed xref entry");

            auto letter = m_reader.read();
            if (letter != 'n' && letter != 'f')
                return error("Malformed xref entry");

            // The line ending sequence can be one of the following:
            // SP CR, SP LF, or CR LF
            if (m_reader.matches(' ')) {
                m_reader.consume();
                auto ch = m_reader.consume();
                if (ch != '\r' && ch != '\n')
                    return error("Malformed xref entry");
            } else {
                if (!m_reader.matches("\r\n"))
                    return error("Malformed xref entry");
                m_reader.move_by(2);
            }

            auto offset = strtol(offset_string.characters(), nullptr, 10);
            auto generation = strtol(generation_string.characters(), nullptr, 10);

            entries.append({ offset, static_cast<u16>(generation), letter == 'n' });
        }

        table->add_section({ starting_index, object_count, entries });
    } while (m_reader.matches_number());

    return table;
}

PDFErrorOr<NonnullRefPtr<DictObject>> DocumentParser::parse_file_trailer()
{
    while (m_reader.matches_eol())
        m_reader.consume_eol();

    if (!m_reader.matches("trailer"))
        return error("Expected \"trailer\" keyword");
    m_reader.move_by(7);
    m_reader.consume_whitespace();
    auto dict = TRY(parse_dict());

    if (!m_reader.matches("startxref"))
        return error("Expected \"startxref\"");
    m_reader.move_by(9);
    m_reader.consume_whitespace();

    m_reader.move_until([&](auto) { return m_reader.matches_eol(); });
    VERIFY(m_reader.consume_eol());
    if (!m_reader.matches("%%EOF"))
        return error("Expected \"%%EOF\"");

    m_reader.move_by(5);
    m_reader.consume_whitespace();

    return dict;
}
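// For reference, the decoded contents of an object stream as handled below start with
// pairs of object number and byte offset (relative to /First), followed by the objects
// themselves, e.g. (illustrative only):
//
//   << /Type /ObjStm /N 3 /First 17 /Length ... >>
//   stream
//   11 0 12 52 13 103
//   << ...object 11... >> << ...object 12... >> << ...object 13... >>
//   endstream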
PDFErrorOr<Value> DocumentParser::parse_compressed_object_with_index(u32 index)
{
    auto object_stream_index = m_xref_table->object_stream_for_object(index);
    auto stream_offset = m_xref_table->byte_offset_for_object(object_stream_index);

    m_reader.move_to(stream_offset);

    auto first_number = TRY(parse_number());
    auto second_number = TRY(parse_number());

    if (first_number.get<int>() != object_stream_index)
        return error("Mismatching object stream index");
    if (second_number.get<int>() != 0)
        return error("Non-zero object stream generation number");

    if (!m_reader.matches("obj"))
        return error("Malformed object stream");
    m_reader.move_by(3);
    if (m_reader.matches_eol())
        m_reader.consume_eol();

    auto dict = TRY(parse_dict());
    auto type = TRY(dict->get_name(m_document, CommonNames::Type))->name();
    if (type != "ObjStm")
        return error("Invalid object stream type");

    auto object_count = dict->get_value("N").get_u32();
    auto first_object_offset = dict->get_value("First").get_u32();

    auto stream = TRY(parse_stream(dict));
    Parser stream_parser(m_document, stream->bytes());

    for (u32 i = 0; i < object_count; ++i) {
        auto object_number = TRY(stream_parser.parse_number());
        auto object_offset = TRY(stream_parser.parse_number());

        if (object_number.get_u32() == index) {
            stream_parser.move_to(first_object_offset + object_offset.get_u32());
            break;
        }
    }

    return TRY(stream_parser.parse_value());
}

PDFErrorOr<DocumentParser::PageOffsetHintTable> DocumentParser::parse_page_offset_hint_table(ReadonlyBytes hint_stream_bytes)
{
    if (hint_stream_bytes.size() < sizeof(PageOffsetHintTable))
        return error("Hint stream is too small");

    size_t offset = 0;

    auto read_u32 = [&] {
        u32 data = reinterpret_cast<u32 const*>(hint_stream_bytes.data() + offset)[0];
        offset += 4;
        return AK::convert_between_host_and_big_endian(data);
    };

    auto read_u16 = [&] {
        u16 data = reinterpret_cast<u16 const*>(hint_stream_bytes.data() + offset)[0];
        offset += 2;
        return AK::convert_between_host_and_big_endian(data);
    };

    PageOffsetHintTable hint_table {
        read_u32(),
        read_u32(),
        read_u16(),
        read_u32(),
        read_u16(),
        read_u32(),
        read_u16(),
        read_u32(),
        read_u16(),
        read_u16(),
        read_u16(),
        read_u16(),
        read_u16(),
    };

    // Verify that all of the bits_required_for_xyz fields are <= 32, since all of the numeric
    // fields in PageOffsetHintTableEntry are u32
    VERIFY(hint_table.bits_required_for_object_number <= 32);
    VERIFY(hint_table.bits_required_for_page_length <= 32);
    VERIFY(hint_table.bits_required_for_content_stream_offsets <= 32);
    VERIFY(hint_table.bits_required_for_content_stream_length <= 32);
    VERIFY(hint_table.bits_required_for_number_of_shared_obj_refs <= 32);
    VERIFY(hint_table.bits_required_for_greatest_shared_obj_identifier <= 32);
    VERIFY(hint_table.bits_required_for_fraction_numerator <= 32);

    return hint_table;
}
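// For reference, the per-page entries that follow the header parsed above are stored as
// consecutive bit-packed arrays, one field at a time for all pages (e.g. first the object
// counts for every page, then the page lengths for every page, and so on), which is why
// parse_all_page_offset_hint_table_entries() below reads each field across all entries
// before moving on to the next one.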
PDFErrorOr<Vector<DocumentParser::PageOffsetHintTableEntry>> DocumentParser::parse_all_page_offset_hint_table_entries(PageOffsetHintTable const& hint_table, ReadonlyBytes hint_stream_bytes)
{
    auto input_stream = TRY(FixedMemoryStream::construct(hint_stream_bytes));
    TRY(input_stream->seek(sizeof(PageOffsetHintTable)));

    LittleEndianInputBitStream bit_stream { move(input_stream) };

    auto number_of_pages = m_linearization_dictionary.value().number_of_pages;
    Vector<PageOffsetHintTableEntry> entries;
    for (size_t i = 0; i < number_of_pages; i++)
        entries.append(PageOffsetHintTableEntry {});

    auto bits_required_for_object_number = hint_table.bits_required_for_object_number;
    auto bits_required_for_page_length = hint_table.bits_required_for_page_length;
    auto bits_required_for_content_stream_offsets = hint_table.bits_required_for_content_stream_offsets;
    auto bits_required_for_content_stream_length = hint_table.bits_required_for_content_stream_length;
    auto bits_required_for_number_of_shared_obj_refs = hint_table.bits_required_for_number_of_shared_obj_refs;
    auto bits_required_for_greatest_shared_obj_identifier = hint_table.bits_required_for_greatest_shared_obj_identifier;
    auto bits_required_for_fraction_numerator = hint_table.bits_required_for_fraction_numerator;

    auto parse_int_entry = [&](u32 PageOffsetHintTableEntry::*field, u32 bit_size) -> ErrorOr<void> {
        if (bit_size <= 0)
            return {};

        for (int i = 0; i < number_of_pages; i++) {
            auto& entry = entries[i];
            entry.*field = TRY(bit_stream.read_bits(bit_size));
        }

        return {};
    };

    auto parse_vector_entry = [&](Vector<u32> PageOffsetHintTableEntry::*field, u32 bit_size) -> ErrorOr<void> {
        if (bit_size <= 0)
            return {};

        for (int page = 1; page < number_of_pages; page++) {
            auto number_of_shared_objects = entries[page].number_of_shared_objects;
            Vector<u32> items;
            items.ensure_capacity(number_of_shared_objects);

            for (size_t i = 0; i < number_of_shared_objects; i++)
                items.unchecked_append(TRY(bit_stream.read_bits(bit_size)));

            entries[page].*field = move(items);
        }

        return {};
    };

    TRY(parse_int_entry(&PageOffsetHintTableEntry::objects_in_page_number, bits_required_for_object_number));
    TRY(parse_int_entry(&PageOffsetHintTableEntry::page_length_number, bits_required_for_page_length));
    TRY(parse_int_entry(&PageOffsetHintTableEntry::number_of_shared_objects, bits_required_for_number_of_shared_obj_refs));
    TRY(parse_vector_entry(&PageOffsetHintTableEntry::shared_object_identifiers, bits_required_for_greatest_shared_obj_identifier));
    TRY(parse_vector_entry(&PageOffsetHintTableEntry::shared_object_location_numerators, bits_required_for_fraction_numerator));
    TRY(parse_int_entry(&PageOffsetHintTableEntry::page_content_stream_offset_number, bits_required_for_content_stream_offsets));
    TRY(parse_int_entry(&PageOffsetHintTableEntry::page_content_stream_length_number, bits_required_for_content_stream_length));

    return entries;
}

bool DocumentParser::navigate_to_before_eof_marker()
{
    m_reader.set_reading_backwards();

    while (!m_reader.done()) {
        m_reader.move_until([&](auto) { return m_reader.matches_eol(); });
        if (m_reader.done())
            return false;

        m_reader.consume_eol();
        if (!m_reader.matches("%%EOF"))
            continue;

        m_reader.move_by(5);
        if (!m_reader.matches_eol())
            continue;

        m_reader.consume_eol();
        return true;
    }

    return false;
}

bool DocumentParser::navigate_to_after_startxref()
{
    m_reader.set_reading_backwards();

    while (!m_reader.done()) {
        m_reader.move_until([&](auto) { return m_reader.matches_eol(); });
        auto offset = m_reader.offset() + 1;

        m_reader.consume_eol();
        if (!m_reader.matches("startxref"))
            continue;

        m_reader.move_by(9);
        if (!m_reader.matches_eol())
            continue;

        m_reader.move_to(offset);
        return true;
    }

    return false;
}

PDFErrorOr<RefPtr<DictObject>> DocumentParser::conditionally_parse_page_tree_node(u32 object_index)
{
    auto dict_value = TRY(parse_object_with_index(object_index));
    auto dict_object = dict_value.get<NonnullRefPtr<Object>>();
    if (!dict_object->is<DictObject>())
        return error(DeprecatedString::formatted("Invalid page tree with xref index {}", object_index));

    auto dict = dict_object->cast<DictObject>();
    if (!dict->contains_any_of(CommonNames::Type, CommonNames::Parent, CommonNames::Kids, CommonNames::Count))
        // This is a page, not a page tree node
        return RefPtr<DictObject> {};

    if (!dict->contains(CommonNames::Type))
        return RefPtr<DictObject> {};
    auto type_object = TRY(dict->get_object(m_document, CommonNames::Type));
    if (!type_object->is<NameObject>())
        return RefPtr<DictObject> {};
    auto type_name = type_object->cast<NameObject>();
    if (type_name->name() != CommonNames::Pages)
        return RefPtr<DictObject> {};
    return dict;
}

}

namespace AK {

template<>
struct Formatter<PDF::DocumentParser::LinearizationDictionary> : Formatter<StringView> {
    ErrorOr<void> format(FormatBuilder& format_builder, PDF::DocumentParser::LinearizationDictionary const& dict)
    {
        StringBuilder builder;
        builder.append("{\n"sv);
        builder.appendff(" length_of_file={}\n", dict.length_of_file);
        builder.appendff(" primary_hint_stream_offset={}\n", dict.primary_hint_stream_offset);
        builder.appendff(" primary_hint_stream_length={}\n", dict.primary_hint_stream_length);
        builder.appendff(" overflow_hint_stream_offset={}\n", dict.overflow_hint_stream_offset);
        builder.appendff(" overflow_hint_stream_length={}\n", dict.overflow_hint_stream_length);
        builder.appendff(" first_page_object_number={}\n", dict.first_page_object_number);
        builder.appendff(" offset_of_first_page_end={}\n", dict.offset_of_first_page_end);
        builder.appendff(" number_of_pages={}\n", dict.number_of_pages);
        builder.appendff(" offset_of_main_xref_table={}\n", dict.offset_of_main_xref_table);
        builder.appendff(" first_page={}\n", dict.first_page);
        builder.append('}');
        return Formatter<StringView>::format(format_builder, builder.to_deprecated_string());
    }
};
template<>
struct Formatter<PDF::DocumentParser::PageOffsetHintTable> : Formatter<StringView> {
    ErrorOr<void> format(FormatBuilder& format_builder, PDF::DocumentParser::PageOffsetHintTable const& table)
    {
        StringBuilder builder;
        builder.append("{\n"sv);
        builder.appendff(" least_number_of_objects_in_a_page={}\n", table.least_number_of_objects_in_a_page);
        builder.appendff(" location_of_first_page_object={}\n", table.location_of_first_page_object);
        builder.appendff(" bits_required_for_object_number={}\n", table.bits_required_for_object_number);
        builder.appendff(" least_length_of_a_page={}\n", table.least_length_of_a_page);
        builder.appendff(" bits_required_for_page_length={}\n", table.bits_required_for_page_length);
        builder.appendff(" least_offset_of_any_content_stream={}\n", table.least_offset_of_any_content_stream);
        builder.appendff(" bits_required_for_content_stream_offsets={}\n", table.bits_required_for_content_stream_offsets);
        builder.appendff(" least_content_stream_length={}\n", table.least_content_stream_length);
        builder.appendff(" bits_required_for_content_stream_length={}\n", table.bits_required_for_content_stream_length);
        builder.appendff(" bits_required_for_number_of_shared_obj_refs={}\n", table.bits_required_for_number_of_shared_obj_refs);
        builder.appendff(" bits_required_for_greatest_shared_obj_identifier={}\n", table.bits_required_for_greatest_shared_obj_identifier);
        builder.appendff(" bits_required_for_fraction_numerator={}\n", table.bits_required_for_fraction_numerator);
        builder.appendff(" shared_object_reference_fraction_denominator={}\n", table.shared_object_reference_fraction_denominator);
        builder.append('}');
        return Formatter<StringView>::format(format_builder, builder.to_deprecated_string());
    }
};

template<>
struct Formatter<PDF::DocumentParser::PageOffsetHintTableEntry> : Formatter<StringView> {
    ErrorOr<void> format(FormatBuilder& format_builder, PDF::DocumentParser::PageOffsetHintTableEntry const& entry)
    {
        StringBuilder builder;
        builder.append("{\n"sv);
        builder.appendff(" objects_in_page_number={}\n", entry.objects_in_page_number);
        builder.appendff(" page_length_number={}\n", entry.page_length_number);
        builder.appendff(" number_of_shared_objects={}\n", entry.number_of_shared_objects);
        builder.append(" shared_object_identifiers=["sv);
        for (auto& identifier : entry.shared_object_identifiers)
            builder.appendff(" {}", identifier);
        builder.append(" ]\n"sv);
        builder.append(" shared_object_location_numerators=["sv);
        for (auto& numerator : entry.shared_object_location_numerators)
            builder.appendff(" {}", numerator);
        builder.append(" ]\n"sv);
        builder.appendff(" page_content_stream_offset_number={}\n", entry.page_content_stream_offset_number);
        builder.appendff(" page_content_stream_length_number={}\n", entry.page_content_stream_length_number);
        builder.append('}');
        return Formatter<StringView>::format(format_builder, builder.to_deprecated_string());
    }
};

}