author     Sahan Fernando <sahan.h.fernando@gmail.com>   2021-06-12 23:07:44 +1000
committer  Andreas Kling <kling@serenityos.org>          2021-06-25 19:26:30 +0200
commit     b9ad6058aa05b723c51beac35184ab29f7cbc113 (patch)
tree       1165d9753fae4df6569c6be61c4121b735bd7c03 /Kernel/Graphics/VirtIOGPU
parent     b569b2df35d3f109520033ff10220cfb28f7db67 (diff)
download   serenity-b9ad6058aa05b723c51beac35184ab29f7cbc113.zip
Kernel: Add VirtIOGPU graphics device
Diffstat (limited to 'Kernel/Graphics/VirtIOGPU')
-rw-r--r--  Kernel/Graphics/VirtIOGPU/VirtIOFrameBufferDevice.cpp  | 135
-rw-r--r--  Kernel/Graphics/VirtIOGPU/VirtIOFrameBufferDevice.h    |  45
-rw-r--r--  Kernel/Graphics/VirtIOGPU/VirtIOGPU.cpp                | 382
-rw-r--r--  Kernel/Graphics/VirtIOGPU/VirtIOGPU.h                  | 219
-rw-r--r--  Kernel/Graphics/VirtIOGPU/VirtIOGPUConsole.cpp         |  87
-rw-r--r--  Kernel/Graphics/VirtIOGPU/VirtIOGPUConsole.h           |  54
-rw-r--r--  Kernel/Graphics/VirtIOGPU/VirtIOGraphicsAdapter.cpp    |  54
-rw-r--r--  Kernel/Graphics/VirtIOGPU/VirtIOGraphicsAdapter.h      |  43
8 files changed, 1019 insertions, 0 deletions
diff --git a/Kernel/Graphics/VirtIOGPU/VirtIOFrameBufferDevice.cpp b/Kernel/Graphics/VirtIOGPU/VirtIOFrameBufferDevice.cpp
new file mode 100644
index 0000000000..b471e9e160
--- /dev/null
+++ b/Kernel/Graphics/VirtIOGPU/VirtIOFrameBufferDevice.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2021, Sahan Fernando <sahan.h.fernando@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <Kernel/Graphics/GraphicsManagement.h>
+#include <Kernel/Graphics/VirtIOGPU/VirtIOFrameBufferDevice.h>
+#include <LibC/sys/ioctl_numbers.h>
+
+namespace Kernel::Graphics {
+
+VirtIOFrameBufferDevice::VirtIOFrameBufferDevice(RefPtr<VirtIOGPU> virtio_gpu)
+    : BlockDevice(29, GraphicsManagement::the().allocate_minor_device_number())
+    , m_gpu(virtio_gpu)
+{
+    auto write_sink_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No).release_nonnull();
+    auto num_needed_pages = m_gpu->framebuffer_vm_object().page_count();
+    NonnullRefPtrVector<PhysicalPage> pages;
+    for (auto i = 0u; i < num_needed_pages; ++i) {
+        pages.append(write_sink_page);
+    }
+    m_framebuffer_sink_vmobject = AnonymousVMObject::create_with_physical_pages(move(pages));
+}
+
+VirtIOFrameBufferDevice::~VirtIOFrameBufferDevice()
+{
+}
+
+int VirtIOFrameBufferDevice::ioctl(FileDescription&, unsigned request, FlatPtr arg)
+{
+    REQUIRE_PROMISE(video);
+    switch (request) {
+    case FB_IOCTL_GET_SIZE_IN_BYTES: {
+        auto* out = (size_t*)arg;
+        size_t value = m_gpu->framebuffer_size_in_bytes();
+        if (!copy_to_user(out, &value))
+            return -EFAULT;
+        return 0;
+    }
+    case FB_IOCTL_SET_RESOLUTION: {
+        auto* user_resolution = (FBResolution*)arg;
+        FBResolution resolution;
+        if (!copy_from_user(&resolution, user_resolution))
+            return -EFAULT;
+        if (!m_gpu->try_to_set_resolution(resolution.width, resolution.height))
+            return -EINVAL;
+        resolution.pitch = m_gpu->framebuffer_pitch();
+        if (!copy_to_user(user_resolution, &resolution))
+            return -EFAULT;
+        return 0;
+    }
+    case FB_IOCTL_GET_RESOLUTION: {
+        auto* user_resolution = (FBResolution*)arg;
+        FBResolution resolution;
+        resolution.pitch = m_gpu->framebuffer_pitch();
+        resolution.width = m_gpu->framebuffer_width();
+        resolution.height = m_gpu->framebuffer_height();
+        if (!copy_to_user(user_resolution, &resolution))
+            return -EFAULT;
+        return 0;
+    }
+    case FB_IOCTL_FLUSH_BUFFER: {
+        FBRect user_dirty_rect;
+        if (!copy_from_user(&user_dirty_rect, (FBRect*)arg))
+            return -EFAULT;
+        VirtIOGPURect dirty_rect {
+            .x = user_dirty_rect.x,
+            .y = user_dirty_rect.y,
+            .width = user_dirty_rect.width,
+            .height = user_dirty_rect.height
+        };
+        if (m_are_writes_active)
+            m_gpu->flush_dirty_window(dirty_rect);
+        return 0;
+    }
+    default:
+        return -EINVAL;
+    };
+}
+
+KResultOr<Region*> VirtIOFrameBufferDevice::mmap(Process& process, FileDescription&, const Range& range, u64 offset, int prot, bool shared)
+{
+    REQUIRE_PROMISE(video);
+    if (!shared)
+        return ENODEV;
+    if (offset != 0)
+        return ENXIO;
+    if (range.size() != page_round_up(m_gpu->framebuffer_size_in_bytes()))
+        return EOVERFLOW;
+
+    // We only allow one process to map the region
+    if (m_userspace_mmap_region)
+        return ENOMEM;
+
+    auto vmobject = m_are_writes_active ? m_gpu->framebuffer_vm_object().clone() : m_framebuffer_sink_vmobject;
+    if (vmobject.is_null())
+        return ENOMEM;
+
+    auto result = process.space().allocate_region_with_vmobject(
+        range,
+        vmobject.release_nonnull(),
+        0,
+        "VirtIOGPU Framebuffer",
+        prot,
+        shared);
+    if (result.is_error())
+        return result;
+    m_userspace_mmap_region = result.value();
+    return result;
+}
+
+void VirtIOFrameBufferDevice::deactivate_writes()
+{
+    m_are_writes_active = false;
+    if (m_userspace_mmap_region) {
+        auto* region = m_userspace_mmap_region.unsafe_ptr();
+        auto vm_object = m_framebuffer_sink_vmobject->clone();
+        VERIFY(vm_object);
+        region->set_vmobject(vm_object.release_nonnull());
+        region->remap();
+    }
+}
+
+void VirtIOFrameBufferDevice::activate_writes()
+{
+    m_are_writes_active = true;
+    if (m_userspace_mmap_region) {
+        auto* region = m_userspace_mmap_region.unsafe_ptr();
+        region->set_vmobject(m_gpu->framebuffer_vm_object());
+        region->remap();
+    }
+}
+
+}
diff --git a/Kernel/Graphics/VirtIOGPU/VirtIOFrameBufferDevice.h b/Kernel/Graphics/VirtIOGPU/VirtIOFrameBufferDevice.h
new file mode 100644
index 0000000000..f918bdbc69
--- /dev/null
+++ b/Kernel/Graphics/VirtIOGPU/VirtIOFrameBufferDevice.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2021, Sahan Fernando <sahan.h.fernando@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <Kernel/Devices/BlockDevice.h>
+#include <Kernel/Graphics/VirtIOGPU/VirtIOGPU.h>
+#include <Kernel/VirtIO/VirtIO.h>
+#include <Kernel/VirtIO/VirtIOQueue.h>
+
+namespace Kernel::Graphics {
+
+class VirtIOFrameBufferDevice final : public BlockDevice {
+public:
+    VirtIOFrameBufferDevice(RefPtr<VirtIOGPU> virtio_gpu);
+    virtual ~VirtIOFrameBufferDevice() override;
+
+    virtual void deactivate_writes();
+    virtual void activate_writes();
+
+private:
+    virtual const char* class_name() const override { return "VirtIOFrameBuffer"; }
+
+    virtual int ioctl(FileDescription&, unsigned request, FlatPtr arg) override;
+    virtual KResultOr<Region*> mmap(Process&, FileDescription&, const Range&, u64 offset, int prot, bool shared) override;
+    virtual bool can_read(const FileDescription&, size_t) const override { return true; }
+    virtual KResultOr<size_t> read(FileDescription&, u64, UserOrKernelBuffer&, size_t) override { return EINVAL; }
+    virtual bool can_write(const FileDescription&, size_t) const override { return true; }
+    virtual KResultOr<size_t> write(FileDescription&, u64, const UserOrKernelBuffer&, size_t) override { return EINVAL; };
+    virtual void start_request(AsyncBlockDeviceRequest& request) override { request.complete(AsyncDeviceRequest::Failure); }
+
+    virtual mode_t required_mode() const override { return 0666; }
+    virtual String device_name() const override { return String::formatted("fb{}", minor()); }
+
+    RefPtr<VirtIOGPU> m_gpu;
+    RefPtr<VMObject> m_framebuffer_sink_vmobject;
+    bool m_are_writes_active { true };
+    // FIXME: This needs to be cleaned up if the WindowServer exits while we are in a tty
+    WeakPtr<Region> m_userspace_mmap_region;
+};
+
+}
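Taken together, the two files above expose the virtio-gpu framebuffer as an ordinary SerenityOS framebuffer node. Below is a minimal userspace sketch (not part of this commit) of how a client such as WindowServer could drive it; it assumes the FBResolution/FBRect definitions from `<sys/ioctl_numbers.h>` that the ioctl handler copies in and out, and a device node at /dev/fb0 (per `device_name()`). Error handling is omitted for brevity.

```cpp
// Hypothetical userspace client of the new framebuffer device (illustration only).
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/ioctl_numbers.h>
#include <sys/mman.h>
#include <unistd.h>

int main()
{
    int fd = open("/dev/fb0", O_RDWR);

    FBResolution resolution {};
    ioctl(fd, FB_IOCTL_GET_RESOLUTION, &resolution);

    size_t size_in_bytes = 0;
    ioctl(fd, FB_IOCTL_GET_SIZE_IN_BYTES, &size_in_bytes);

    // MAP_SHARED is required: the kernel side rejects private mappings with ENODEV.
    auto* pixels = static_cast<uint32_t*>(mmap(nullptr, size_in_bytes, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));

    // Paint the top-left quarter, then ask the GPU to scan out just that rectangle.
    for (uint32_t y = 0; y < resolution.height / 2; ++y)
        for (uint32_t x = 0; x < resolution.width / 2; ++x)
            pixels[y * (resolution.pitch / 4) + x] = 0x00ff00ff;

    FBRect dirty {};
    dirty.x = 0;
    dirty.y = 0;
    dirty.width = resolution.width / 2;
    dirty.height = resolution.height / 2;
    ioctl(fd, FB_IOCTL_FLUSH_BUFFER, &dirty);

    munmap(pixels, size_in_bytes);
    close(fd);
    return 0;
}
```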
diff --git a/Kernel/Graphics/VirtIOGPU/VirtIOGPU.cpp b/Kernel/Graphics/VirtIOGPU/VirtIOGPU.cpp
new file mode 100644
index 0000000000..9e7c3baa73
--- /dev/null
+++ b/Kernel/Graphics/VirtIOGPU/VirtIOGPU.cpp
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2021, Sahan Fernando <sahan.h.fernando@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <Kernel/Graphics/VirtIOGPU/VirtIOGPU.h>
+#include <LibC/sys/ioctl_numbers.h>
+
+#define DEVICE_EVENTS_READ 0x0
+#define DEVICE_EVENTS_CLEAR 0x4
+#define DEVICE_NUM_SCANOUTS 0x8
+
+namespace Kernel::Graphics {
+
+VirtIOGPU::VirtIOGPU(PCI::Address address)
+    : VirtIODevice(address, "VirtIOGPU")
+    , m_scratch_space(MM.allocate_contiguous_kernel_region(32 * PAGE_SIZE, "VirtGPU Scratch Space", Region::Access::Read | Region::Access::Write))
+{
+    VERIFY(!!m_scratch_space);
+    if (auto cfg = get_config(ConfigurationType::Device)) {
+        m_device_configuration = cfg;
+        bool success = negotiate_features([&](u64 supported_features) {
+            u64 negotiated = 0;
+            if (is_feature_set(supported_features, VIRTIO_GPU_F_VIRGL))
+                dbgln_if(VIRTIO_DEBUG, "VirtIOGPU: VIRGL is not yet supported!");
+            if (is_feature_set(supported_features, VIRTIO_GPU_F_EDID))
+                dbgln_if(VIRTIO_DEBUG, "VirtIOGPU: EDID is not yet supported!");
+            return negotiated;
+        });
+        if (success) {
+            read_config_atomic([&]() {
+                m_num_scanouts = config_read32(*cfg, DEVICE_NUM_SCANOUTS);
+            });
+            dbgln_if(VIRTIO_DEBUG, "VirtIOGPU: num_scanouts: {}", m_num_scanouts);
+            success = setup_queues(2); // CONTROLQ + CURSORQ
+        }
+        VERIFY(success);
+        finish_init();
+        Locker locker(m_operation_lock);
+        // 1. Get display information using VIRTIO_GPU_CMD_GET_DISPLAY_INFO
+        query_display_information();
+        // 2. Create BUFFER using VIRTIO_GPU_CMD_RESOURCE_CREATE_2D
+        m_framebuffer_id = create_2d_resource(m_display_info.rect);
+        // 3. Attach backing storage using VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING
+        // FIXME: We really should be trying to allocate a small amount of pages initially, with ensure_backing_storage increasing the backing memory of the region as needed
+        size_t buffer_length = calculate_framebuffer_size(MAX_VIRTIOGPU_RESOLUTION_WIDTH, MAX_VIRTIOGPU_RESOLUTION_HEIGHT);
+        m_framebuffer = MM.allocate_kernel_region(page_round_up(buffer_length), "VirtGPU FrameBuffer", Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow);
+        ensure_backing_storage(*m_framebuffer, buffer_length, m_framebuffer_id);
+        // 4. Use VIRTIO_GPU_CMD_SET_SCANOUT to link the framebuffer to a display scanout.
+        set_scanout_resource(m_chosen_scanout.value(), m_framebuffer_id, m_display_info.rect);
+        // 5. Render our test pattern
+        draw_ntsc_test_pattern();
+        // 6. Use VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D to update the host resource from guest memory.
+        transfer_framebuffer_data_to_host(m_display_info.rect);
+        // 7. Use VIRTIO_GPU_CMD_RESOURCE_FLUSH to flush the updated resource to the display.
+        flush_displayed_image(m_display_info.rect);
+    } else {
+        VERIFY_NOT_REACHED();
+    }
+}
+
+VirtIOGPU::~VirtIOGPU()
+{
+}
+
+bool VirtIOGPU::handle_device_config_change()
+{
+    return false;
+}
+
+void VirtIOGPU::handle_queue_update(u16 queue_index)
+{
+    dbgln_if(VIRTIO_DEBUG, "VirtIOGPU: Handle queue update");
+    VERIFY(queue_index == CONTROLQ);
+
+    auto& queue = get_queue(CONTROLQ);
+    ScopedSpinLock queue_lock(queue.lock());
+    queue.discard_used_buffers();
+    m_outstanding_request.wake_all();
+}
+
+u32 VirtIOGPU::get_pending_events()
+{
+    return config_read32(*m_device_configuration, DEVICE_EVENTS_READ);
+}
+
+void VirtIOGPU::clear_pending_events(u32 event_bitmask)
+{
+    config_write32(*m_device_configuration, DEVICE_EVENTS_CLEAR, event_bitmask);
+}
+
+void VirtIOGPU::query_display_information()
+{
+    VERIFY(m_operation_lock.is_locked());
+    auto& request = *reinterpret_cast<VirtIOGPUCtrlHeader*>(m_scratch_space->vaddr().as_ptr());
+    auto& response = *reinterpret_cast<VirtIOGPURespDisplayInfo*>((m_scratch_space->vaddr().offset(sizeof(request)).as_ptr()));
+
+    populate_virtio_gpu_request_header(request, VirtIOGPUCtrlType::VIRTIO_GPU_CMD_GET_DISPLAY_INFO, VIRTIO_GPU_FLAG_FENCE);
+
+    synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
+
+    for (size_t i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; ++i) {
+        auto& scanout = response.scanout_modes[i];
+        if (!scanout.enabled)
+            continue;
+        dbgln_if(VIRTIO_DEBUG, "Scanout {}: x: {}, y: {}, width: {}, height: {}", i, scanout.rect.x, scanout.rect.y, scanout.rect.width, scanout.rect.height);
+        m_display_info = scanout;
+        m_chosen_scanout = i;
+    }
+    VERIFY(m_chosen_scanout.has_value());
+}
+
+VirtIOGPUResourceID VirtIOGPU::create_2d_resource(VirtIOGPURect rect)
+{
+    VERIFY(m_operation_lock.is_locked());
+    auto& request = *reinterpret_cast<VirtIOGPUResourceCreate2D*>(m_scratch_space->vaddr().as_ptr());
+    auto& response = *reinterpret_cast<VirtIOGPUCtrlHeader*>((m_scratch_space->vaddr().offset(sizeof(request)).as_ptr()));
+
+    populate_virtio_gpu_request_header(request.header, VirtIOGPUCtrlType::VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, VIRTIO_GPU_FLAG_FENCE);
+
+    auto resource_id = allocate_resource_id();
+    request.resource_id = resource_id.value();
+    request.width = rect.width;
+    request.height = rect.height;
+    request.format = static_cast<u32>(VirtIOGPUFormats::VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM);
+
+    synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
+
+    VERIFY(response.type == static_cast<u32>(VirtIOGPUCtrlType::VIRTIO_GPU_RESP_OK_NODATA));
+    dbgln_if(VIRTIO_DEBUG, "VirtIOGPU: Allocated 2d resource with id {}", resource_id.value());
+    return resource_id;
+}
+
+void VirtIOGPU::ensure_backing_storage(Region& region, size_t buffer_length, VirtIOGPUResourceID resource_id)
+{
+    VERIFY(m_operation_lock.is_locked());
+    // Allocate backing region
+    auto& vm_object = region.vmobject();
+    size_t desired_num_pages = page_round_up(buffer_length);
+    auto& pages = vm_object.physical_pages();
+    for (size_t i = pages.size(); i < desired_num_pages / PAGE_SIZE; ++i) {
+        auto page = MM.allocate_user_physical_page();
+        // FIXME: Instead of verifying, fail the framebuffer resize operation
+        VERIFY(!page.is_null());
+        pages.append(move(page));
+    }
+    region.remap();
+    size_t num_mem_regions = vm_object.page_count();
+
+    // Send request
+    auto& request = *reinterpret_cast<VirtIOGPUResourceAttachBacking*>(m_scratch_space->vaddr().as_ptr());
+    const size_t header_block_size = sizeof(request) + num_mem_regions * sizeof(VirtIOGPUMemEntry);
+    auto& response = *reinterpret_cast<VirtIOGPUCtrlHeader*>((m_scratch_space->vaddr().offset(header_block_size).as_ptr()));
+
+    populate_virtio_gpu_request_header(request.header, VirtIOGPUCtrlType::VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING, VIRTIO_GPU_FLAG_FENCE);
+    request.resource_id = resource_id.value();
+    request.num_entries = num_mem_regions;
+    for (size_t i = 0; i < num_mem_regions; ++i) {
+        request.entries[i].address = m_framebuffer->physical_page(i)->paddr().get();
+        request.entries[i].length = PAGE_SIZE;
+    }
+
+    synchronous_virtio_gpu_command(start_of_scratch_space(), header_block_size, sizeof(response));
+
+    VERIFY(response.type == static_cast<u32>(VirtIOGPUCtrlType::VIRTIO_GPU_RESP_OK_NODATA));
+    dbgln_if(VIRTIO_DEBUG, "VirtIOGPU: Allocated backing storage");
+}
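A quick check on the sizing choices above: at the largest supported mode the attach-backing request carries one VirtIOGPUMemEntry per backing page, and the whole request plus the response header must fit in the 32-page scratch region allocated in the constructor. A back-of-the-envelope sketch (illustrative only, assuming the struct sizes declared in VirtIOGPU.h and a 4 KiB PAGE_SIZE):

```cpp
// Compile-time arithmetic mirroring ensure_backing_storage() at 3840x2160.
#include <cstddef>

constexpr size_t page_size = 4096;
constexpr size_t max_framebuffer_bytes = 3840 * 2160 * 4;            // 33'177'600 bytes
constexpr size_t backing_pages = max_framebuffer_bytes / page_size;  // exactly 8100 pages
constexpr size_t attach_backing_request =
    32                     // sizeof(VirtIOGPUResourceAttachBacking): 24-byte header + two u32 fields
    + backing_pages * 16;  // one 16-byte VirtIOGPUMemEntry per backing page
constexpr size_t response_size = 24; // sizeof(VirtIOGPUCtrlHeader)

static_assert(backing_pages == 8100);
static_assert(attach_backing_request + response_size <= 32 * page_size,
    "request + response must fit in the 32-page scratch region allocated in the constructor");
```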
+
+void VirtIOGPU::detach_backing_storage(VirtIOGPUResourceID resource_id)
+{
+    VERIFY(m_operation_lock.is_locked());
+    auto& request = *reinterpret_cast<VirtIOGPUResourceDetachBacking*>(m_scratch_space->vaddr().as_ptr());
+    auto& response = *reinterpret_cast<VirtIOGPUCtrlHeader*>((m_scratch_space->vaddr().offset(sizeof(request)).as_ptr()));
+
+    populate_virtio_gpu_request_header(request.header, VirtIOGPUCtrlType::VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING, VIRTIO_GPU_FLAG_FENCE);
+    request.resource_id = resource_id.value();
+
+    synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
+
+    VERIFY(response.type == static_cast<u32>(VirtIOGPUCtrlType::VIRTIO_GPU_RESP_OK_NODATA));
+    dbgln_if(VIRTIO_DEBUG, "VirtIOGPU: Detached backing storage");
+}
+
+void VirtIOGPU::set_scanout_resource(VirtIOGPUScanoutID scanout, VirtIOGPUResourceID resource_id, VirtIOGPURect rect)
+{
+    VERIFY(m_operation_lock.is_locked());
+    auto& request = *reinterpret_cast<VirtIOGPUSetScanOut*>(m_scratch_space->vaddr().as_ptr());
+    auto& response = *reinterpret_cast<VirtIOGPUCtrlHeader*>((m_scratch_space->vaddr().offset(sizeof(request)).as_ptr()));
+
+    populate_virtio_gpu_request_header(request.header, VirtIOGPUCtrlType::VIRTIO_GPU_CMD_SET_SCANOUT, VIRTIO_GPU_FLAG_FENCE);
+    request.resource_id = resource_id.value();
+    request.scanout_id = scanout.value();
+    request.rect = rect;
+
+    synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
+
+    VERIFY(response.type == static_cast<u32>(VirtIOGPUCtrlType::VIRTIO_GPU_RESP_OK_NODATA));
+    dbgln_if(VIRTIO_DEBUG, "VirtIOGPU: Set backing scanout");
+}
+
+void VirtIOGPU::draw_ntsc_test_pattern()
+{
+    static constexpr u8 colors[12][4] = {
+        { 0xff, 0xff, 0xff, 0xff }, // White
+        { 0x00, 0xff, 0xff, 0xff }, // Primary + Composite colors
+        { 0xff, 0xff, 0x00, 0xff },
+        { 0x00, 0xff, 0x00, 0xff },
+        { 0xff, 0x00, 0xff, 0xff },
+        { 0x00, 0x00, 0xff, 0xff },
+        { 0xff, 0x00, 0x00, 0xff },
+        { 0xba, 0x01, 0x5f, 0xff }, // Dark blue
+        { 0x8d, 0x3d, 0x00, 0xff }, // Purple
+        { 0x22, 0x22, 0x22, 0xff }, // Shades of gray
+        { 0x10, 0x10, 0x10, 0xff },
+        { 0x00, 0x00, 0x00, 0xff },
+    };
+    size_t width = m_display_info.rect.width;
+    size_t height = m_display_info.rect.height;
+    u8* data = m_framebuffer->vaddr().as_ptr();
+    // Draw NTSC test card
+    for (size_t y = 0; y < height; ++y) {
+        for (size_t x = 0; x < width; ++x) {
+            size_t color = 0;
+            if (3 * y < 2 * height) {
+                // Top 2/3 of image is 7 vertical stripes of color spectrum
+                color = (7 * x) / width;
+            } else if (4 * y < 3 * height) {
+                // 2/3 mark to 3/4 mark is backwards color spectrum alternating with black
+                auto segment = (7 * x) / width;
+                color = segment % 2 ? 10 : 6 - segment;
+            } else {
+                if (28 * x < 5 * width) {
+                    color = 8;
+                } else if (28 * x < 10 * width) {
+                    color = 0;
+                } else if (28 * x < 15 * width) {
+                    color = 7;
+                } else if (28 * x < 20 * width) {
+                    color = 10;
+                } else if (7 * x < 6 * width) {
+                    // Grayscale gradient
+                    color = 26 - ((21 * x) / width);
+                } else {
+                    // Solid black
+                    color = 10;
+                }
+            }
+            u8* pixel = &data[4 * (y * width + x)];
+            for (int i = 0; i < 4; ++i) {
+                pixel[i] = colors[color][i];
+            }
+        }
+    }
+    dbgln_if(VIRTIO_DEBUG, "Finish drawing the pattern");
+}
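The band arithmetic above is easy to get wrong by one, so here is a small standalone sanity check (not from the commit) that replays the same formulas for an arbitrary mode and confirms every computed index stays inside the 12-entry palette:

```cpp
// Hypothetical host-side check of the test-card band formulas.
#include <cassert>
#include <cstddef>

int main()
{
    constexpr size_t width = 1280;
    constexpr size_t height = 720;
    for (size_t y = 0; y < height; ++y) {
        for (size_t x = 0; x < width; ++x) {
            size_t color = 0;
            if (3 * y < 2 * height) {
                color = (7 * x) / width; // top 2/3: color bars 0..6
            } else if (4 * y < 3 * height) {
                auto segment = (7 * x) / width;
                color = segment % 2 ? 10 : 6 - segment; // alternating strip: 0, 2, 4, 6 or black
            } else if (28 * x < 20 * width) {
                // bottom band, first five 1/28-wide cells: dark blue, white, purple, black
                color = (28 * x < 5 * width) ? 8 : (28 * x < 10 * width) ? 0 : (28 * x < 15 * width) ? 7 : 10;
            } else if (7 * x < 6 * width) {
                color = 26 - ((21 * x) / width); // grayscale ramp: indices 9..11
            } else {
                color = 10; // solid black tail
            }
            assert(color < 12);
        }
    }
    return 0;
}
```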
+
+void VirtIOGPU::transfer_framebuffer_data_to_host(VirtIOGPURect dirty_rect)
+{
+    VERIFY(m_operation_lock.is_locked());
+    auto& request = *reinterpret_cast<VirtIOGPUTransferToHost2D*>(m_scratch_space->vaddr().as_ptr());
+    auto& response = *reinterpret_cast<VirtIOGPUCtrlHeader*>((m_scratch_space->vaddr().offset(sizeof(request)).as_ptr()));
+
+    populate_virtio_gpu_request_header(request.header, VirtIOGPUCtrlType::VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D, VIRTIO_GPU_FLAG_FENCE);
+    request.offset = (dirty_rect.x + (dirty_rect.y * m_display_info.rect.width)) * sizeof(u32);
+    request.resource_id = m_framebuffer_id.value();
+    request.rect = dirty_rect;
+
+    synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
+
+    VERIFY(response.type == static_cast<u32>(VirtIOGPUCtrlType::VIRTIO_GPU_RESP_OK_NODATA));
+}
+
+void VirtIOGPU::flush_displayed_image(VirtIOGPURect dirty_rect)
+{
+    VERIFY(m_operation_lock.is_locked());
+    auto& request = *reinterpret_cast<VirtIOGPUResourceFlush*>(m_scratch_space->vaddr().as_ptr());
+    auto& response = *reinterpret_cast<VirtIOGPUCtrlHeader*>((m_scratch_space->vaddr().offset(sizeof(request)).as_ptr()));
+
+    populate_virtio_gpu_request_header(request.header, VirtIOGPUCtrlType::VIRTIO_GPU_CMD_RESOURCE_FLUSH, VIRTIO_GPU_FLAG_FENCE);
+    request.resource_id = m_framebuffer_id.value();
+    request.rect = dirty_rect;
+
+    synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
+
+    VERIFY(response.type == static_cast<u32>(VirtIOGPUCtrlType::VIRTIO_GPU_RESP_OK_NODATA));
+}
+
+void VirtIOGPU::synchronous_virtio_gpu_command(PhysicalAddress buffer_start, size_t request_size, size_t response_size)
+{
+    VERIFY(m_operation_lock.is_locked());
+    VERIFY(m_outstanding_request.is_empty());
+    auto& queue = get_queue(CONTROLQ);
+    {
+        ScopedSpinLock lock(queue.lock());
+        VirtIOQueueChain chain { queue };
+        chain.add_buffer_to_chain(buffer_start, request_size, BufferType::DeviceReadable);
+        chain.add_buffer_to_chain(buffer_start.offset(request_size), response_size, BufferType::DeviceWritable);
+        supply_chain_and_notify(CONTROLQ, chain);
+        full_memory_barrier();
+    }
+    m_outstanding_request.wait_forever();
+}
+
+void VirtIOGPU::populate_virtio_gpu_request_header(VirtIOGPUCtrlHeader& header, VirtIOGPUCtrlType ctrl_type, u32 flags)
+{
+    header.type = static_cast<u32>(ctrl_type);
+    header.flags = flags;
+    header.fence_id = 0;
+    header.context_id = 0;
+    header.padding = 0;
+}
+
+void VirtIOGPU::flush_dirty_window(VirtIOGPURect dirty_rect)
+{
+    Locker locker(m_operation_lock);
+    transfer_framebuffer_data_to_host(dirty_rect);
+    flush_displayed_image(dirty_rect);
+}
+
+bool VirtIOGPU::try_to_set_resolution(size_t width, size_t height)
+{
+    if (width > MAX_VIRTIOGPU_RESOLUTION_WIDTH && height > MAX_VIRTIOGPU_RESOLUTION_HEIGHT)
+        return false;
+    Locker locker(m_operation_lock);
+    VirtIOGPURect rect = {
+        .x = 0,
+        .y = 0,
+        .width = (u32)width,
+        .height = (u32)height,
+    };
+    auto old_framebuffer_id = m_framebuffer_id;
+    auto new_framebuffer_id = create_2d_resource(rect);
+    ensure_backing_storage(*m_framebuffer, calculate_framebuffer_size(width, height), new_framebuffer_id);
+    set_scanout_resource(m_chosen_scanout.value(), new_framebuffer_id, rect);
+    detach_backing_storage(old_framebuffer_id);
+    delete_resource(old_framebuffer_id);
+    m_framebuffer_id = new_framebuffer_id;
+    m_display_info.rect = rect;
+    return true;
+}
+
+VirtIOGPUResourceID VirtIOGPU::allocate_resource_id()
+{
+    VERIFY(m_operation_lock.is_locked());
+    m_resource_id_counter = m_resource_id_counter.value() + 1;
+    return m_resource_id_counter;
+}
+
+void VirtIOGPU::delete_resource(VirtIOGPUResourceID resource_id)
+{
+    VERIFY(m_operation_lock.is_locked());
+    auto& request = *reinterpret_cast<VirtioGPUResourceUnref*>(m_scratch_space->vaddr().as_ptr());
+    auto& response = *reinterpret_cast<VirtIOGPUCtrlHeader*>((m_scratch_space->vaddr().offset(sizeof(request)).as_ptr()));
+
+    populate_virtio_gpu_request_header(request.header, VirtIOGPUCtrlType::VIRTIO_GPU_CMD_RESOURCE_UNREF, VIRTIO_GPU_FLAG_FENCE);
+    request.resource_id = resource_id.value();
+
+    synchronous_virtio_gpu_command(start_of_scratch_space(), sizeof(request), sizeof(response));
+
+    VERIFY(response.type == static_cast<u32>(VirtIOGPUCtrlType::VIRTIO_GPU_RESP_OK_NODATA));
+}
+
+size_t VirtIOGPU::framebuffer_size_in_bytes() const
+{
+    return m_display_info.rect.width * m_display_info.rect.height * sizeof(u32);
+}
+
+void VirtIOGPU::clear_to_black()
+{
+    size_t width = m_display_info.rect.width;
+    size_t height = m_display_info.rect.height;
+    u8* data = m_framebuffer->vaddr().as_ptr();
+    for (size_t i = 0; i < width * height; ++i) {
+        data[4 * i + 0] = 0x00;
+        data[4 * i + 1] = 0x00;
+        data[4 * i + 2] = 0x00;
+        data[4 * i + 3] = 0xff;
+    }
+}
+
+}
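transfer_framebuffer_data_to_host() positions the host-side copy with a plain linear byte offset into the B8G8R8X8 framebuffer (4 bytes per pixel). A worked example of that offset formula (illustrative only, with an assumed 1024-pixel-wide scanout):

```cpp
// Same formula as the kernel code: linear offset of the dirty rect's top-left pixel.
#include <cstdint>

constexpr uint32_t scanout_width = 1024; // assumed mode width for the example

constexpr uint64_t transfer_offset(uint32_t x, uint32_t y)
{
    return (x + (y * static_cast<uint64_t>(scanout_width))) * sizeof(uint32_t);
}

static_assert(transfer_offset(0, 0) == 0);
static_assert(transfer_offset(100, 50) == (100 + 50 * 1024) * 4); // 205'200 bytes into the resource
static_assert(transfer_offset(0, 767) == 767 * 1024 * 4);         // start of the last row of a 1024x768 mode
```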
diff --git a/Kernel/Graphics/VirtIOGPU/VirtIOGPU.h b/Kernel/Graphics/VirtIOGPU/VirtIOGPU.h
new file mode 100644
index 0000000000..c2e0e588ba
--- /dev/null
+++ b/Kernel/Graphics/VirtIOGPU/VirtIOGPU.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2021, Sahan Fernando <sahan.h.fernando@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <AK/DistinctNumeric.h>
+#include <Kernel/Devices/BlockDevice.h>
+#include <Kernel/VirtIO/VirtIO.h>
+#include <Kernel/VirtIO/VirtIOQueue.h>
+
+#define VIRTIO_GPU_F_VIRGL (1 << 0)
+#define VIRTIO_GPU_F_EDID (1 << 1)
+
+#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+
+#define VIRTIO_GPU_MAX_SCANOUTS 16
+
+#define CONTROLQ 0
+#define CURSORQ 1
+
+#define MAX_VIRTIOGPU_RESOLUTION_WIDTH 3840
+#define MAX_VIRTIOGPU_RESOLUTION_HEIGHT 2160
+
+namespace Kernel::Graphics {
+
+TYPEDEF_DISTINCT_ORDERED_ID(u32, VirtIOGPUResourceID);
+TYPEDEF_DISTINCT_ORDERED_ID(u32, VirtIOGPUScanoutID);
+
+enum class VirtIOGPUCtrlType : u32 {
+    /* 2d commands */
+    VIRTIO_GPU_CMD_GET_DISPLAY_INFO = 0x0100,
+    VIRTIO_GPU_CMD_RESOURCE_CREATE_2D,
+    VIRTIO_GPU_CMD_RESOURCE_UNREF,
+    VIRTIO_GPU_CMD_SET_SCANOUT,
+    VIRTIO_GPU_CMD_RESOURCE_FLUSH,
+    VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
+    VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
+    VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
+    VIRTIO_GPU_CMD_GET_CAPSET_INFO,
+    VIRTIO_GPU_CMD_GET_CAPSET,
+    VIRTIO_GPU_CMD_GET_EDID,
+
+    /* cursor commands */
+    VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
+    VIRTIO_GPU_CMD_MOVE_CURSOR,
+
+    /* success responses */
+    VIRTIO_GPU_RESP_OK_NODATA = 0x1100,
+    VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
+    VIRTIO_GPU_RESP_OK_CAPSET_INFO,
+    VIRTIO_GPU_RESP_OK_CAPSET,
+    VIRTIO_GPU_RESP_OK_EDID,
+
+    /* error responses */
+    VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
+    VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY,
+    VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID,
+    VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID,
+    VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID,
+    VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
+};
+
+struct VirtIOGPUCtrlHeader {
+    u32 type;
+    u32 flags;
+    u64 fence_id;
+    u32 context_id;
+    u32 padding;
+};
+
+struct VirtIOGPURect {
+    u32 x;
+    u32 y;
+    u32 width;
+    u32 height;
+};
+
+struct VirtIOGPURespDisplayInfo {
+    VirtIOGPUCtrlHeader header;
+    struct VirtIOGPUDisplayOne {
+        VirtIOGPURect rect;
+        u32 enabled;
+        u32 flags;
+    } scanout_modes[VIRTIO_GPU_MAX_SCANOUTS];
+};
+
+enum class VirtIOGPUFormats : u32 {
+    VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM = 1,
+    VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM = 2,
+    VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM = 3,
+    VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM = 4,
+
+    VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM = 67,
+    VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM = 68,
+
+    VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM = 121,
+    VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM = 134,
+};
+
+struct VirtIOGPUResourceCreate2D {
+    VirtIOGPUCtrlHeader header;
+    u32 resource_id;
+    u32 format;
+    u32 width;
+    u32 height;
+};
+
+struct VirtioGPUResourceUnref {
+    VirtIOGPUCtrlHeader header;
+    u32 resource_id;
+    u32 padding;
+};
+
+struct VirtIOGPUSetScanOut {
+    VirtIOGPUCtrlHeader header;
+    VirtIOGPURect rect;
+    u32 scanout_id;
+    u32 resource_id;
+};
+
+struct VirtIOGPUMemEntry {
+    u64 address;
+    u32 length;
+    u32 padding;
+};
+
+struct VirtIOGPUResourceAttachBacking {
+    VirtIOGPUCtrlHeader header;
+    u32 resource_id;
+    u32 num_entries;
+    VirtIOGPUMemEntry entries[];
+};
+
+struct VirtIOGPUResourceDetachBacking {
+    VirtIOGPUCtrlHeader header;
+    u32 resource_id;
+    u32 padding;
+};
+
+struct VirtIOGPUTransferToHost2D {
+    VirtIOGPUCtrlHeader header;
+    VirtIOGPURect rect;
+    u64 offset;
+    u32 resource_id;
+    u32 padding;
+};
+
+struct VirtIOGPUResourceFlush {
+    VirtIOGPUCtrlHeader header;
+    VirtIOGPURect rect;
+    u32 resource_id;
+    u32 padding;
+};
+
+class VirtIOGPU final
+    : public VirtIODevice
+    , public RefCounted<VirtIOGPU> {
+public:
+    VirtIOGPU(PCI::Address);
+    virtual ~VirtIOGPU() override;
+
+    bool try_to_set_resolution(size_t width, size_t height);
+    void clear_to_black();
+
+    VMObject& framebuffer_vm_object() { return m_framebuffer->vmobject(); }
+    Region& framebuffer_region() { return *m_framebuffer; }
+
+    size_t framebuffer_width() { return m_display_info.rect.width; }
+    size_t framebuffer_height() { return m_display_info.rect.height; }
+    size_t framebuffer_pitch() { return m_display_info.rect.width * 4; }
+
+    size_t framebuffer_size_in_bytes() const;
+    size_t calculate_framebuffer_size(size_t width, size_t height) const
+    {
+        return sizeof(u32) * width * height;
+    }
+
+    void flush_dirty_window(VirtIOGPURect dirty_rect);
+
+private:
+    virtual bool handle_device_config_change() override;
+    virtual void handle_queue_update(u16 queue_index) override;
+
+    u32 get_pending_events();
+    void clear_pending_events(u32 event_bitmask);
+
+    VirtIOGPUResourceID allocate_resource_id();
+    PhysicalAddress start_of_scratch_space() const { return m_scratch_space->physical_page(0)->paddr(); }
+    void synchronous_virtio_gpu_command(PhysicalAddress buffer_start, size_t request_size, size_t response_size);
+    void populate_virtio_gpu_request_header(VirtIOGPUCtrlHeader& header, VirtIOGPUCtrlType ctrl_type, u32 flags = 0);
+
+    void query_display_information();
+    VirtIOGPUResourceID create_2d_resource(VirtIOGPURect rect);
+    void delete_resource(VirtIOGPUResourceID resource_id);
+    void ensure_backing_storage(Region& region, size_t buffer_length, VirtIOGPUResourceID resource_id);
+    void detach_backing_storage(VirtIOGPUResourceID resource_id);
+    void set_scanout_resource(VirtIOGPUScanoutID scanout, VirtIOGPUResourceID resource_id, VirtIOGPURect rect);
+    void draw_ntsc_test_pattern();
+    void transfer_framebuffer_data_to_host(VirtIOGPURect rect);
+    void flush_displayed_image(VirtIOGPURect dirty_rect);
+
+    VirtIOGPURespDisplayInfo::VirtIOGPUDisplayOne m_display_info;
+    Optional<VirtIOGPUScanoutID> m_chosen_scanout;
+    VirtIOGPUResourceID m_framebuffer_id { 0 };
+    Configuration const* m_device_configuration { nullptr };
+    size_t m_num_scanouts { 0 };
+    OwnPtr<Region> m_framebuffer;
+    VirtIOGPUResourceID m_resource_id_counter { 0 };
+
+    // Synchronous commands
+    WaitQueue m_outstanding_request;
+    Lock m_operation_lock;
+    OwnPtr<Region> m_scratch_space;
+};
+
+}
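These request/response structs are handed to the device verbatim over the virtqueue, so their in-memory layout has to match the sizes the virtio-gpu specification expects. A compile-time check that could live in any translation unit including this header (an illustrative addition, not part of the commit; it assumes natural alignment of the fields above):

```cpp
// Expected wire sizes per the virtio-gpu spec, given the field types declared above.
static_assert(sizeof(Kernel::Graphics::VirtIOGPUCtrlHeader) == 24);
static_assert(sizeof(Kernel::Graphics::VirtIOGPURect) == 16);
static_assert(sizeof(Kernel::Graphics::VirtIOGPUResourceCreate2D) == 40);
static_assert(sizeof(Kernel::Graphics::VirtIOGPUMemEntry) == 16);
static_assert(sizeof(Kernel::Graphics::VirtIOGPUResourceAttachBacking) == 32); // flexible entries[] follow
static_assert(sizeof(Kernel::Graphics::VirtIOGPUSetScanOut) == 48);
static_assert(sizeof(Kernel::Graphics::VirtIOGPUTransferToHost2D) == 56);
static_assert(sizeof(Kernel::Graphics::VirtIOGPUResourceFlush) == 48);
```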
diff --git a/Kernel/Graphics/VirtIOGPU/VirtIOGPUConsole.cpp b/Kernel/Graphics/VirtIOGPU/VirtIOGPUConsole.cpp
new file mode 100644
index 0000000000..ead2b8d580
--- /dev/null
+++ b/Kernel/Graphics/VirtIOGPU/VirtIOGPUConsole.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2021, Sahan Fernando <sahan.h.fernando@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <Kernel/Graphics/VirtIOGPU/VirtIOGPUConsole.h>
+#include <Kernel/WorkQueue.h>
+
+namespace Kernel::Graphics {
+
+constexpr static AK::Time refresh_interval = AK::Time::from_milliseconds(16);
+
+void DirtyRect::union_rect(size_t x, size_t y, size_t width, size_t height)
+{
+    if (width == 0 || height == 0)
+        return;
+    if (m_is_dirty) {
+        m_x0 = min(x, m_x0);
+        m_y0 = min(y, m_y0);
+        m_x1 = max(x + width, m_x1);
+        m_y1 = max(y + height, m_y1);
+    } else {
+        m_is_dirty = true;
+        m_x0 = x;
+        m_y0 = y;
+        m_x1 = x + width;
+        m_y1 = y + height;
+    }
+}
+
+NonnullRefPtr<VirtIOGPUConsole> VirtIOGPUConsole::initialize(RefPtr<VirtIOGPU> gpu)
+{
+    return adopt_ref(*new VirtIOGPUConsole(gpu));
+}
+
+VirtIOGPUConsole::VirtIOGPUConsole(RefPtr<VirtIOGPU> gpu)
+    : GenericFramebufferConsole(gpu->framebuffer_width(), gpu->framebuffer_height(), gpu->framebuffer_pitch())
+    , m_gpu(gpu)
+{
+    m_framebuffer_region = gpu->framebuffer_region();
+    enqueue_refresh_timer();
+}
+
+void VirtIOGPUConsole::set_resolution(size_t width, size_t height, size_t)
+{
+    auto did_set_resolution = m_gpu->try_to_set_resolution(width, height);
+    VERIFY(did_set_resolution);
+}
+
+void VirtIOGPUConsole::flush(size_t x, size_t y, size_t width, size_t height)
+{
+    m_dirty_rect.union_rect(x, y, width, height);
+}
+
+void VirtIOGPUConsole::enqueue_refresh_timer()
+{
+    NonnullRefPtr<Timer> refresh_timer = adopt_ref(*new Timer());
+    refresh_timer->setup(CLOCK_MONOTONIC, refresh_interval, [this]() {
+        auto rect = m_dirty_rect;
+        if (rect.is_dirty()) {
+            VirtIOGPURect dirty_rect {
+                .x = (u32)rect.x(),
+                .y = (u32)rect.y(),
+                .width = (u32)rect.width(),
+                .height = (u32)rect.height(),
+            };
+            g_io_work->queue([this, dirty_rect]() {
+                m_gpu->flush_dirty_window(dirty_rect);
+                m_dirty_rect.clear();
+            });
+        }
+        enqueue_refresh_timer();
+    });
+    TimerQueue::the().add_timer(move(refresh_timer));
+}
+
+void VirtIOGPUConsole::enable()
+{
+    GenericFramebufferConsole::enable();
+    m_width = m_gpu->framebuffer_width();
+    m_height = m_gpu->framebuffer_height();
+    m_pitch = m_gpu->framebuffer_pitch();
+    m_dirty_rect.union_rect(0, 0, m_width, m_height);
+}
+
+}
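The console never flushes synchronously: flush() only grows the DirtyRect, and the 16 ms timer pushes one coalesced rectangle through the work queue. A small sketch of that coalescing (illustration only; DirtyRect is kernel-internal, so this would have to be compiled inside the kernel tree):

```cpp
// Exercises DirtyRect exactly the way VirtIOGPUConsole does.
#include <Kernel/Graphics/VirtIOGPU/VirtIOGPUConsole.h>

void dirty_rect_coalescing_example()
{
    Kernel::Graphics::DirtyRect dirty;
    VERIFY(!dirty.is_dirty());

    dirty.union_rect(8, 16, 8, 16);  // first glyph cell drawn by the console
    dirty.union_rect(64, 16, 8, 16); // another cell on the same row

    // The accumulated rect is the bounding box of both updates:
    VERIFY(dirty.x() == 8 && dirty.y() == 16);
    VERIFY(dirty.width() == 64);  // (64 + 8) - 8
    VERIFY(dirty.height() == 16); // (16 + 16) - 16

    // That single bounding box becomes the VirtIOGPURect the 16 ms refresh timer
    // hands to VirtIOGPU::flush_dirty_window(); clear() then rearms it.
    dirty.clear();
    VERIFY(!dirty.is_dirty());
}
```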
diff --git a/Kernel/Graphics/VirtIOGPU/VirtIOGPUConsole.h b/Kernel/Graphics/VirtIOGPU/VirtIOGPUConsole.h
new file mode 100644
index 0000000000..a3914b3709
--- /dev/null
+++ b/Kernel/Graphics/VirtIOGPU/VirtIOGPUConsole.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2021, Sahan Fernando <sahan.h.fernando@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <Kernel/Graphics/Console/GenericFramebufferConsole.h>
+#include <Kernel/Graphics/VirtIOGPU/VirtIOGPU.h>
+#include <Kernel/TimerQueue.h>
+
+namespace Kernel::Graphics {
+
+class DirtyRect {
+public:
+    void union_rect(size_t x, size_t y, size_t width, size_t height);
+    bool is_dirty() const { return m_is_dirty; }
+    size_t x() const { return m_x0; }
+    size_t y() const { return m_y0; }
+    size_t width() const { return m_x1 - m_x0; }
+    size_t height() const { return m_y1 - m_y0; }
+    void clear() { m_is_dirty = false; }
+
+private:
+    bool m_is_dirty { false };
+    size_t m_x0 { 0 };
+    size_t m_y0 { 0 };
+    size_t m_x1 { 0 };
+    size_t m_y1 { 0 };
+};
+
+class VirtIOGPUConsole final : public GenericFramebufferConsole {
+public:
+    static NonnullRefPtr<VirtIOGPUConsole> initialize(RefPtr<VirtIOGPU>);
+
+    virtual void set_resolution(size_t width, size_t height, size_t pitch) override;
+    virtual void flush(size_t x, size_t y, size_t width, size_t height) override;
+    virtual void enable() override;
+
+private:
+    void enqueue_refresh_timer();
+    virtual u8* framebuffer_data() override
+    {
+        return m_framebuffer_region.unsafe_ptr()->vaddr().as_ptr();
+    }
+
+    VirtIOGPUConsole(RefPtr<VirtIOGPU>);
+    WeakPtr<Region> m_framebuffer_region;
+    RefPtr<VirtIOGPU> m_gpu;
+    DirtyRect m_dirty_rect;
+};
+
+}
diff --git a/Kernel/Graphics/VirtIOGPU/VirtIOGraphicsAdapter.cpp b/Kernel/Graphics/VirtIOGPU/VirtIOGraphicsAdapter.cpp
new file mode 100644
index 0000000000..bc7404847a
--- /dev/null
+++ b/Kernel/Graphics/VirtIOGPU/VirtIOGraphicsAdapter.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2021, Sahan Fernando <sahan.h.fernando@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <Kernel/Graphics/Console/GenericFramebufferConsole.h>
+#include <Kernel/Graphics/GraphicsManagement.h>
+#include <Kernel/Graphics/VirtIOGPU/VirtIOGPU.h>
+#include <Kernel/Graphics/VirtIOGPU/VirtIOGraphicsAdapter.h>
+
+namespace Kernel::Graphics {
+
+NonnullRefPtr<VirtIOGraphicsAdapter> VirtIOGraphicsAdapter::initialize(PCI::Address base_address)
+{
+    return adopt_ref(*new VirtIOGraphicsAdapter(base_address));
+}
+
+VirtIOGraphicsAdapter::VirtIOGraphicsAdapter(PCI::Address base_address)
+    : GraphicsDevice(base_address)
+{
+    m_gpu_device = adopt_ref(*new VirtIOGPU(base_address)).leak_ref();
+    m_framebuffer_console = Kernel::Graphics::VirtIOGPUConsole::initialize(m_gpu_device);
+    // FIXME: This is a very wrong way to do this...
+    GraphicsManagement::the().m_console = m_framebuffer_console;
+}
+
+void VirtIOGraphicsAdapter::initialize_framebuffer_devices()
+{
+    dbgln_if(VIRTIO_DEBUG, "VirtIOGPU: Initializing framebuffer devices");
+    VERIFY(m_framebuffer_device.is_null());
+    m_framebuffer_device = adopt_ref(*new VirtIOFrameBufferDevice(m_gpu_device)).leak_ref();
+}
+
+void VirtIOGraphicsAdapter::enable_consoles()
+{
+    dbgln_if(VIRTIO_DEBUG, "VirtIOGPU: Enabling consoles");
+    VERIFY(m_framebuffer_console);
+    if (m_framebuffer_device)
+        m_framebuffer_device->deactivate_writes();
+    m_gpu_device->clear_to_black();
+    m_framebuffer_console->enable();
+}
+
+void VirtIOGraphicsAdapter::disable_consoles()
+{
+    dbgln_if(VIRTIO_DEBUG, "VirtIOGPU: Disabling consoles");
+    VERIFY(m_framebuffer_device);
+    VERIFY(m_framebuffer_console);
+    m_framebuffer_console->disable();
+    m_framebuffer_device->activate_writes();
+}
+
+}
diff --git a/Kernel/Graphics/VirtIOGPU/VirtIOGraphicsAdapter.h b/Kernel/Graphics/VirtIOGPU/VirtIOGraphicsAdapter.h
new file mode 100644
index 0000000000..f7f9ab3945
--- /dev/null
+++ b/Kernel/Graphics/VirtIOGPU/VirtIOGraphicsAdapter.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2021, Sahan Fernando <sahan.h.fernando@gmail.com>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#pragma once
+
+#include <Kernel/Graphics/VirtIOGPU/VirtIOFrameBufferDevice.h>
+#include <Kernel/Graphics/VirtIOGPU/VirtIOGPU.h>
+#include <Kernel/Graphics/VirtIOGPU/VirtIOGPUConsole.h>
+
+namespace Kernel::Graphics {
+
+class VirtIOGraphicsAdapter final : public GraphicsDevice {
+    AK_MAKE_ETERNAL
+
+public:
+    static NonnullRefPtr<VirtIOGraphicsAdapter> initialize(PCI::Address);
+
+    virtual bool framebuffer_devices_initialized() const override { return !m_framebuffer_device.is_null(); }
+
+private:
+    explicit VirtIOGraphicsAdapter(PCI::Address base_address);
+
+    virtual void initialize_framebuffer_devices() override;
+    virtual Type type() const override { return Type::Raw; }
+
+    virtual void enable_consoles() override;
+    virtual void disable_consoles() override;
+
+    virtual bool modesetting_capable() const override { return false; }
+    virtual bool double_framebuffering_capable() const override { return false; }
+
+    virtual bool try_to_set_resolution(size_t, size_t, size_t) override { return false; }
+    virtual bool set_y_offset(size_t, size_t) override { return false; }
+
+    RefPtr<VirtIOGPU> m_gpu_device;
+    RefPtr<VirtIOFrameBufferDevice> m_framebuffer_device;
+    RefPtr<VirtIOGPUConsole> m_framebuffer_console;
+};
+
+}