/*
 * Copyright (c) 2021, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Atomic.h>
#include <Kernel/Bus/VirtIO/Queue.h>

namespace Kernel::VirtIO {
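
// A virtqueue lives in a single physically contiguous allocation that is shared with the
// device. It holds three areas: the descriptor table, the driver-owned "available" ring
// (QueueDriver) and the device-owned "used" ring (QueueDevice). The driver publishes
// buffer chains on the available ring and the device hands completed chains back on the
// used ring.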
Queue::Queue(u16 queue_size, u16 notify_offset)
    : m_queue_size(queue_size)
    , m_notify_offset(notify_offset)
    , m_free_buffers(queue_size)
{
    size_t size_of_descriptors = sizeof(QueueDescriptor) * queue_size;
    size_t size_of_driver = sizeof(QueueDriver) + queue_size * sizeof(u16);
    size_t size_of_device = sizeof(QueueDevice) + queue_size * sizeof(QueueDeviceItem);
    auto queue_region_size = Memory::page_round_up(size_of_descriptors + size_of_driver + size_of_device);
    // The device addresses the queue by physical address, so the backing memory must be
    // physically contiguous. A single page is contiguous by definition; anything larger
    // needs an explicitly contiguous allocation.
    if (queue_region_size <= PAGE_SIZE)
        m_queue_region = MM.allocate_kernel_region(queue_region_size, "VirtIO Queue", Memory::Region::Access::ReadWrite).release_value();
    else
        m_queue_region = MM.allocate_contiguous_kernel_region(queue_region_size, "VirtIO Queue", Memory::Region::Access::ReadWrite).release_value();
    // TODO: ensure alignment!!!
    u8* ptr = m_queue_region->vaddr().as_ptr();
    memset(ptr, 0, m_queue_region->size());
    m_descriptors = adopt_own_if_nonnull(reinterpret_cast<QueueDescriptor*>(ptr));
    m_driver = adopt_own_if_nonnull(reinterpret_cast<QueueDriver*>(ptr + size_of_descriptors));
    m_device = adopt_own_if_nonnull(reinterpret_cast<QueueDevice*>(ptr + size_of_descriptors + size_of_driver));
    for (auto i = 0; i + 1 < queue_size; i++) {
        m_descriptors[i].next = i + 1; // link all of the descriptors in a line
    }
    enable_interrupts();
}

Queue::~Queue()
{
}

void Queue::enable_interrupts()
{
    SpinlockLocker lock(m_lock);
    // Clearing the available ring's flags (VIRTQ_AVAIL_F_NO_INTERRUPT) asks the device
    // to interrupt us when it adds buffers to the used ring.
    m_driver->flags = 0;
}

void Queue::disable_interrupts()
{
    SpinlockLocker lock(m_lock);
    // Setting VIRTQ_AVAIL_F_NO_INTERRUPT hints to the device that we don't need an
    // interrupt for used buffers; the device may still send one.
    m_driver->flags = 1;
}

bool Queue::new_data_available() const
{
    const auto index = AK::atomic_load(&m_device->index, AK::MemoryOrder::memory_order_relaxed);
    const auto used_tail = AK::atomic_load(&m_used_tail, AK::MemoryOrder::memory_order_relaxed);
    return index != used_tail;
}

QueueChain Queue::pop_used_buffer_chain(size_t& used)
{
    VERIFY(m_lock.is_locked());
    if (!new_data_available()) {
        used = 0;
        return QueueChain(*this);
    }
    // Make sure the used ring element is read only after we have observed the updated index.
    full_memory_barrier();
    // Determine used length
    used = m_device->rings[m_used_tail % m_queue_size].length;
    // Determine start, end and number of nodes in chain
    auto descriptor_index = m_device->rings[m_used_tail % m_queue_size].index;
    size_t length_of_chain = 1;
    auto last_index = descriptor_index;
    while (m_descriptors[last_index].flags & VIRTQ_DESC_F_NEXT) {
        ++length_of_chain;
        last_index = m_descriptors[last_index].next;
    }
    // We are now done with this buffer chain
    m_used_tail++;
    return QueueChain(*this, descriptor_index, last_index, length_of_chain);
}
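
// Sketch of how a driver might drain its completed buffers, e.g. from an interrupt
// handler. `handle_buffer` is a hypothetical driver-specific callback; everything else
// is part of this file:
//
//     SpinlockLocker locker(queue.lock());
//     size_t used = 0;
//     for (auto chain = queue.pop_used_buffer_chain(used); !chain.is_empty(); chain = queue.pop_used_buffer_chain(used)) {
//         handle_buffer(chain, used);
//         chain.release_buffer_slots_to_queue();
//     }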

void Queue::discard_used_buffers()
{
    VERIFY(m_lock.is_locked());
    size_t used;
    for (auto buffer = pop_used_buffer_chain(used); !buffer.is_empty(); buffer = pop_used_buffer_chain(used)) {
        buffer.release_buffer_slots_to_queue();
    }
}

void Queue::reclaim_buffer_chain(u16 chain_start_index, u16 chain_end_index, size_t length_of_chain)
{
    VERIFY(m_lock.is_locked());
    m_descriptors[chain_end_index].next = m_free_head;
    m_free_head = chain_start_index;
    m_free_buffers += length_of_chain;
}

bool Queue::has_free_slots() const
{
    const auto free_buffers = AK::atomic_load(&m_free_buffers, AK::MemoryOrder::memory_order_relaxed);
    return free_buffers > 0;
}

Optional<u16> Queue::take_free_slot()
{
    VERIFY(m_lock.is_locked());
    if (has_free_slots()) {
        auto descriptor_index = m_free_head;
        m_free_head = m_descriptors[descriptor_index].next;
        --m_free_buffers;
        return descriptor_index;
    } else {
        return {};
    }
}

bool Queue::should_notify() const
{
    VERIFY(m_lock.is_locked());
    // The device sets VIRTQ_USED_F_NO_NOTIFY in the used ring's flags when it does not
    // need to be notified after the driver adds buffers to the available ring.
    auto device_flags = m_device->flags;
    return !(device_flags & VIRTQ_USED_F_NO_NOTIFY);
}

bool QueueChain::add_buffer_to_chain(PhysicalAddress buffer_start, size_t buffer_length, BufferType buffer_type)
{
    VERIFY(m_queue.lock().is_locked());
    // Ensure that no readable pages will be inserted after a writable one, as required by the VirtIO spec
    VERIFY(buffer_type == BufferType::DeviceWritable || !m_chain_has_writable_pages);
    m_chain_has_writable_pages |= (buffer_type == BufferType::DeviceWritable);
    // Take a free slot from the queue
    auto descriptor_index = m_queue.take_free_slot();
    if (!descriptor_index.has_value())
        return false;
    if (!m_start_of_chain_index.has_value()) {
        // Set start of chain if it hasn't been set
        m_start_of_chain_index = descriptor_index.value();
    } else {
        // Link from previous element in QueueChain
        m_queue.m_descriptors[m_end_of_chain_index.value()].flags |= VIRTQ_DESC_F_NEXT;
        m_queue.m_descriptors[m_end_of_chain_index.value()].next = descriptor_index.value();
    }
    // Update end of chain
    m_end_of_chain_index = descriptor_index.value();
    ++m_chain_length;
    // Populate buffer info (the descriptor's length field is only 32 bits wide)
    VERIFY(buffer_length <= NumericLimits<u32>::max());
    m_queue.m_descriptors[descriptor_index.value()].address = static_cast<u64>(buffer_start.get());
    m_queue.m_descriptors[descriptor_index.value()].flags = static_cast<u16>(buffer_type);
    m_queue.m_descriptors[descriptor_index.value()].length = static_cast<u32>(buffer_length);
    return true;
}

void QueueChain::submit_to_queue()
{
    VERIFY(m_queue.lock().is_locked());
    VERIFY(m_start_of_chain_index.has_value());
    auto next_index = m_queue.m_driver_index_shadow % m_queue.m_queue_size;
    m_queue.m_driver->rings[next_index] = m_start_of_chain_index.value();
    m_queue.m_driver_index_shadow++;
    // The ring entry must be visible to the device before we publish the new index.
    full_memory_barrier();
    m_queue.m_driver->index = m_queue.m_driver_index_shadow;
    // Reset internal chain state
    m_start_of_chain_index = m_end_of_chain_index = {};
    m_chain_has_writable_pages = false;
    m_chain_length = 0;
}
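
// Sketch of the submit path, assuming the caller holds the queue lock, owns physically
// contiguous DMA buffers and has a device-specific doorbell write. `notify_queue` is a
// hypothetical helper and `BufferType::DeviceReadable` is the assumed counterpart of
// BufferType::DeviceWritable:
//
//     QueueChain chain(queue);
//     chain.add_buffer_to_chain(request_paddr, request_size, BufferType::DeviceReadable);
//     chain.add_buffer_to_chain(response_paddr, response_size, BufferType::DeviceWritable);
//     chain.submit_to_queue();
//     if (queue.should_notify())
//         notify_queue(queue);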

void QueueChain::release_buffer_slots_to_queue()
{
    VERIFY(m_queue.lock().is_locked());
    if (m_start_of_chain_index.has_value()) {
        // Add the currently stored chain back to the queue's free pool
        m_queue.reclaim_buffer_chain(m_start_of_chain_index.value(), m_end_of_chain_index.value(), m_chain_length);
        // Reset internal chain state
        m_start_of_chain_index = m_end_of_chain_index = {};
        m_chain_has_writable_pages = false;
        m_chain_length = 0;
    }
}

}