summaryrefslogtreecommitdiff
path: root/Kernel/Net/NetworkAdapter.cpp
diff options
context:
space:
mode:
author: Andreas Kling <awesomekling@gmail.com> 2019-12-14 11:07:37 +0100
committer: Andreas Kling <awesomekling@gmail.com> 2019-12-14 11:07:37 +0100
commit: ac215ca601393dc111643b119835ea39e6c9bd23 (patch)
tree: 2a8175cb798282ac23e487ac11c17e77f9b91515 /Kernel/Net/NetworkAdapter.cpp
parent: 39246fb6219c7c86d9d3133eb031683e8596ee57 (diff)
download: serenity-ac215ca601393dc111643b119835ea39e6c9bd23.zip
Net: Try to reuse incoming packet buffers to avoid allocation churn
The majority of the time in NetworkTask was being spent in allocating and deallocating KBuffers for each incoming packet. We'll now keep up to 100 buffers around and reuse them for new packets if the next incoming packet fits in an old buffer. This is pretty naively implemented but definitely cuts down on time spent here.
Diffstat (limited to 'Kernel/Net/NetworkAdapter.cpp')
-rw-r--r--  Kernel/Net/NetworkAdapter.cpp  35
1 file changed, 30 insertions(+), 5 deletions(-)
diff --git a/Kernel/Net/NetworkAdapter.cpp b/Kernel/Net/NetworkAdapter.cpp
index f20a963966..5db9d5c286 100644
--- a/Kernel/Net/NetworkAdapter.cpp
+++ b/Kernel/Net/NetworkAdapter.cpp
@@ -1,11 +1,11 @@
#include <AK/HashTable.h>
#include <AK/StringBuilder.h>
+#include <Kernel/Heap/kmalloc.h>
#include <Kernel/Lock.h>
#include <Kernel/Net/EtherType.h>
#include <Kernel/Net/EthernetFrameHeader.h>
#include <Kernel/Net/NetworkAdapter.h>
#include <Kernel/StdLib.h>
-#include <Kernel/Heap/kmalloc.h>
static Lockable<HashTable<NetworkAdapter*>>& all_adapters()
{
@@ -103,17 +103,42 @@ void NetworkAdapter::did_receive(const u8* data, int length)
InterruptDisabler disabler;
m_packets_in++;
m_bytes_in += length;
- m_packet_queue.append(KBuffer::copy(data, length));
+
+ Optional<KBuffer> buffer;
+
+ if (m_unused_packet_buffers.is_empty()) {
+ buffer = KBuffer::copy(data, length);
+ } else {
+ buffer = m_unused_packet_buffers.take_first();
+ --m_unused_packet_buffers_count;
+ if ((size_t)length <= buffer.value().size()) {
+ memcpy(buffer.value().data(), data, length);
+ buffer.value().set_size(length);
+ } else {
+ buffer = KBuffer::copy(data, length);
+ }
+ }
+
+ m_packet_queue.append(buffer.value());
+
if (on_receive)
on_receive();
}
-Optional<KBuffer> NetworkAdapter::dequeue_packet()
+size_t NetworkAdapter::dequeue_packet(u8* buffer, size_t buffer_size)
{
InterruptDisabler disabler;
if (m_packet_queue.is_empty())
- return {};
- return m_packet_queue.take_first();
+ return 0;
+ auto packet = m_packet_queue.take_first();
+ size_t packet_size = packet.size();
+ ASSERT(packet_size <= buffer_size);
+ memcpy(buffer, packet.data(), packet_size);
+ if (m_unused_packet_buffers_count < 100) {
+ m_unused_packet_buffers.append(packet);
+ ++m_unused_packet_buffers_count;
+ }
+ return packet_size;
}
void NetworkAdapter::set_ipv4_address(const IPv4Address& address)