author     Andreas Kling <kling@serenityos.org>  2021-08-07 15:42:11 +0200
committer  Andreas Kling <kling@serenityos.org>  2021-08-07 18:49:27 +0200
commit     0cb6c3c8319711eb25518ee9bc2d62f4a1805027 (patch)
tree       771719ee4f83f2fdcbf428b6aa18dd18f18ef0d6 /Kernel/Net
parent     4c582b57e9522bcca0bc2277cdb44b6085b17676 (diff)
Kernel/TCP: Port TCP retransmit queue to ProtectedValue
I had to switch to exclusive locking since ProtectedValue rightly doesn't allow you to mutate protected data with only a shared lock.
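For context, ProtectedValue couples a value with a lock and only hands the value out through with_shared()/with_exclusive() callbacks, so the shared path cannot mutate it. Below is a minimal userspace sketch of that pattern, using std::shared_mutex as a stand-in for the kernel's locking primitive; the class and member names here are illustrative, not the actual Kernel implementation.

#include <shared_mutex>

template<typename T>
class ProtectedValueSketch {
public:
    // Shared (read) access: the callback only ever sees a const reference,
    // so code that needs to mutate the protected data will not compile here.
    template<typename Callback>
    decltype(auto) with_shared(Callback callback) const
    {
        std::shared_lock lock(m_mutex);
        return callback(m_value);
    }

    // Exclusive (write) access: the callback receives a mutable reference.
    template<typename Callback>
    decltype(auto) with_exclusive(Callback callback)
    {
        std::unique_lock lock(m_mutex);
        return callback(m_value);
    }

private:
    mutable std::shared_mutex m_mutex;
    T m_value {};
};

Under this scheme, can_write() can keep a shared lock because it only reads unacked_packets.size, while send_tcp_packet(), receive_tcp_packet() and retransmit_packets() mutate the queue (or the per-packet tx_counter) and therefore have to take the exclusive path, which is the switch the commit message describes.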
Diffstat (limited to 'Kernel/Net')
-rw-r--r--  Kernel/Net/TCPSocket.cpp  129
-rw-r--r--  Kernel/Net/TCPSocket.h      9
2 files changed, 72 insertions, 66 deletions
diff --git a/Kernel/Net/TCPSocket.cpp b/Kernel/Net/TCPSocket.cpp
index faf3352a41..c9af9da414 100644
--- a/Kernel/Net/TCPSocket.cpp
+++ b/Kernel/Net/TCPSocket.cpp
@@ -254,10 +254,11 @@ KResult TCPSocket::send_tcp_packet(u16 flags, const UserOrKernelBuffer* payload,
m_packets_out++;
m_bytes_out += buffer_size;
if (tcp_packet.has_syn() || payload_size > 0) {
- MutexLocker locker(m_not_acked_lock);
- m_not_acked.append({ m_sequence_number, move(packet), ipv4_payload_offset, *routing_decision.adapter });
- m_not_acked_size += payload_size;
- enqueue_for_retransmit();
+ m_unacked_packets.with_exclusive([&](auto& unacked_packets) {
+ unacked_packets.packets.append({ m_sequence_number, move(packet), ipv4_payload_offset, *routing_decision.adapter });
+ unacked_packets.size += payload_size;
+ enqueue_for_retransmit();
+ });
} else {
routing_decision.adapter->release_packet_buffer(*packet);
}
@@ -273,33 +274,34 @@ void TCPSocket::receive_tcp_packet(const TCPPacket& packet, u16 size)
dbgln_if(TCP_SOCKET_DEBUG, "TCPSocket: receive_tcp_packet: {}", ack_number);
int removed = 0;
- MutexLocker locker(m_not_acked_lock);
- while (!m_not_acked.is_empty()) {
- auto& packet = m_not_acked.first();
-
- dbgln_if(TCP_SOCKET_DEBUG, "TCPSocket: iterate: {}", packet.ack_number);
-
- if (packet.ack_number <= ack_number) {
- auto old_adapter = packet.adapter.strong_ref();
- if (old_adapter)
- old_adapter->release_packet_buffer(*packet.buffer);
- TCPPacket& tcp_packet = *(TCPPacket*)(packet.buffer->buffer->data() + packet.ipv4_payload_offset);
- auto payload_size = packet.buffer->buffer->data() + packet.buffer->buffer->size() - (u8*)tcp_packet.payload();
- m_not_acked_size -= payload_size;
- evaluate_block_conditions();
- m_not_acked.take_first();
- removed++;
- } else {
- break;
+ m_unacked_packets.with_exclusive([&](auto& unacked_packets) {
+ while (!unacked_packets.packets.is_empty()) {
+ auto& packet = unacked_packets.packets.first();
+
+ dbgln_if(TCP_SOCKET_DEBUG, "TCPSocket: iterate: {}", packet.ack_number);
+
+ if (packet.ack_number <= ack_number) {
+ auto old_adapter = packet.adapter.strong_ref();
+ if (old_adapter)
+ old_adapter->release_packet_buffer(*packet.buffer);
+ TCPPacket& tcp_packet = *(TCPPacket*)(packet.buffer->buffer->data() + packet.ipv4_payload_offset);
+ auto payload_size = packet.buffer->buffer->data() + packet.buffer->buffer->size() - (u8*)tcp_packet.payload();
+ unacked_packets.size -= payload_size;
+ evaluate_block_conditions();
+ unacked_packets.packets.take_first();
+ removed++;
+ } else {
+ break;
+ }
}
- }
- if (m_not_acked.is_empty()) {
- m_retransmit_attempts = 0;
- dequeue_for_retransmit();
- }
+ if (unacked_packets.packets.is_empty()) {
+ m_retransmit_attempts = 0;
+ dequeue_for_retransmit();
+ }
- dbgln_if(TCP_SOCKET_DEBUG, "TCPSocket: receive_tcp_packet acknowledged {} packets", removed);
+ dbgln_if(TCP_SOCKET_DEBUG, "TCPSocket: receive_tcp_packet acknowledged {} packets", removed);
+ });
}
m_packets_in++;
@@ -560,41 +562,42 @@ void TCPSocket::retransmit_packets()
if (routing_decision.is_zero())
return;
- MutexLocker locker(m_not_acked_lock, Mutex::Mode::Shared);
- for (auto& packet : m_not_acked) {
- packet.tx_counter++;
-
- if constexpr (TCP_SOCKET_DEBUG) {
- auto& tcp_packet = *(const TCPPacket*)(packet.buffer->buffer->data() + packet.ipv4_payload_offset);
- dbgln("Sending TCP packet from {}:{} to {}:{} with ({}{}{}{}) seq_no={}, ack_no={}, tx_counter={}",
- local_address(), local_port(),
- peer_address(), peer_port(),
- (tcp_packet.has_syn() ? "SYN " : ""),
- (tcp_packet.has_ack() ? "ACK " : ""),
- (tcp_packet.has_fin() ? "FIN " : ""),
- (tcp_packet.has_rst() ? "RST " : ""),
- tcp_packet.sequence_number(),
- tcp_packet.ack_number(),
- packet.tx_counter);
- }
+ m_unacked_packets.with_exclusive([&](auto& unacked_packets) {
+ for (auto& packet : unacked_packets.packets) {
+ packet.tx_counter++;
+
+ if constexpr (TCP_SOCKET_DEBUG) {
+ auto& tcp_packet = *(const TCPPacket*)(packet.buffer->buffer->data() + packet.ipv4_payload_offset);
+ dbgln("Sending TCP packet from {}:{} to {}:{} with ({}{}{}{}) seq_no={}, ack_no={}, tx_counter={}",
+ local_address(), local_port(),
+ peer_address(), peer_port(),
+ (tcp_packet.has_syn() ? "SYN " : ""),
+ (tcp_packet.has_ack() ? "ACK " : ""),
+ (tcp_packet.has_fin() ? "FIN " : ""),
+ (tcp_packet.has_rst() ? "RST " : ""),
+ tcp_packet.sequence_number(),
+ tcp_packet.ack_number(),
+ packet.tx_counter);
+ }
- size_t ipv4_payload_offset = routing_decision.adapter->ipv4_payload_offset();
- if (ipv4_payload_offset != packet.ipv4_payload_offset) {
- // FIXME: Add support for this. This can happen if after a route change
- // we ended up on another adapter which doesn't have the same layer 2 type
- // like the previous adapter.
- VERIFY_NOT_REACHED();
- }
+ size_t ipv4_payload_offset = routing_decision.adapter->ipv4_payload_offset();
+ if (ipv4_payload_offset != packet.ipv4_payload_offset) {
+ // FIXME: Add support for this. This can happen if after a route change
+ // we ended up on another adapter which doesn't have the same layer 2 type
+ // like the previous adapter.
+ VERIFY_NOT_REACHED();
+ }
- auto packet_buffer = packet.buffer->bytes();
+ auto packet_buffer = packet.buffer->bytes();
- routing_decision.adapter->fill_in_ipv4_header(*packet.buffer,
- local_address(), routing_decision.next_hop, peer_address(),
- IPv4Protocol::TCP, packet_buffer.size() - ipv4_payload_offset, ttl());
- routing_decision.adapter->send_packet(packet_buffer);
- m_packets_out++;
- m_bytes_out += packet_buffer.size();
- }
+ routing_decision.adapter->fill_in_ipv4_header(*packet.buffer,
+ local_address(), routing_decision.next_hop, peer_address(),
+ IPv4Protocol::TCP, packet_buffer.size() - ipv4_payload_offset, ttl());
+ routing_decision.adapter->send_packet(packet_buffer);
+ m_packets_out++;
+ m_bytes_out += packet_buffer.size();
+ }
+ });
}
bool TCPSocket::can_write(const FileDescription& file_description, size_t size) const
@@ -608,8 +611,8 @@ bool TCPSocket::can_write(const FileDescription& file_description, size_t size)
if (!file_description.is_blocking())
return true;
- MutexLocker lock(m_not_acked_lock);
- return m_not_acked_size + size <= m_send_window_size;
+ return m_unacked_packets.with_shared([&](auto& unacked_packets) {
+ return unacked_packets.size + size <= m_send_window_size;
+ });
}
-
}
diff --git a/Kernel/Net/TCPSocket.h b/Kernel/Net/TCPSocket.h
index 9422f78598..a85145db4b 100644
--- a/Kernel/Net/TCPSocket.h
+++ b/Kernel/Net/TCPSocket.h
@@ -203,9 +203,12 @@ private:
int tx_counter { 0 };
};
- mutable Mutex m_not_acked_lock { "TCPSocket unacked packets" };
- SinglyLinkedList<OutgoingPacket> m_not_acked;
- size_t m_not_acked_size { 0 };
+ struct UnackedPackets {
+ SinglyLinkedList<OutgoingPacket> packets;
+ size_t size { 0 };
+ };
+
+ ProtectedValue<UnackedPackets> m_unacked_packets;
u32 m_duplicate_acks { 0 };