/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2021-2022, kleines Filmröllchen <filmroellchen@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/

#pragma once

#include "ConnectionFromClient.h"
#include "FadingProperty.h"
#include <AK/Atomic.h>
#include <AK/Badge.h>
#include <AK/ByteBuffer.h>
#include <AK/Queue.h>
#include <AK/RefCounted.h>
#include <AK/WeakPtr.h>
#include <LibAudio/Queue.h>
#include <LibCore/ConfigFile.h>
#include <LibCore/DeprecatedFile.h>
#include <LibCore/Timer.h>
#include <LibThreading/ConditionVariable.h>
#include <LibThreading/Mutex.h>
#include <LibThreading/Thread.h>
#include <sys/types.h>

namespace AudioServer {

// Headroom, i.e. fixed attenuation for all audio streams.
// This is to prevent clipping when two streams with low headroom (e.g. normalized & compressed) are playing.
constexpr double SAMPLE_HEADROOM = 0.95;

// The size of the buffer in samples that the hardware receives through write() calls to the audio device.
constexpr size_t HARDWARE_BUFFER_SIZE = 512;
// The hardware buffer size in bytes; there are two channels of 16-bit samples.
constexpr size_t HARDWARE_BUFFER_SIZE_BYTES = HARDWARE_BUFFER_SIZE * 2 * sizeof(i16);

class ConnectionFromClient;
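
// Server-side state for a single client's audio stream: the sample queue shared
// with the client plus the per-client pause, mute, and volume settings that the
// mixer consults when pulling samples.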
class ClientAudioStream : public RefCounted<ClientAudioStream> {
public:
    explicit ClientAudioStream(ConnectionFromClient&);
    ~ClientAudioStream() = default;
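
    // Pulls the next sample for mixing. Returns false if the stream is paused or
    // if no further chunk could be dequeued from the client's queue.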
    bool get_next_sample(Audio::Sample& sample)
    {
        if (m_paused)
            return false;

        if (m_in_chunk_location >= m_current_audio_chunk.size()) {
            auto result = m_buffer->dequeue();
            if (result.is_error()) {
                if (result.error() == Audio::AudioQueue::QueueStatus::Empty) {
                    dbgln("Audio client {} can't keep up!", m_client->client_id());
                    // Note: Even though we only check client state here, we will probably close the client much earlier.
                    if (!m_client->is_open()) {
                        dbgln("Client socket {} has closed, closing audio server connection.", m_client->client_id());
                        m_client->shutdown();
                    }
                }
                return false;
            }
            m_current_audio_chunk = result.release_value();
            m_in_chunk_location = 0;
        }

        sample = m_current_audio_chunk[m_in_chunk_location++];
        return true;
    }

    bool is_connected() const { return m_client && m_client->is_open(); }

    ConnectionFromClient* client() { return m_client.ptr(); }

    void set_buffer(OwnPtr<Audio::AudioQueue> buffer) { m_buffer = move(buffer); }
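
    // Discards any audio that is still queued, e.g. when a client wants pending
    // playback to stop immediately.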
    void clear()
    {
        ErrorOr<Array<Audio::Sample, Audio::AUDIO_BUFFER_SIZE>, Audio::AudioQueue::QueueStatus> result = Audio::AudioQueue::QueueStatus::Invalid;
        // Keep dequeuing until the queue reports Empty, so that every pending chunk is dropped.
        do {
            result = m_buffer->dequeue();
        } while (!result.is_error() || result.error() != Audio::AudioQueue::QueueStatus::Empty);
    }

    void set_paused(bool paused) { m_paused = paused; }

    FadingProperty<double>& volume() { return m_volume; }
    double volume() const { return m_volume; }
    void set_volume(double const volume) { m_volume = volume; }

    bool is_muted() const { return m_muted; }
    void set_muted(bool muted) { m_muted = muted; }

private:
    OwnPtr<Audio::AudioQueue> m_buffer;
    Array<Audio::Sample, Audio::AUDIO_BUFFER_SIZE> m_current_audio_chunk;
    // Start past the end of m_current_audio_chunk so that the first get_next_sample() dequeues a fresh chunk.
    size_t m_in_chunk_location { Audio::AUDIO_BUFFER_SIZE };
    bool m_paused { true };
    bool m_muted { false };
    WeakPtr<ConnectionFromClient> m_client;
    FadingProperty<double> m_volume { 1 };
};
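
// The mixer runs a dedicated sound thread that waits for pending client streams,
// pulls and mixes their samples (applying per-client and main volume), and writes
// the result to the audio device one hardware buffer at a time.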
class Mixer : public Core::Object {
    C_OBJECT(Mixer)
public:
    virtual ~Mixer() override = default;

    NonnullRefPtr<ClientAudioStream> create_queue(ConnectionFromClient&);

    // To the outside world, we pretend that the target volume is already reached, even though it may still be fading.
    double main_volume() const { return m_main_volume.target(); }
    void set_main_volume(double volume);

    bool is_muted() const { return m_muted; }
    void set_muted(bool);

    int audiodevice_set_sample_rate(u32 sample_rate);
    u32 audiodevice_get_sample_rate() const;

private:
    Mixer(NonnullRefPtr<Core::ConfigFile> config);

    void request_setting_sync();

    Vector<NonnullRefPtr<ClientAudioStream>> m_pending_mixing;
    Threading::Mutex m_pending_mutex;
    Threading::ConditionVariable m_mixing_necessary { m_pending_mutex };

    RefPtr<Core::DeprecatedFile> m_device;

    NonnullRefPtr<Threading::Thread> m_sound_thread;

    bool m_muted { false };
    FadingProperty<double> m_main_volume { 1 };

    NonnullRefPtr<Core::ConfigFile> m_config;
    RefPtr<Core::Timer> m_config_write_timer;

    Array<u8, HARDWARE_BUFFER_SIZE_BYTES> m_stream_buffer;
    Array<u8, HARDWARE_BUFFER_SIZE_BYTES> const m_zero_filled_buffer {};
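
    // Body of the sound thread: waits on m_mixing_necessary until client streams
    // are pending, mixes their samples into m_stream_buffer, and writes the result
    // to m_device.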
    void mix();
};

// Interval in ms when the server tries to save its configuration to disk.
constexpr unsigned AUDIO_CONFIG_WRITE_INTERVAL = 2000;

}