
Commit

FEAT(client, server): Implement server loopback while still sending to others
davidebeatrici committed Jun 1, 2024
1 parent 1eecd5b commit 2e47e0f
Showing 6 changed files with 124 additions and 100 deletions.
5 changes: 3 additions & 2 deletions src/MumbleProtocol.h
@@ -89,8 +89,9 @@ namespace Protocol {
    };

    namespace ReservedTargetIDs {
-        constexpr unsigned int REGULAR_SPEECH = 0;
-        constexpr unsigned int SERVER_LOOPBACK = 31;
+        constexpr unsigned int REGULAR_SPEECH = 0;
+        constexpr unsigned int SERVER_LOOPBACK_REGULAR = 30;
+        constexpr unsigned int SERVER_LOOPBACK_ONLY = 31;
    } // namespace ReservedTargetIDs

    using audio_context_t = byte;
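
Note: after this change, target ID 31 (SERVER_LOOPBACK_ONLY) asks the server to echo the audio back to the sender only, while the new ID 30 (SERVER_LOOPBACK_REGULAR) asks for the echo in addition to normal routing. A minimal C++ sketch of how code could classify a target ID; the helper below is illustrative and not part of this commit:

    namespace ReservedTargetIDs {
        constexpr unsigned int REGULAR_SPEECH          = 0;
        constexpr unsigned int SERVER_LOOPBACK_REGULAR = 30;
        constexpr unsigned int SERVER_LOOPBACK_ONLY    = 31;
    } // namespace ReservedTargetIDs

    // Hypothetical helper: does this target ID request a server-side echo?
    constexpr bool requestsServerLoopback(unsigned int target) {
        return target == ReservedTargetIDs::SERVER_LOOPBACK_REGULAR
               || target == ReservedTargetIDs::SERVER_LOOPBACK_ONLY;
    }

    static_assert(requestsServerLoopback(30) && requestsServerLoopback(31) && !requestsServerLoopback(0),
                  "reserved loopback IDs");
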
3 changes: 2 additions & 1 deletion src/mumble/AudioConfigDialog.cpp
@@ -641,7 +641,8 @@ AudioOutputDialog::AudioOutputDialog(Settings &st) : ConfigWidget(st) {

    qcbLoopback->addItem(tr("None"), Settings::None);
    qcbLoopback->addItem(tr("Local"), Settings::Local);
-    qcbLoopback->addItem(tr("Server"), Settings::Server);
+    qcbLoopback->addItem(tr("Server (don't send to others)"), Settings::ServerOnly);
+    qcbLoopback->addItem(tr("Server (send to others)"), Settings::ServerRegular);

    qcbDevice->view()->setTextElideMode(Qt::ElideRight);
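
Note: the second addItem() argument stores the enumerator as item data, so the dialog can read the chosen mode back with QComboBox::currentData(). A small illustrative sketch (selectedLoopMode() is not part of the commit), assuming the Settings::LoopMode declaration from Settings.h below:

    #include <QComboBox>

    // Illustrative only: recover the LoopMode stored via addItem(text, userData).
    Settings::LoopMode selectedLoopMode(const QComboBox &box) {
        return static_cast< Settings::LoopMode >(box.currentData().toInt());
    }
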
13 changes: 11 additions & 2 deletions src/mumble/AudioInput.cpp
@@ -1173,8 +1173,17 @@ void AudioInput::flushCheck(const QByteArray &frame, bool terminator, std::int32
        // accordingly once the client whispers for the next time.
        Global::get().iPrevTarget = 0;
    }
-    if (Global::get().s.lmLoopMode == Settings::Server) {
-        audioData.targetOrContext = Mumble::Protocol::ReservedTargetIDs::SERVER_LOOPBACK;
+
+    switch (Global::get().s.lmLoopMode) {
+        case Settings::None:
+        case Settings::Local:
+            break;
+        case Settings::ServerOnly:
+            audioData.targetOrContext = Mumble::Protocol::ReservedTargetIDs::SERVER_LOOPBACK_ONLY;
+            break;
+        case Settings::ServerRegular:
+            audioData.targetOrContext = Mumble::Protocol::ReservedTargetIDs::SERVER_LOOPBACK_REGULAR;
+            break;
    }

    audioData.usedCodec = m_codec;
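
Note: the client now maps its loopback setting onto the wire-level target ID just before the packet is built; for None and Local the previously chosen target (e.g. a whisper/shout target) is left untouched. A condensed, illustrative sketch of that mapping as a free function (the real code assigns audioData.targetOrContext in place):

    // Illustrative only; relies on the Settings and ReservedTargetIDs declarations shown in this commit.
    unsigned int applyLoopMode(Settings::LoopMode mode, unsigned int currentTarget) {
        switch (mode) {
            case Settings::ServerOnly:
                return Mumble::Protocol::ReservedTargetIDs::SERVER_LOOPBACK_ONLY; // 31
            case Settings::ServerRegular:
                return Mumble::Protocol::ReservedTargetIDs::SERVER_LOOPBACK_REGULAR; // 30
            case Settings::None:
            case Settings::Local:
                break;
        }
        // No server loopback requested: keep whatever target was already selected.
        return currentTarget;
    }
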
9 changes: 5 additions & 4 deletions src/mumble/EnumStringConversions.cpp
@@ -14,10 +14,11 @@
    PROCESS(Settings::VADSource, Amplitude, "Amplitude") \
    PROCESS(Settings::VADSource, SignalToNoise, "SignalToNoise")

-#define LOOP_MODE_VALUES \
-    PROCESS(Settings::LoopMode, None, "None") \
-    PROCESS(Settings::LoopMode, Local, "Local") \
-    PROCESS(Settings::LoopMode, Server, "Server")
+#define LOOP_MODE_VALUES \
+    PROCESS(Settings::LoopMode, None, "None") \
+    PROCESS(Settings::LoopMode, Local, "Local") \
+    PROCESS(Settings::LoopMode, ServerOnly, "ServerOnly") \
+    PROCESS(Settings::LoopMode, ServerRegular, "ServerRegular")

#define CHANNEL_EXPAND_VALUES \
    PROCESS(Settings::ChannelExpand, NoChannels, "NoChannels") \
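
Note: LOOP_MODE_VALUES is an X-macro table; the conversion helpers in this file expand each PROCESS(enum, value, string) row, so adding the ServerOnly/ServerRegular rows keeps the enum-to-string and string-to-enum conversions in sync from a single list. A self-contained sketch of the pattern (function names are illustrative, not the ones used by Mumble):

    #include <string>

    enum class LoopMode { None, Local, ServerOnly, ServerRegular };

    #define LOOP_MODE_VALUES                        \
        PROCESS(LoopMode, None, "None")             \
        PROCESS(LoopMode, Local, "Local")           \
        PROCESS(LoopMode, ServerOnly, "ServerOnly") \
        PROCESS(LoopMode, ServerRegular, "ServerRegular")

    // Expand the table into an enum -> string conversion...
    std::string toString(LoopMode mode) {
    #define PROCESS(Enum, value, str) \
        if (mode == Enum::value)      \
            return str;
        LOOP_MODE_VALUES
    #undef PROCESS
        return "Unknown";
    }

    // ...and a string -> enum conversion generated from the same rows.
    bool fromString(const std::string &str, LoopMode &out) {
    #define PROCESS(Enum, value, name) \
        if (str == name) {             \
            out = Enum::value;         \
            return true;               \
        }
        LOOP_MODE_VALUES
    #undef PROCESS
        return false;
    }
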
2 changes: 1 addition & 1 deletion src/mumble/Settings.h
@@ -188,7 +188,7 @@ struct OverlaySettings {
struct Settings {
    enum AudioTransmit { Continuous, VAD, PushToTalk };
    enum VADSource { Amplitude, SignalToNoise };
-    enum LoopMode { None, Local, Server };
+    enum LoopMode { None, Local, ServerOnly, ServerRegular };
    enum ChannelExpand { NoChannels, ChannelsWithUsers, AllChannels };
    enum ChannelDrag { Ask, DoNothing, Move };
    enum ServerShow { ShowPopulated, ShowReachable, ShowAll };
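
Note: the old Server enumerator is replaced rather than kept, so every switch over Settings::LoopMode has to be updated; writing those switches without a default label (as AudioInput.cpp above does) lets -Wswitch-style compiler warnings flag any spot that misses the new enumerators. A tiny illustration (describe() is hypothetical):

    // With no default label, GCC/Clang -Wswitch warns if an enumerator such as
    // ServerRegular is not handled here.
    const char *describe(Settings::LoopMode mode) {
        switch (mode) {
            case Settings::None:
                return "no loopback";
            case Settings::Local:
                return "loopback inside the client";
            case Settings::ServerOnly:
                return "loopback via the server, not sent to others";
            case Settings::ServerRegular:
                return "loopback via the server, also sent to others";
        }
        return "unknown";
    }
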
192 changes: 102 additions & 90 deletions src/murmur/Server.cpp
@@ -1165,114 +1165,126 @@ void Server::processMsg(ServerUser *u, Mumble::Protocol::AudioData audioData, Au

    buffer.clear();

-    if (audioData.targetOrContext == Mumble::Protocol::ReservedTargetIDs::SERVER_LOOPBACK) {
-        buffer.forceAddReceiver(*u, Mumble::Protocol::AudioContext::NORMAL, audioData.containsPositionalData);
-    } else if (audioData.targetOrContext == Mumble::Protocol::ReservedTargetIDs::REGULAR_SPEECH) {
-        Channel *c = u->cChannel;
-
-        // Send audio to all users that are listening to the channel
-        foreach (unsigned int currentSession, m_channelListenerManager.getListenersForChannel(c->iId)) {
-            ServerUser *pDst = static_cast< ServerUser * >(qhUsers.value(currentSession));
-            if (pDst) {
-                buffer.addReceiver(*u, *pDst, Mumble::Protocol::AudioContext::LISTEN, audioData.containsPositionalData,
-                    m_channelListenerManager.getListenerVolumeAdjustment(pDst->uiSession, c->iId));
+    switch (audioData.targetOrContext) {
+        case Mumble::Protocol::ReservedTargetIDs::SERVER_LOOPBACK_ONLY:
+            buffer.forceAddReceiver(*u, Mumble::Protocol::AudioContext::NORMAL, audioData.containsPositionalData);
+            break;
+        case Mumble::Protocol::ReservedTargetIDs::SERVER_LOOPBACK_REGULAR:
+            buffer.forceAddReceiver(*u, Mumble::Protocol::AudioContext::NORMAL, audioData.containsPositionalData);
+            [[fallthrough]];
+        case Mumble::Protocol::ReservedTargetIDs::REGULAR_SPEECH: {
+            Channel *c = u->cChannel;
+
+            // Send audio to all users that are listening to the channel
+            foreach (unsigned int currentSession, m_channelListenerManager.getListenersForChannel(c->iId)) {
+                ServerUser *pDst = static_cast< ServerUser * >(qhUsers.value(currentSession));
+                if (pDst) {
+                    buffer.addReceiver(*u, *pDst, Mumble::Protocol::AudioContext::LISTEN,
+                        audioData.containsPositionalData,
+                        m_channelListenerManager.getListenerVolumeAdjustment(pDst->uiSession, c->iId));
+                }
            }
-        }

-        // Send audio to all users in the same channel
-        for (User *p : c->qlUsers) {
-            ServerUser *pDst = static_cast< ServerUser * >(p);
+            // Send audio to all users in the same channel
+            for (User *p : c->qlUsers) {
+                ServerUser *pDst = static_cast< ServerUser * >(p);

-            buffer.addReceiver(*u, *pDst, Mumble::Protocol::AudioContext::NORMAL, audioData.containsPositionalData);
-        }
+                buffer.addReceiver(*u, *pDst, Mumble::Protocol::AudioContext::NORMAL, audioData.containsPositionalData);
+            }

-        // Send audio to all linked channels the user has speak-permission
-        if (!c->qhLinks.isEmpty()) {
-            QSet< Channel * > chans = c->allLinks();
-            chans.remove(c);
-
+            // Send audio to all linked channels the user has speak-permission
+            if (!c->qhLinks.isEmpty()) {
+                QSet< Channel * > chans = c->allLinks();
+                chans.remove(c);
+
-            QMutexLocker qml(&qmCache);
-
-            for (Channel *l : chans) {
-                if (ChanACL::hasPermission(u, l, ChanACL::Speak, &acCache)) {
-                    // Send the audio stream to all users that are listening to the linked channel
-                    for (unsigned int currentSession : m_channelListenerManager.getListenersForChannel(l->iId)) {
-                        ServerUser *pDst = static_cast< ServerUser * >(qhUsers.value(currentSession));
-                        if (pDst) {
-                            buffer.addReceiver(
-                                *u, *pDst, Mumble::Protocol::AudioContext::LISTEN, audioData.containsPositionalData,
-                                m_channelListenerManager.getListenerVolumeAdjustment(pDst->uiSession, l->iId));
+                QMutexLocker qml(&qmCache);
+
+                for (Channel *l : chans) {
+                    if (ChanACL::hasPermission(u, l, ChanACL::Speak, &acCache)) {
+                        // Send the audio stream to all users that are listening to the linked channel
+                        for (unsigned int currentSession : m_channelListenerManager.getListenersForChannel(l->iId)) {
+                            ServerUser *pDst = static_cast< ServerUser * >(qhUsers.value(currentSession));
+                            if (pDst) {
+                                buffer.addReceiver(
+                                    *u, *pDst, Mumble::Protocol::AudioContext::LISTEN, audioData.containsPositionalData,
+                                    m_channelListenerManager.getListenerVolumeAdjustment(pDst->uiSession, l->iId));
+                            }
                        }
-                    }

-                    // Send audio to users in the linked channel
-                    for (User *p : l->qlUsers) {
-                        ServerUser *pDst = static_cast< ServerUser * >(p);
+                        // Send audio to users in the linked channel
+                        for (User *p : l->qlUsers) {
+                            ServerUser *pDst = static_cast< ServerUser * >(p);

-                        buffer.addReceiver(*u, *pDst, Mumble::Protocol::AudioContext::NORMAL,
-                            audioData.containsPositionalData);
+                            buffer.addReceiver(*u, *pDst, Mumble::Protocol::AudioContext::NORMAL,
+                                audioData.containsPositionalData);
+                        }
                    }
                }
            }
        }
-    } else if (u->qmTargets.contains(static_cast< int >(audioData.targetOrContext))) { // Whisper/Shout
-        QSet< ServerUser * > channel;
-        QSet< ServerUser * > direct;
-        QHash< ServerUser *, VolumeAdjustment > cachedListeners;
-
-        if (u->qmTargetCache.contains(static_cast< int >(audioData.targetOrContext))) {
-            ZoneScopedN(TracyConstants::AUDIO_WHISPER_CACHE_STORE);
-
-            const WhisperTargetCache &cache = u->qmTargetCache.value(static_cast< int >(audioData.targetOrContext));
-            channel = cache.channelTargets;
-            direct = cache.directTargets;
-            cachedListeners = cache.listeningTargets;
-        } else {
-            ZoneScopedN(TracyConstants::AUDIO_WHISPER_CACHE_CREATE);
+        default:
+            if (u->qmTargets.contains(static_cast< int >(audioData.targetOrContext))) { // Whisper/Shout
+                QSet< ServerUser * > channel;
+                QSet< ServerUser * > direct;
+                QHash< ServerUser *, VolumeAdjustment > cachedListeners;
+
-            const unsigned int uiSession = u->uiSession;
-            qrwlVoiceThread.unlock();
-            qrwlVoiceThread.lockForWrite();
+                if (u->qmTargetCache.contains(static_cast< int >(audioData.targetOrContext))) {
+                    ZoneScopedN(TracyConstants::AUDIO_WHISPER_CACHE_STORE);
+
-            if (!qhUsers.contains(uiSession)) {
-                return;
-            }
+                    const WhisperTargetCache &cache =
+                        u->qmTargetCache.value(static_cast< int >(audioData.targetOrContext));
+                    channel = cache.channelTargets;
+                    direct = cache.directTargets;
+                    cachedListeners = cache.listeningTargets;
+                } else {
+                    ZoneScopedN(TracyConstants::AUDIO_WHISPER_CACHE_CREATE);
+
-            // Create cache entry for the given target
-            // Note: We have to compute the cache entry and add it to the user's cache store in an atomic
-            // transaction (ensured by the lock) to avoid running into situations in which a user from the cache
-            // gets deleted without this particular cache entry being purged (which happens, if the cache entry is
-            // in the store at the point of deleting the user).
-            const WhisperTarget &wt = u->qmTargets.value(static_cast< int >(audioData.targetOrContext));
-            WhisperTargetCache cache = createWhisperTargetCacheFor(*u, wt);
+                    const unsigned int uiSession = u->uiSession;
+                    qrwlVoiceThread.unlock();
+                    qrwlVoiceThread.lockForWrite();
+
-            u->qmTargetCache.insert(static_cast< int >(audioData.targetOrContext), std::move(cache));
+                    if (!qhUsers.contains(uiSession)) {
+                        return;
+                    }
+
+                    // Create cache entry for the given target
+                    // Note: We have to compute the cache entry and add it to the user's cache store in an atomic
+                    // transaction (ensured by the lock) to avoid running into situations in which a user from the cache
+                    // gets deleted without this particular cache entry being purged (which happens, if the cache entry
+                    // is in the store at the point of deleting the user).
+                    const WhisperTarget &wt = u->qmTargets.value(static_cast< int >(audioData.targetOrContext));
+                    WhisperTargetCache cache = createWhisperTargetCacheFor(*u, wt);
+
-            qrwlVoiceThread.unlock();
-            qrwlVoiceThread.lockForRead();
-            if (!qhUsers.contains(uiSession))
-                return;
-        }
+                    u->qmTargetCache.insert(static_cast< int >(audioData.targetOrContext), std::move(cache));
-
-        // These users receive the audio because someone is shouting to their channel
-        for (ServerUser *pDst : channel) {
-            buffer.addReceiver(*u, *pDst, Mumble::Protocol::AudioContext::SHOUT, audioData.containsPositionalData);
-        }
-        // These users receive audio because someone is whispering to them
-        for (ServerUser *pDst : direct) {
-            buffer.addReceiver(*u, *pDst, Mumble::Protocol::AudioContext::WHISPER, audioData.containsPositionalData);
-        }
-        // These users receive audio because someone is sending audio to one of their listeners
-        QHashIterator< ServerUser *, VolumeAdjustment > it(cachedListeners);
-        while (it.hasNext()) {
-            it.next();
-            ServerUser *user = it.key();
-            const VolumeAdjustment &volumeAdjustment = it.value();
-
-            buffer.addReceiver(*u, *user, Mumble::Protocol::AudioContext::LISTEN, audioData.containsPositionalData,
-                volumeAdjustment);
-        }
+
+                    qrwlVoiceThread.unlock();
+                    qrwlVoiceThread.lockForRead();
+                    if (!qhUsers.contains(uiSession))
+                        return;
+                }
+
+                // These users receive the audio because someone is shouting to their channel
+                for (ServerUser *pDst : channel) {
+                    buffer.addReceiver(*u, *pDst, Mumble::Protocol::AudioContext::SHOUT,
+                        audioData.containsPositionalData);
+                }
+                // These users receive audio because someone is whispering to them
+                for (ServerUser *pDst : direct) {
+                    buffer.addReceiver(*u, *pDst, Mumble::Protocol::AudioContext::WHISPER,
+                        audioData.containsPositionalData);
+                }
+                // These users receive audio because someone is sending audio to one of their listeners
+                QHashIterator< ServerUser *, VolumeAdjustment > it(cachedListeners);
+                while (it.hasNext()) {
+                    it.next();
+                    ServerUser *user = it.key();
+                    const VolumeAdjustment &volumeAdjustment = it.value();
+
+                    buffer.addReceiver(*u, *user, Mumble::Protocol::AudioContext::LISTEN,
+                        audioData.containsPositionalData, volumeAdjustment);
+                }
+            }
    }

    ZoneNamedN(__tracy_scoped_zone2, TracyConstants::AUDIO_SENDOUT_ZONE, true);
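
Note on the new dispatch: SERVER_LOOPBACK_ONLY (31) adds only the sender as a receiver, SERVER_LOOPBACK_REGULAR (30) adds the sender and then falls through into the normal REGULAR_SPEECH routing (own channel, channel listeners, linked channels), and any other target ID is handled as a whisper/shout in the default branch. A stripped-down, self-contained sketch of that switch shape, with receiver selection reduced to prints (illustrative only, not the Server internals):

    #include <cstdio>

    constexpr unsigned int REGULAR_SPEECH          = 0;
    constexpr unsigned int SERVER_LOOPBACK_REGULAR = 30;
    constexpr unsigned int SERVER_LOOPBACK_ONLY    = 31;

    void route(unsigned int target) {
        switch (target) {
            case SERVER_LOOPBACK_ONLY:
                std::printf("echo back to the sender only\n");
                break;
            case SERVER_LOOPBACK_REGULAR:
                std::printf("echo back to the sender\n");
                [[fallthrough]]; // then continue with the regular-speech routing
            case REGULAR_SPEECH:
                std::printf("send to the sender's channel, its listeners and linked channels\n");
                break;
            default:
                std::printf("treat %u as a whisper/shout voice target\n", target);
                break;
        }
    }

    int main() {
        route(SERVER_LOOPBACK_REGULAR); // prints both the echo line and the regular-speech line
    }
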
