// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <array>
#include <cstring>
#include <limits>
#include <tuple>
#include <vector>

#include "audio_core/audio_out.h"
#include "audio_core/audio_renderer.h"
#include "audio_core/common.h"
#include "audio_core/info_updater.h"
#include "audio_core/voice_context.h"
#include "common/logging/log.h"
#include "core/hle/kernel/writable_event.h"
#include "core/memory.h"
#include "core/settings.h"

namespace {
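// Clamps a 32-bit mixed sample to the signed 16-bit range used by the output stream.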
[[nodiscard]] static constexpr s16 ClampToS16(s32 value) {
    return static_cast<s16>(std::clamp(value, s32{std::numeric_limits<s16>::min()},
                                       s32{std::numeric_limits<s16>::max()}));
}

[[nodiscard]] static constexpr s16 Mix2To1(s16 l_channel, s16 r_channel) {
    // Mix 50% from the left channel and 50% from the right channel
    constexpr float l_mix_amount = 50.0f / 100.0f;
    constexpr float r_mix_amount = 50.0f / 100.0f;
    return ClampToS16(static_cast<s32>((static_cast<float>(l_channel) * l_mix_amount) +
                                       (static_cast<float>(r_channel) * r_mix_amount)));
}

[[nodiscard]] static constexpr std::tuple<s16, s16> Mix6To2(s16 fl_channel, s16 fr_channel,
                                                            s16 fc_channel,
                                                            [[maybe_unused]] s16 lf_channel,
                                                            s16 bl_channel, s16 br_channel) {
    // The front and back channels are mixed at 36.94% each and the center channel at 26.12%;
    // the low-frequency channel is dropped.
    constexpr float front_mix_amount = 36.94f / 100.0f;
    constexpr float center_mix_amount = 26.12f / 100.0f;
    constexpr float back_mix_amount = 36.94f / 100.0f;

    // Fold the front, center, and back contributions into a stereo pair
    const auto left = front_mix_amount * static_cast<float>(fl_channel) +
                      center_mix_amount * static_cast<float>(fc_channel) +
                      back_mix_amount * static_cast<float>(bl_channel);

    const auto right = front_mix_amount * static_cast<float>(fr_channel) +
                       center_mix_amount * static_cast<float>(fc_channel) +
                       back_mix_amount * static_cast<float>(br_channel);

    return {ClampToS16(static_cast<s32>(left)), ClampToS16(static_cast<s32>(right))};
}

[[nodiscard]] static constexpr std::tuple<s16, s16> Mix6To2WithCoefficients(
    s16 fl_channel, s16 fr_channel, s16 fc_channel, s16 lf_channel, s16 bl_channel, s16 br_channel,
    const std::array<float_le, 4>& coeff) {
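    // coeff[0] is applied to the front and back channels, coeff[1] to the center channel, and
    // coeff[2] to the low-frequency channel.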
    const auto left =
        static_cast<float>(fl_channel) * coeff[0] + static_cast<float>(fc_channel) * coeff[1] +
        static_cast<float>(lf_channel) * coeff[2] + static_cast<float>(bl_channel) * coeff[0];

    const auto right =
        static_cast<float>(fr_channel) * coeff[0] + static_cast<float>(fc_channel) * coeff[1] +
        static_cast<float>(lf_channel) * coeff[2] + static_cast<float>(br_channel) * coeff[0];

    return {ClampToS16(static_cast<s32>(left)), ClampToS16(static_cast<s32>(right))};
}

} // namespace

namespace AudioCore {
AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory::Memory& memory_,
                             AudioCommon::AudioRendererParameter params,
                             std::shared_ptr<Kernel::WritableEvent> buffer_event_,
                             std::size_t instance_number)
    : worker_params{params}, buffer_event{buffer_event_},
      memory_pool_info(params.effect_count + params.voice_count * 4),
      voice_context(params.voice_count), effect_context(params.effect_count), mix_context(),
      sink_context(params.sink_count), splitter_context(),
      voices(params.voice_count), memory{memory_},
      command_generator(worker_params, voice_context, mix_context, splitter_context, effect_context,
                        memory) {
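    // Record the guest-requested revision before initializing the contexts that consult
    // behavior_info.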
    behavior_info.SetUserRevision(params.revision);
    splitter_context.Initialize(behavior_info, params.splitter_count,
                                params.num_splitter_send_channels);
    mix_context.Initialize(behavior_info, params.submix_count + 1, params.effect_count);
    audio_out = std::make_unique<AudioCore::AudioOut>();
    stream =
        audio_out->OpenStream(core_timing, params.sample_rate, AudioCommon::STREAM_NUM_CHANNELS,
                              fmt::format("AudioRenderer-Instance{}", instance_number),
                              [=]() { buffer_event_->Signal(); });
    audio_out->StartStream(stream);
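
    // Prime the stream with four mixed buffers so playback has data to consume immediately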
    QueueMixedBuffer(0);
    QueueMixedBuffer(1);
    QueueMixedBuffer(2);
    QueueMixedBuffer(3);
}

AudioRenderer::~AudioRenderer() = default;

u32 AudioRenderer::GetSampleRate() const {
    return worker_params.sample_rate;
}

u32 AudioRenderer::GetSampleCount() const {
    return worker_params.sample_count;
}

u32 AudioRenderer::GetMixBufferCount() const {
    return worker_params.mix_buffer_count;
}

Stream::State AudioRenderer::GetStreamState() const {
    return stream->GetState();
}

ResultCode AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_params,
                                              std::vector<u8>& output_params) {
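    // Each section of the guest-supplied update is applied in order; the first section that
    // fails validation aborts the whole update.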
    InfoUpdater info_updater{input_params, output_params, behavior_info};

    if (!info_updater.UpdateBehaviorInfo(behavior_info)) {
        LOG_ERROR(Audio, "Failed to update behavior info input parameters");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    if (!info_updater.UpdateMemoryPools(memory_pool_info)) {
        LOG_ERROR(Audio, "Failed to update memory pool parameters");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    if (!info_updater.UpdateVoiceChannelResources(voice_context)) {
        LOG_ERROR(Audio, "Failed to update voice channel resource parameters");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    if (!info_updater.UpdateVoices(voice_context, memory_pool_info, 0)) {
        LOG_ERROR(Audio, "Failed to update voice parameters");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    // TODO(ogniK): Deal with stopped audio renderer but updates still taking place
    if (!info_updater.UpdateEffects(effect_context, true)) {
        LOG_ERROR(Audio, "Failed to update effect parameters");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    if (behavior_info.IsSplitterSupported()) {
        if (!info_updater.UpdateSplitterInfo(splitter_context)) {
            LOG_ERROR(Audio, "Failed to update splitter parameters");
            return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
        }
    }

    const auto mix_result = info_updater.UpdateMixes(mix_context, worker_params.mix_buffer_count,
                                                     splitter_context, effect_context);

    if (mix_result.IsError()) {
        LOG_ERROR(Audio, "Failed to update mix parameters");
        return mix_result;
    }

    // TODO(ogniK): Sinks
    if (!info_updater.UpdateSinks(sink_context)) {
        LOG_ERROR(Audio, "Failed to update sink parameters");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    // TODO(ogniK): Performance buffer
    if (!info_updater.UpdatePerformanceBuffer()) {
        LOG_ERROR(Audio, "Failed to update performance buffer parameters");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    if (!info_updater.UpdateErrorInfo(behavior_info)) {
        LOG_ERROR(Audio, "Failed to update error info");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    if (behavior_info.IsElapsedFrameCountSupported()) {
        if (!info_updater.UpdateRendererInfo(elapsed_frame_count)) {
            LOG_ERROR(Audio, "Failed to update renderer info");
            return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
        }
    }
    // TODO(ogniK): Statistics

    if (!info_updater.WriteOutputHeader()) {
        LOG_ERROR(Audio, "Failed to write output header");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    // TODO(ogniK): Check when all sections are implemented

    if (!info_updater.CheckConsumedSize()) {
        LOG_ERROR(Audio, "Audio buffers were not consumed!");
        return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
    }

    ReleaseAndQueueBuffers();

    return RESULT_SUCCESS;
}

void AudioRenderer::QueueMixedBuffer(Buffer::Tag tag) {
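    // Generate this frame's mix commands and interleave the final mix into the buffer identified
    // by the given tag.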
    command_generator.PreCommand();
    // Clear mix buffers before our next operation
    command_generator.ClearMixBuffers();

    // If the splitter is not in use, sort our mixes
    if (!splitter_context.UsingSplitter()) {
        mix_context.SortInfo();
    }
    // Sort our voices
    voice_context.SortInfo();

    // Generate the voice, submix, and final mix commands for this frame
    command_generator.GenerateVoiceCommands();
    command_generator.GenerateSubMixCommands();
    command_generator.GenerateFinalMixCommands();

    command_generator.PostCommand();
    // Number of samples per channel in each queued buffer
    std::size_t BUFFER_SIZE{worker_params.sample_count};
    // Interleaved sample buffer handed to the output stream
    std::vector<s16> buffer(BUFFER_SIZE * stream->GetNumChannels());
    // Make sure to clear our samples
    std::memset(buffer.data(), 0, buffer.size() * sizeof(s16));

    if (sink_context.InUse()) {
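        // Resolve a pointer to the final mix's output buffer for each channel reported by the sink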
        const auto stream_channel_count = stream->GetNumChannels();
        const auto buffer_offsets = sink_context.OutputBuffers();
        const auto channel_count = buffer_offsets.size();
        const auto& final_mix = mix_context.GetFinalMixInfo();
        const auto& in_params = final_mix.GetInParams();
        std::vector<s32*> mix_buffers(channel_count);
        for (std::size_t i = 0; i < channel_count; i++) {
            mix_buffers[i] =
                command_generator.GetMixBuffer(in_params.buffer_offset + buffer_offsets[i]);
        }

        for (std::size_t i = 0; i < BUFFER_SIZE; i++) {
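            // Downmix, duplicate, or pass each frame through depending on how many channels the
            // final mix produced versus how many the output stream expects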
            if (channel_count == 1) {
                const auto sample = ClampToS16(mix_buffers[0][i]);

                // Place the sample in all channels
                for (u32 channel = 0; channel < stream_channel_count; channel++) {
                    buffer[i * stream_channel_count + channel] = sample;
                }

                if (stream_channel_count == 6) {
                    // The output stream has a low-frequency (LFE) channel; mute it
                    buffer[i * stream_channel_count + 3] = 0;
                }

            } else if (channel_count == 2) {
                const auto l_sample = ClampToS16(mix_buffers[0][i]);
                const auto r_sample = ClampToS16(mix_buffers[1][i]);
                if (stream_channel_count == 1) {
                    buffer[i * stream_channel_count + 0] = Mix2To1(l_sample, r_sample);
                } else if (stream_channel_count == 2) {
                    buffer[i * stream_channel_count + 0] = l_sample;
                    buffer[i * stream_channel_count + 1] = r_sample;
                } else if (stream_channel_count == 6) {
                    buffer[i * stream_channel_count + 0] = l_sample;
                    buffer[i * stream_channel_count + 1] = r_sample;

                    // Mix the left and right channels down into the center channel
                    buffer[i * stream_channel_count + 2] = Mix2To1(l_sample, r_sample);

                    buffer[i * stream_channel_count + 4] = l_sample;
                    buffer[i * stream_channel_count + 5] = r_sample;
                }

            } else if (channel_count == 6) {
                const auto fl_sample = ClampToS16(mix_buffers[0][i]);
                const auto fr_sample = ClampToS16(mix_buffers[1][i]);
                const auto fc_sample = ClampToS16(mix_buffers[2][i]);
                const auto lf_sample = ClampToS16(mix_buffers[3][i]);
                const auto bl_sample = ClampToS16(mix_buffers[4][i]);
                const auto br_sample = ClampToS16(mix_buffers[5][i]);

                if (stream_channel_count == 1) {
                    // Games seem to ignore the center channel half the time, so mix only the
                    // front left and right channels, which is where most of the audio goes
                    buffer[i * stream_channel_count + 0] = Mix2To1(fl_sample, fr_sample);
                } else if (stream_channel_count == 2) {
                    // Mix all channels into 2 channels
                    if (sink_context.HasDownMixingCoefficients()) {
                        const auto [left, right] = Mix6To2WithCoefficients(
                            fl_sample, fr_sample, fc_sample, lf_sample, bl_sample, br_sample,
                            sink_context.GetDownmixCoefficients());
                        buffer[i * stream_channel_count + 0] = left;
                        buffer[i * stream_channel_count + 1] = right;
                    } else {
                        const auto [left, right] = Mix6To2(fl_sample, fr_sample, fc_sample,
                                                           lf_sample, bl_sample, br_sample);
                        buffer[i * stream_channel_count + 0] = left;
                        buffer[i * stream_channel_count + 1] = right;
                    }
                } else if (stream_channel_count == 6) {
                    // Pass through
                    buffer[i * stream_channel_count + 0] = fl_sample;
                    buffer[i * stream_channel_count + 1] = fr_sample;
                    buffer[i * stream_channel_count + 2] = fc_sample;
                    buffer[i * stream_channel_count + 3] = lf_sample;
                    buffer[i * stream_channel_count + 4] = bl_sample;
                    buffer[i * stream_channel_count + 5] = br_sample;
                }
            }
        }
    }

    audio_out->QueueBuffer(stream, tag, std::move(buffer));
    elapsed_frame_count++;
    voice_context.UpdateStateByDspShared();
}

void AudioRenderer::ReleaseAndQueueBuffers() {
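    // Refill every buffer the backend has finished playing with freshly mixed samples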
    const auto released_buffers{audio_out->GetTagsAndReleaseBuffers(stream)};
    for (const auto& tag : released_buffers) {
        QueueMixedBuffer(tag);
    }
}

} // namespace AudioCore