Revert "core: Fix clang build"
This commit is contained in:
parent
fdd9154069
commit
3d592972dc
105 changed files with 667 additions and 906 deletions
4
externals/microprofile/microprofile.h
vendored
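Most of the hunks below undo the same pattern: the commit being reverted had silenced clang's -Werror=sign-conversion (and related warnings) by adding explicit static_casts and tightening a few signatures, and this revert removes the flag from the CMake warning lists and restores the implicit conversions. As a hedged illustration only, the sketch below shows the two styles on a made-up signed-nibble table; none of these names are taken from this diff.

// Illustrative only: the kind of implicit conversion that -Werror=sign-conversion
// rejects, and the explicit-cast style that this revert removes again.
#include <array>
#include <cstddef>
#include <cstdint>

namespace {
constexpr std::array<std::int32_t, 16> kSignedNibbles{0, 1, 2,  3,  4,  5,  6,  7,
                                                      -8, -7, -6, -5, -4, -3, -2, -1};

// Reverted style: `nibble` stays signed, so indexing implicitly converts it to the
// unsigned size type expected by operator[]; clang flags this under -Wsign-conversion.
std::int32_t DecodeImplicit(int nibble) {
    return kSignedNibbles[nibble];
}

// Style used by the reverted commit: spell the conversion out so -Werror stays quiet.
std::int32_t DecodeExplicit(int nibble) {
    return kSignedNibbles[static_cast<std::size_t>(nibble)];
}
} // namespace

int main() {
    // Both forms read the same entry; only the diagnostic behaviour differs.
    return DecodeImplicit(0xF) == DecodeExplicit(0xF) ? 0 : 1;
}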
@@ -857,7 +857,7 @@ inline int64_t MicroProfileLogTickDifference(MicroProfileLogEntry Start, MicroPr
 {
     uint64_t nStart = Start;
     uint64_t nEnd = End;
-    auto nDifference = static_cast<int64_t>((nEnd << 16) - (nStart << 16));
+    int64_t nDifference = ((nEnd<<16) - (nStart<<16));
     return nDifference >> 16;
 }

@@ -868,7 +868,7 @@ inline int64_t MicroProfileLogGetTick(MicroProfileLogEntry e)

 inline int64_t MicroProfileLogSetTick(MicroProfileLogEntry e, int64_t nTick)
 {
-    return static_cast<int64_t>((MP_LOG_TICK_MASK & static_cast<uint64_t>(nTick)) | (e & static_cast<uint64_t>(~MP_LOG_TICK_MASK)));
+    return (MP_LOG_TICK_MASK & nTick) | (e & ~MP_LOG_TICK_MASK);
 }

 template<typename T>
@@ -51,9 +51,8 @@ if (NOT MSVC)
         -Werror=implicit-fallthrough
         -Werror=reorder
         -Werror=sign-compare
-        -Werror=sign-conversion
-        $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
-        $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
+        -Werror=unused-but-set-parameter
+        -Werror=unused-but-set-variable
         -Werror=unused-variable
     )
 endif()
@@ -167,8 +167,8 @@ std::vector<s16> Interpolate(InterpolationState& state, std::vector<s16> input,
     output.reserve(static_cast<std::size_t>(static_cast<double>(input.size()) / ratio +
                                             InterpolationState::taps));

-    for (std::size_t frame = 0; frame < num_frames; ++frame) {
-        const auto lut_index{static_cast<size_t>(state.fraction >> 8) * InterpolationState::taps};
+    for (std::size_t frame{}; frame < num_frames; ++frame) {
+        const std::size_t lut_index{(state.fraction >> 8) * InterpolationState::taps};

         std::rotate(state.history.begin(), state.history.end() - 1, state.history.end());
         state.history[0][0] = input[frame * 2 + 0];

@@ -225,7 +225,7 @@ void Resample(s32* output, const s32* input, s32 pitch, s32& fraction, std::size

         output[i] = (l0 * s0 + l1 * s1 + l2 * s2 + l3 * s3) >> 15;
         fraction += pitch;
-        index += static_cast<size_t>(fraction >> 15);
+        index += (fraction >> 15);
         fraction &= 0x7fff;
     }
 }
@@ -187,8 +187,8 @@ void AudioRenderer::QueueMixedBuffer(Buffer::Tag tag) {
     const auto& in_params = final_mix.GetInParams();
     std::vector<s32*> mix_buffers(channel_count);
     for (std::size_t i = 0; i < channel_count; i++) {
-        mix_buffers[i] = command_generator.GetMixBuffer(
-            static_cast<u32>(in_params.buffer_offset) + buffer_offsets[i]);
+        mix_buffers[i] =
+            command_generator.GetMixBuffer(in_params.buffer_offset + buffer_offsets[i]);
     }

     for (std::size_t i = 0; i < BUFFER_SIZE; i++) {
@@ -32,7 +32,7 @@ std::vector<s16> DecodeADPCM(const u8* const data, std::size_t size, const ADPCM
     for (std::size_t framei = 0; framei < NUM_FRAMES; framei++) {
         const int frame_header = data[framei * FRAME_LEN];
         const int scale = 1 << (frame_header & 0xF);
-        const auto idx = static_cast<size_t>((frame_header >> 4) & 0x7);
+        const int idx = (frame_header >> 4) & 0x7;

         // Coefficients are fixed point with 11 bits fractional part.
         const int coef1 = coeff[idx * 2 + 0];

@@ -57,11 +57,11 @@ std::vector<s16> DecodeADPCM(const u8* const data, std::size_t size, const ADPCM
         std::size_t outputi = framei * SAMPLES_PER_FRAME;
         std::size_t datai = framei * FRAME_LEN + 1;
         for (std::size_t i = 0; i < SAMPLES_PER_FRAME && outputi < sample_count; i += 2) {
-            const s16 sample1 = decode_sample(SIGNED_NIBBLES[static_cast<u32>(data[datai] >> 4)]);
+            const s16 sample1 = decode_sample(SIGNED_NIBBLES[data[datai] >> 4]);
             ret[outputi] = sample1;
             outputi++;

-            const s16 sample2 = decode_sample(SIGNED_NIBBLES[static_cast<u32>(data[datai] & 0xF)]);
+            const s16 sample2 = decode_sample(SIGNED_NIBBLES[data[datai] & 0xF]);
             ret[outputi] = sample2;
             outputi++;
@@ -15,8 +15,8 @@ constexpr std::size_t MIX_BUFFER_SIZE = 0x3f00;
 constexpr std::size_t SCALED_MIX_BUFFER_SIZE = MIX_BUFFER_SIZE << 15ULL;

 template <std::size_t N>
-void ApplyMix(s32* output, const s32* input, s32 gain, std::size_t sample_count) {
-    for (std::size_t i = 0; i < sample_count; i += N) {
+void ApplyMix(s32* output, const s32* input, s32 gain, s32 sample_count) {
+    for (std::size_t i = 0; i < static_cast<std::size_t>(sample_count); i += N) {
         for (std::size_t j = 0; j < N; j++) {
             output[i + j] +=
                 static_cast<s32>((static_cast<s64>(input[i + j]) * gain + 0x4000) >> 15);
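The ApplyMix loop in the hunk above mixes with a Q15 fixed-point gain: the sample is widened to 64 bits, multiplied by the gain (32768 representing 1.0), rounded by adding 0x4000, and shifted back down by 15 bits before being accumulated. A small self-contained sketch of that arithmetic, using illustrative names rather than the emulator's API:

// Illustrative only: Q15 fixed-point mixing as used by the ApplyMix loop above.
// A gain of 32768 is unity; 0x4000 rounds the product before the >> 15.
#include <cstdint>
#include <cstdio>

std::int32_t MixSample(std::int32_t accumulator, std::int32_t sample, std::int32_t gain_q15) {
    const std::int64_t scaled = static_cast<std::int64_t>(sample) * gain_q15 + 0x4000;
    return accumulator + static_cast<std::int32_t>(scaled >> 15);
}

int main() {
    // Half gain (16384/32768 = 0.5): 1000 scales to 500 and is added onto an
    // existing accumulator value of 100, giving 600.
    std::printf("%d\n", MixSample(100, 1000, 16384));
    return 0;
}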
@@ -111,8 +111,7 @@ void CommandGenerator::GenerateVoiceCommand(ServerVoiceInfo& voice_info) {
     const auto channel_count = in_params.channel_count;

     for (s32 channel = 0; channel < channel_count; channel++) {
-        const auto resource_id =
-            static_cast<u32>(in_params.voice_channel_resource_id[static_cast<u32>(channel)]);
+        const auto resource_id = in_params.voice_channel_resource_id[channel];
         auto& dsp_state = voice_context.GetDspSharedState(resource_id);
         auto& channel_resource = voice_context.GetChannelResource(resource_id);

@@ -133,15 +132,14 @@ void CommandGenerator::GenerateVoiceCommand(ServerVoiceInfo& voice_info) {

         if (in_params.mix_id != AudioCommon::NO_MIX) {
             // If we're using a mix id
-            auto& mix_info = mix_context.GetInfo(static_cast<u32>(in_params.mix_id));
+            auto& mix_info = mix_context.GetInfo(in_params.mix_id);
             const auto& dest_mix_params = mix_info.GetInParams();

             // Voice Mixing
             GenerateVoiceMixCommand(
                 channel_resource.GetCurrentMixVolume(), channel_resource.GetLastMixVolume(),
-                dsp_state, static_cast<u32>(dest_mix_params.buffer_offset),
-                static_cast<u32>(dest_mix_params.buffer_count),
-                worker_params.mix_buffer_count + static_cast<u32>(channel), in_params.node_id);
+                dsp_state, dest_mix_params.buffer_offset, dest_mix_params.buffer_count,
+                worker_params.mix_buffer_count + channel, in_params.node_id);

             // Update last mix volumes
             channel_resource.UpdateLastMixVolumes();

@@ -158,15 +156,12 @@ void CommandGenerator::GenerateVoiceCommand(ServerVoiceInfo& voice_info) {
                     continue;
                 }

-                const auto& mix_info =
-                    mix_context.GetInfo(static_cast<u32>(destination_data->GetMixId()));
+                const auto& mix_info = mix_context.GetInfo(destination_data->GetMixId());
                 const auto& dest_mix_params = mix_info.GetInParams();
                 GenerateVoiceMixCommand(
                     destination_data->CurrentMixVolumes(), destination_data->LastMixVolumes(),
-                    dsp_state, static_cast<u32>(dest_mix_params.buffer_offset),
-                    static_cast<u32>(dest_mix_params.buffer_count),
-                    worker_params.mix_buffer_count + static_cast<u32>(channel),
-                    in_params.node_id);
+                    dsp_state, dest_mix_params.buffer_offset, dest_mix_params.buffer_count,
+                    worker_params.mix_buffer_count + channel, in_params.node_id);
                 destination_data->MarkDirty();
             }
         }
@@ -224,10 +219,9 @@ void CommandGenerator::GenerateDataSourceCommand(ServerVoiceInfo& voice_info, Vo

     if (depop) {
         if (in_params.mix_id != AudioCommon::NO_MIX) {
-            auto& mix_info = mix_context.GetInfo(static_cast<u32>(in_params.mix_id));
+            auto& mix_info = mix_context.GetInfo(in_params.mix_id);
             const auto& mix_in = mix_info.GetInParams();
-            GenerateDepopPrepareCommand(dsp_state, static_cast<u32>(mix_in.buffer_count),
-                                        static_cast<u32>(mix_in.buffer_offset));
+            GenerateDepopPrepareCommand(dsp_state, mix_in.buffer_count, mix_in.buffer_offset);
         } else if (in_params.splitter_info_id != AudioCommon::NO_SPLITTER) {
             s32 index{};
             while (const auto* destination =

@@ -235,24 +229,23 @@ void CommandGenerator::GenerateDataSourceCommand(ServerVoiceInfo& voice_info, Vo
                 if (!destination->IsConfigured()) {
                     continue;
                 }
-                auto& mix_info = mix_context.GetInfo(static_cast<u32>(destination->GetMixId()));
+                auto& mix_info = mix_context.GetInfo(destination->GetMixId());
                 const auto& mix_in = mix_info.GetInParams();
-                GenerateDepopPrepareCommand(dsp_state, static_cast<u32>(mix_in.buffer_count),
-                                            static_cast<u32>(mix_in.buffer_offset));
+                GenerateDepopPrepareCommand(dsp_state, mix_in.buffer_count, mix_in.buffer_offset);
             }
         }
     } else {
         switch (in_params.sample_format) {
         case SampleFormat::Pcm16:
             DecodeFromWaveBuffers(voice_info, GetChannelMixBuffer(channel), dsp_state, channel,
-                                  static_cast<s32>(worker_params.sample_rate),
-                                  static_cast<s32>(worker_params.sample_count), in_params.node_id);
+                                  worker_params.sample_rate, worker_params.sample_count,
+                                  in_params.node_id);
             break;
         case SampleFormat::Adpcm:
             ASSERT(channel == 0 && in_params.channel_count == 1);
             DecodeFromWaveBuffers(voice_info, GetChannelMixBuffer(0), dsp_state, 0,
-                                  static_cast<s32>(worker_params.sample_rate),
-                                  static_cast<s32>(worker_params.sample_count), in_params.node_id);
+                                  worker_params.sample_rate, worker_params.sample_count,
+                                  in_params.node_id);
             break;
         default:
             UNREACHABLE_MSG("Unimplemented sample format={}", in_params.sample_format);
@@ -262,7 +255,7 @@ void CommandGenerator::GenerateDataSourceCommand(ServerVoiceInfo& voice_info, Vo

 void CommandGenerator::GenerateBiquadFilterCommandForVoice(ServerVoiceInfo& voice_info,
                                                            VoiceState& dsp_state,
-                                                           u32 mix_buffer_count, s32 channel) {
+                                                           s32 mix_buffer_count, s32 channel) {
     for (std::size_t i = 0; i < AudioCommon::MAX_BIQUAD_FILTERS; i++) {
         const auto& in_params = voice_info.GetInParams();
         auto& biquad_filter = in_params.biquad_filter[i];

@@ -342,8 +335,8 @@ void CommandGenerator::GenerateDepopForMixBuffersCommand(std::size_t mix_buffer_
             continue;
         }

-        depop_buffer[i] = ApplyMixDepop(GetMixBuffer(i), depop_buffer[i], delta,
-                                        static_cast<s32>(worker_params.sample_count));
+        depop_buffer[i] =
+            ApplyMixDepop(GetMixBuffer(i), depop_buffer[i], delta, worker_params.sample_count);
     }
 }

@@ -355,7 +348,7 @@ void CommandGenerator::GenerateEffectCommand(ServerMixInfo& mix_info) {
         if (index == AudioCommon::NO_EFFECT_ORDER) {
             break;
         }
-        auto* info = effect_context.GetInfo(static_cast<u32>(index));
+        auto* info = effect_context.GetInfo(index);
         const auto type = info->GetType();

         // TODO(ogniK): Finish remaining effects
@@ -384,11 +377,11 @@ void CommandGenerator::GenerateI3dl2ReverbEffectCommand(s32 mix_buffer_offset, E
     }
     const auto& params = dynamic_cast<EffectI3dl2Reverb*>(info)->GetParams();
     const auto channel_count = params.channel_count;
-    for (size_t i = 0; i < channel_count; i++) {
+    for (s32 i = 0; i < channel_count; i++) {
         // TODO(ogniK): Actually implement reverb
         if (params.input[i] != params.output[i]) {
-            const auto* input = GetMixBuffer(static_cast<u32>(mix_buffer_offset + params.input[i]));
-            auto* output = GetMixBuffer(static_cast<u32>(mix_buffer_offset + params.output[i]));
+            const auto* input = GetMixBuffer(mix_buffer_offset + params.input[i]);
+            auto* output = GetMixBuffer(mix_buffer_offset + params.output[i]);
             ApplyMix<1>(output, input, 32768, worker_params.sample_count);
         }
     }

@@ -399,14 +392,13 @@ void CommandGenerator::GenerateBiquadFilterEffectCommand(s32 mix_buffer_offset,
     if (!enabled) {
         return;
     }
-
     const auto& params = dynamic_cast<EffectBiquadFilter*>(info)->GetParams();
-    const auto channel_count = static_cast<u32>(params.channel_count);
-    for (size_t i = 0; i < channel_count; i++) {
+    const auto channel_count = params.channel_count;
+    for (s32 i = 0; i < channel_count; i++) {
         // TODO(ogniK): Actually implement biquad filter
         if (params.input[i] != params.output[i]) {
-            const auto* input = GetMixBuffer(static_cast<u32>(mix_buffer_offset + params.input[i]));
-            auto* output = GetMixBuffer(static_cast<u32>(mix_buffer_offset + params.output[i]));
+            const auto* input = GetMixBuffer(mix_buffer_offset + params.input[i]);
+            auto* output = GetMixBuffer(mix_buffer_offset + params.output[i]);
             ApplyMix<1>(output, input, 32768, worker_params.sample_count);
         }
     }
@@ -433,30 +425,26 @@ void CommandGenerator::GenerateAuxCommand(s32 mix_buffer_offset, EffectBase* inf
         memory.ReadBlock(aux->GetSendInfo(), &send_info, sizeof(AuxInfoDSP));
         memory.ReadBlock(aux->GetRecvInfo(), &recv_info, sizeof(AuxInfoDSP));

-        WriteAuxBuffer(send_info, aux->GetSendBuffer(),
-                       static_cast<u32>(params.sample_count),
-                       GetMixBuffer(static_cast<u32>(input_index)),
-                       worker_params.sample_count, offset, write_count);
+        WriteAuxBuffer(send_info, aux->GetSendBuffer(), params.sample_count,
+                       GetMixBuffer(input_index), worker_params.sample_count, offset,
+                       write_count);
         memory.WriteBlock(aux->GetSendInfo(), &send_info, sizeof(AuxInfoDSP));

         const auto samples_read = ReadAuxBuffer(
-            recv_info, aux->GetRecvBuffer(), static_cast<u32>(params.sample_count),
-            GetMixBuffer(static_cast<u32>(output_index)), worker_params.sample_count,
-            offset, write_count);
+            recv_info, aux->GetRecvBuffer(), params.sample_count,
+            GetMixBuffer(output_index), worker_params.sample_count, offset, write_count);
         memory.WriteBlock(aux->GetRecvInfo(), &recv_info, sizeof(AuxInfoDSP));

         if (samples_read != static_cast<int>(worker_params.sample_count) &&
             samples_read <= params.sample_count) {
-            std::memset(GetMixBuffer(static_cast<u32>(output_index)), 0,
-                        static_cast<size_t>(params.sample_count - samples_read));
+            std::memset(GetMixBuffer(output_index), 0, params.sample_count - samples_read);
         }
     } else {
         AuxInfoDSP empty{};
         memory.WriteBlock(aux->GetSendInfo(), &empty, sizeof(AuxInfoDSP));
         memory.WriteBlock(aux->GetRecvInfo(), &empty, sizeof(AuxInfoDSP));
         if (output_index != input_index) {
-            std::memcpy(GetMixBuffer(static_cast<u32>(output_index)),
-                        GetMixBuffer(static_cast<u32>(input_index)),
+            std::memcpy(GetMixBuffer(output_index), GetMixBuffer(input_index),
                         worker_params.sample_count * sizeof(s32));
         }
     }
@@ -470,8 +458,7 @@ ServerSplitterDestinationData* CommandGenerator::GetDestinationData(s32 splitter
     if (splitter_id == AudioCommon::NO_SPLITTER) {
         return nullptr;
     }
-    return splitter_context.GetDestinationData(static_cast<u32>(splitter_id),
-                                               static_cast<u32>(index));
+    return splitter_context.GetDestinationData(splitter_id, index);
 }

 s32 CommandGenerator::WriteAuxBuffer(AuxInfoDSP& dsp_info, VAddr send_buffer, u32 max_samples,

@@ -501,7 +488,7 @@ s32 CommandGenerator::WriteAuxBuffer(AuxInfoDSP& dsp_info, VAddr send_buffer, u3
     if (write_count != 0) {
         dsp_info.write_offset = (dsp_info.write_offset + write_count) % max_samples;
     }
-    return static_cast<s32>(sample_count);
+    return sample_count;
 }

 s32 CommandGenerator::ReadAuxBuffer(AuxInfoDSP& recv_info, VAddr recv_buffer, u32 max_samples,

@@ -531,7 +518,7 @@ s32 CommandGenerator::ReadAuxBuffer(AuxInfoDSP& recv_info, VAddr recv_buffer, u3
     if (read_count != 0) {
         recv_info.read_offset = (recv_info.read_offset + read_count) % max_samples;
     }
-    return static_cast<s32>(sample_count);
+    return sample_count;
 }

 void CommandGenerator::GenerateVolumeRampCommand(float last_volume, float current_volume,
@@ -550,15 +537,15 @@ void CommandGenerator::GenerateVolumeRampCommand(float last_volume, float curren
     }
     // Apply generic gain on samples
     ApplyGain(GetChannelMixBuffer(channel), GetChannelMixBuffer(channel), last, delta,
-              static_cast<s32>(worker_params.sample_count));
+              worker_params.sample_count);
 }

 void CommandGenerator::GenerateVoiceMixCommand(const MixVolumeBuffer& mix_volumes,
                                                const MixVolumeBuffer& last_mix_volumes,
-                                               VoiceState& dsp_state, u32 mix_buffer_offset,
-                                               u32 mix_buffer_count, u32 voice_index, s32 node_id) {
+                                               VoiceState& dsp_state, s32 mix_buffer_offset,
+                                               s32 mix_buffer_count, s32 voice_index, s32 node_id) {
     // Loop all our mix buffers
-    for (size_t i = 0; i < mix_buffer_count; i++) {
+    for (s32 i = 0; i < mix_buffer_count; i++) {
         if (last_mix_volumes[i] != 0.0f || mix_volumes[i] != 0.0f) {
             const auto delta = static_cast<float>((mix_volumes[i] - last_mix_volumes[i])) /
                                static_cast<float>(worker_params.sample_count);

@@ -571,9 +558,9 @@ void CommandGenerator::GenerateVoiceMixCommand(const MixVolumeBuffer& mix_volume
                           mix_volumes[i]);
             }

-            dsp_state.previous_samples[i] = ApplyMixRamp(
-                GetMixBuffer(mix_buffer_offset + i), GetMixBuffer(voice_index), last_mix_volumes[i],
-                delta, static_cast<s32>(worker_params.sample_count));
+            dsp_state.previous_samples[i] =
+                ApplyMixRamp(GetMixBuffer(mix_buffer_offset + i), GetMixBuffer(voice_index),
+                             last_mix_volumes[i], delta, worker_params.sample_count);
         } else {
             dsp_state.previous_samples[i] = 0;
         }
@@ -585,8 +572,7 @@ void CommandGenerator::GenerateSubMixCommand(ServerMixInfo& mix_info) {
         LOG_DEBUG(Audio, "(DSP_TRACE) GenerateSubMixCommand");
     }
     const auto& in_params = mix_info.GetInParams();
-    GenerateDepopForMixBuffersCommand(static_cast<u32>(in_params.buffer_count),
-                                      static_cast<u32>(in_params.buffer_offset),
+    GenerateDepopForMixBuffersCommand(in_params.buffer_count, in_params.buffer_offset,
                                       in_params.sample_rate);

     GenerateEffectCommand(mix_info);

@@ -600,18 +586,18 @@ void CommandGenerator::GenerateMixCommands(ServerMixInfo& mix_info) {
     }
     const auto& in_params = mix_info.GetInParams();
     if (in_params.dest_mix_id != AudioCommon::NO_MIX) {
-        const auto& dest_mix = mix_context.GetInfo(static_cast<u32>(in_params.dest_mix_id));
+        const auto& dest_mix = mix_context.GetInfo(in_params.dest_mix_id);
         const auto& dest_in_params = dest_mix.GetInParams();

-        const auto buffer_count = static_cast<u32>(in_params.buffer_count);
+        const auto buffer_count = in_params.buffer_count;

-        for (u32 i = 0; i < buffer_count; i++) {
-            for (u32 j = 0; j < static_cast<u32>(dest_in_params.buffer_count); j++) {
+        for (s32 i = 0; i < buffer_count; i++) {
+            for (s32 j = 0; j < dest_in_params.buffer_count; j++) {
                 const auto mixed_volume = in_params.volume * in_params.mix_volume[i][j];
                 if (mixed_volume != 0.0f) {
-                    GenerateMixCommand(static_cast<size_t>(dest_in_params.buffer_offset) + j,
-                                       static_cast<size_t>(in_params.buffer_offset) + i,
-                                       mixed_volume, static_cast<s32>(in_params.node_id));
+                    GenerateMixCommand(dest_in_params.buffer_offset + j,
+                                       in_params.buffer_offset + i, mixed_volume,
+                                       in_params.node_id);
                 }
             }
         }
@@ -622,17 +608,15 @@ void CommandGenerator::GenerateMixCommands(ServerMixInfo& mix_info) {
                 continue;
             }

-            const auto& dest_mix =
-                mix_context.GetInfo(static_cast<u32>(destination_data->GetMixId()));
+            const auto& dest_mix = mix_context.GetInfo(destination_data->GetMixId());
             const auto& dest_in_params = dest_mix.GetInParams();
             const auto mix_index = (base - 1) % in_params.buffer_count + in_params.buffer_offset;
             for (std::size_t i = 0; i < static_cast<std::size_t>(dest_in_params.buffer_count);
                  i++) {
                 const auto mixed_volume = in_params.volume * destination_data->GetMixVolume(i);
                 if (mixed_volume != 0.0f) {
-                    GenerateMixCommand(static_cast<size_t>(dest_in_params.buffer_offset) + i,
-                                       static_cast<size_t>(mix_index), mixed_volume,
-                                       static_cast<s32>(in_params.node_id));
+                    GenerateMixCommand(dest_in_params.buffer_offset + i, mix_index, mixed_volume,
+                                       in_params.node_id);
                 }
             }
         }

@@ -651,8 +635,7 @@ void CommandGenerator::GenerateMixCommand(std::size_t output_offset, std::size_t
     auto* output = GetMixBuffer(output_offset);
     const auto* input = GetMixBuffer(input_offset);

-    const auto gain = static_cast<s32>(volume * 32768.0f);
-
+    const s32 gain = static_cast<s32>(volume * 32768.0f);
     // Mix with loop unrolling
     if (worker_params.sample_count % 4 == 0) {
         ApplyMix<4>(output, input, gain, worker_params.sample_count);
@@ -670,8 +653,7 @@ void CommandGenerator::GenerateFinalMixCommand() {
     auto& mix_info = mix_context.GetFinalMixInfo();
     const auto& in_params = mix_info.GetInParams();

-    GenerateDepopForMixBuffersCommand(static_cast<u32>(in_params.buffer_count),
-                                      static_cast<u32>(in_params.buffer_offset),
+    GenerateDepopForMixBuffersCommand(in_params.buffer_count, in_params.buffer_offset,
                                       in_params.sample_rate);

     GenerateEffectCommand(mix_info);

@@ -685,16 +667,16 @@ void CommandGenerator::GenerateFinalMixCommand() {
                       in_params.node_id, in_params.buffer_offset + i, in_params.buffer_offset + i,
                       in_params.volume);
         }
-        ApplyGainWithoutDelta(GetMixBuffer(static_cast<size_t>(in_params.buffer_offset + i)),
-                              GetMixBuffer(static_cast<size_t>(in_params.buffer_offset + i)), gain,
-                              static_cast<s32>(worker_params.sample_count));
+        ApplyGainWithoutDelta(GetMixBuffer(in_params.buffer_offset + i),
+                              GetMixBuffer(in_params.buffer_offset + i), gain,
+                              worker_params.sample_count);
     }
 }

 s32 CommandGenerator::DecodePcm16(ServerVoiceInfo& voice_info, VoiceState& dsp_state,
                                   s32 sample_count, s32 channel, std::size_t mix_offset) {
     const auto& in_params = voice_info.GetInParams();
-    const auto& wave_buffer = in_params.wave_buffer[static_cast<u32>(dsp_state.wave_buffer_index)];
+    const auto& wave_buffer = in_params.wave_buffer[dsp_state.wave_buffer_index];
     if (wave_buffer.buffer_address == 0) {
         return 0;
     }
@@ -707,26 +689,24 @@ s32 CommandGenerator::DecodePcm16(ServerVoiceInfo& voice_info, VoiceState& dsp_s
     const auto samples_remaining =
         (wave_buffer.end_sample_offset - wave_buffer.start_sample_offset) - dsp_state.offset;
     const auto start_offset =
-        static_cast<size_t>((wave_buffer.start_sample_offset + dsp_state.offset) *
-                            in_params.channel_count) *
+        ((wave_buffer.start_sample_offset + dsp_state.offset) * in_params.channel_count) *
         sizeof(s16);
     const auto buffer_pos = wave_buffer.buffer_address + start_offset;
     const auto samples_processed = std::min(sample_count, samples_remaining);

     if (in_params.channel_count == 1) {
-        std::vector<s16> buffer(static_cast<size_t>(samples_processed));
+        std::vector<s16> buffer(samples_processed);
         memory.ReadBlock(buffer_pos, buffer.data(), buffer.size() * sizeof(s16));
         for (std::size_t i = 0; i < buffer.size(); i++) {
             sample_buffer[mix_offset + i] = buffer[i];
         }
     } else {
         const auto channel_count = in_params.channel_count;
-        std::vector<s16> buffer(static_cast<size_t>(samples_processed * channel_count));
+        std::vector<s16> buffer(samples_processed * channel_count);
         memory.ReadBlock(buffer_pos, buffer.data(), buffer.size() * sizeof(s16));

         for (std::size_t i = 0; i < static_cast<std::size_t>(samples_processed); i++) {
-            sample_buffer[mix_offset + i] =
-                buffer[i * static_cast<u32>(channel_count) + static_cast<u32>(channel)];
+            sample_buffer[mix_offset + i] = buffer[i * channel_count + channel];
         }
     }

@@ -736,7 +716,7 @@ s32 CommandGenerator::DecodePcm16(ServerVoiceInfo& voice_info, VoiceState& dsp_s
 s32 CommandGenerator::DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_state,
                                   s32 sample_count, s32 channel, std::size_t mix_offset) {
     const auto& in_params = voice_info.GetInParams();
-    const auto& wave_buffer = in_params.wave_buffer[static_cast<u32>(dsp_state.wave_buffer_index)];
+    const auto& wave_buffer = in_params.wave_buffer[dsp_state.wave_buffer_index];
     if (wave_buffer.buffer_address == 0) {
         return 0;
     }

@@ -756,7 +736,7 @@ s32 CommandGenerator::DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_s
     constexpr std::size_t SAMPLES_PER_FRAME = 14;

     auto frame_header = dsp_state.context.header;
-    auto idx = static_cast<size_t>((frame_header >> 4) & 0xf);
+    s32 idx = (frame_header >> 4) & 0xf;
     s32 scale = frame_header & 0xf;
     s16 yn1 = dsp_state.context.yn1;
     s16 yn2 = dsp_state.context.yn2;

@@ -773,9 +753,8 @@ s32 CommandGenerator::DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_s
     const auto samples_processed = std::min(sample_count, samples_remaining);
     const auto sample_pos = wave_buffer.start_sample_offset + dsp_state.offset;

-    const auto samples_remaining_in_frame = static_cast<u32>(sample_pos) % SAMPLES_PER_FRAME;
-    auto position_in_frame =
-        ((static_cast<u32>(sample_pos) / SAMPLES_PER_FRAME) * NIBBLES_PER_SAMPLE) +
+    const auto samples_remaining_in_frame = sample_pos % SAMPLES_PER_FRAME;
+    auto position_in_frame = ((sample_pos / SAMPLES_PER_FRAME) * NIBBLES_PER_SAMPLE) +
                              samples_remaining_in_frame + (samples_remaining_in_frame != 0 ? 2 : 0);

     const auto decode_sample = [&](const int nibble) -> s16 {
@@ -795,7 +774,7 @@ s32 CommandGenerator::DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_s

     std::size_t buffer_offset{};
     std::vector<u8> buffer(
-        std::max((static_cast<u32>(samples_processed) / FRAME_LEN) * SAMPLES_PER_FRAME, FRAME_LEN));
+        std::max((samples_processed / FRAME_LEN) * SAMPLES_PER_FRAME, FRAME_LEN));
     memory.ReadBlock(wave_buffer.buffer_address + (position_in_frame / 2), buffer.data(),
                      buffer.size());
     std::size_t cur_mix_offset = mix_offset;

@@ -805,7 +784,7 @@ s32 CommandGenerator::DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_s
         if (position_in_frame % NIBBLES_PER_SAMPLE == 0) {
             // Read header
             frame_header = buffer[buffer_offset++];
-            idx = static_cast<size_t>((frame_header >> 4) & 0xf);
+            idx = (frame_header >> 4) & 0xf;
             scale = frame_header & 0xf;
             coef1 = coeffs[idx * 2];
             coef2 = coeffs[idx * 2 + 1];

@@ -815,8 +794,8 @@ s32 CommandGenerator::DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_s
         if (remaining_samples >= static_cast<int>(SAMPLES_PER_FRAME)) {
             for (std::size_t i = 0; i < SAMPLES_PER_FRAME / 2; i++) {
                 // Sample 1
-                const s32 s0 = SIGNED_NIBBLES[static_cast<u32>(buffer[buffer_offset] >> 4)];
-                const s32 s1 = SIGNED_NIBBLES[static_cast<u32>(buffer[buffer_offset++] & 0xf)];
+                const s32 s0 = SIGNED_NIBBLES[buffer[buffer_offset] >> 4];
+                const s32 s1 = SIGNED_NIBBLES[buffer[buffer_offset++] & 0xf];
                 const s16 sample_1 = decode_sample(s0);
                 const s16 sample_2 = decode_sample(s1);
                 sample_buffer[cur_mix_offset++] = sample_1;
@@ -828,14 +807,14 @@ s32 CommandGenerator::DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_s
             }
         }
         // Decode mid frame
-        auto current_nibble = static_cast<s32>(buffer[buffer_offset]);
-        if ((position_in_frame++ & 1) != 0) {
+        s32 current_nibble = buffer[buffer_offset];
+        if (position_in_frame++ & 0x1) {
             current_nibble &= 0xf;
             buffer_offset++;
         } else {
             current_nibble >>= 4;
         }
-        const s16 sample = decode_sample(SIGNED_NIBBLES[static_cast<u32>(current_nibble)]);
+        const s16 sample = decode_sample(SIGNED_NIBBLES[current_nibble]);
         sample_buffer[cur_mix_offset++] = sample;
         remaining_samples--;
     }

@@ -856,7 +835,7 @@ const s32* CommandGenerator::GetMixBuffer(std::size_t index) const {
 }

 std::size_t CommandGenerator::GetMixChannelBufferOffset(s32 channel) const {
-    return worker_params.mix_buffer_count + static_cast<u32>(channel);
+    return worker_params.mix_buffer_count + channel;
 }

 std::size_t CommandGenerator::GetTotalMixBufferCount() const {
@@ -864,11 +843,11 @@ std::size_t CommandGenerator::GetTotalMixBufferCount() const {
 }

 s32* CommandGenerator::GetChannelMixBuffer(s32 channel) {
-    return GetMixBuffer(worker_params.mix_buffer_count + static_cast<u32>(channel));
+    return GetMixBuffer(worker_params.mix_buffer_count + channel);
 }

 const s32* CommandGenerator::GetChannelMixBuffer(s32 channel) const {
-    return GetMixBuffer(worker_params.mix_buffer_count + static_cast<u32>(channel));
+    return GetMixBuffer(worker_params.mix_buffer_count + channel);
 }

 void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* output,

@@ -916,10 +895,9 @@ void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* o

     s32 samples_read{};
     while (samples_read < samples_to_read) {
-        const auto& wave_buffer =
-            in_params.wave_buffer[static_cast<u32>(dsp_state.wave_buffer_index)];
+        const auto& wave_buffer = in_params.wave_buffer[dsp_state.wave_buffer_index];
         // No more data can be read
-        if (!dsp_state.is_wave_buffer_valid[static_cast<u32>(dsp_state.wave_buffer_index)]) {
+        if (!dsp_state.is_wave_buffer_valid[dsp_state.wave_buffer_index]) {
             is_buffer_completed = true;
             break;
         }
@@ -943,7 +921,7 @@ void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* o
             UNREACHABLE_MSG("Unimplemented sample format={}", in_params.sample_format);
         }

-        temp_mix_offset += static_cast<size_t>(samples_decoded);
+        temp_mix_offset += samples_decoded;
         samples_read += samples_decoded;
         dsp_state.offset += samples_decoded;
         dsp_state.played_sample_count += samples_decoded;

@@ -966,12 +944,10 @@ void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* o
             } else {

                 // Update our wave buffer states
-                dsp_state.is_wave_buffer_valid[static_cast<u32>(dsp_state.wave_buffer_index)] =
-                    false;
+                dsp_state.is_wave_buffer_valid[dsp_state.wave_buffer_index] = false;
                 dsp_state.wave_buffer_consumed++;
                 dsp_state.wave_buffer_index =
-                    static_cast<u32>(dsp_state.wave_buffer_index + 1) %
-                    AudioCommon::MAX_WAVE_BUFFERS;
+                    (dsp_state.wave_buffer_index + 1) % AudioCommon::MAX_WAVE_BUFFERS;
                 if (wave_buffer.end_of_stream) {
                     dsp_state.played_sample_count = 0;
                 }
@@ -981,20 +957,16 @@ void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* o

     if (in_params.behavior_flags.is_pitch_and_src_skipped.Value()) {
         // No need to resample
-        std::memcpy(output, sample_buffer.data(),
-                    static_cast<size_t>(samples_read) * sizeof(s32));
+        std::memcpy(output, sample_buffer.data(), samples_read * sizeof(s32));
     } else {
-        {
-            const auto begin = sample_buffer.begin() + static_cast<ptrdiff_t>(temp_mix_offset);
-            const auto end = begin + (samples_to_read - samples_read);
-            std::fill(begin, end, 0);
-        }
+        std::fill(sample_buffer.begin() + temp_mix_offset,
+                  sample_buffer.begin() + temp_mix_offset + (samples_to_read - samples_read),
+                  0);
         AudioCore::Resample(output, sample_buffer.data(), resample_rate, dsp_state.fraction,
-                            static_cast<size_t>(samples_to_output));
+                            samples_to_output);
         // Resample
         for (std::size_t i = 0; i < AudioCommon::MAX_SAMPLE_HISTORY; i++) {
-            dsp_state.sample_history[i] =
-                sample_buffer[static_cast<size_t>(samples_to_read) + i];
+            dsp_state.sample_history[i] = sample_buffer[samples_to_read + i];
         }
     }
     output += samples_to_output;
@@ -50,12 +50,12 @@ public:
 private:
     void GenerateDataSourceCommand(ServerVoiceInfo& voice_info, VoiceState& dsp_state, s32 channel);
     void GenerateBiquadFilterCommandForVoice(ServerVoiceInfo& voice_info, VoiceState& dsp_state,
-                                             u32 mix_buffer_count, s32 channel);
+                                             s32 mix_buffer_count, s32 channel);
     void GenerateVolumeRampCommand(float last_volume, float current_volume, s32 channel,
                                    s32 node_id);
     void GenerateVoiceMixCommand(const MixVolumeBuffer& mix_volumes,
                                  const MixVolumeBuffer& last_mix_volumes, VoiceState& dsp_state,
-                                 u32 mix_buffer_offset, u32 mix_buffer_count, u32 voice_index,
+                                 s32 mix_buffer_offset, s32 mix_buffer_count, s32 voice_index,
                                  s32 node_id);
     void GenerateSubMixCommand(ServerMixInfo& mix_info);
     void GenerateMixCommands(ServerMixInfo& mix_info);
@@ -202,7 +202,7 @@ long CubebSinkStream::DataCallback(cubeb_stream* stream, void* user_data, const
     }

     const std::size_t num_channels = impl->GetNumChannels();
-    const std::size_t samples_to_write = num_channels * static_cast<u64>(num_frames);
+    const std::size_t samples_to_write = num_channels * num_frames;
     std::size_t samples_written;

     /*

@@ -27,7 +27,7 @@ private:
     std::vector<SinkStreamPtr> sink_streams;

 #ifdef _WIN32
-    s32 com_init_result = 0;
+    u32 com_init_result = 0;
 #endif
 };

@@ -350,7 +350,7 @@ ResultCode InfoUpdater::UpdateMixes(MixContext& mix_context, std::size_t mix_buf
     std::size_t total_buffer_count{};
     for (std::size_t i = 0; i < mix_count; i++) {
         const auto& in = mix_in_params[i];
-        total_buffer_count += static_cast<size_t>(in.buffer_count);
+        total_buffer_count += in.buffer_count;
         if (static_cast<std::size_t>(in.dest_mix_id) > mix_count &&
             in.dest_mix_id != AudioCommon::NO_MIX && in.mix_id != AudioCommon::FINAL_MIX) {
             LOG_ERROR(

@@ -379,7 +379,7 @@ ResultCode InfoUpdater::UpdateMixes(MixContext& mix_context, std::size_t mix_buf
         const auto& mix_in = mix_in_params[i];
         std::size_t target_mix{};
         if (behavior_info.IsMixInParameterDirtyOnlyUpdateSupported()) {
-            target_mix = static_cast<size_t>(mix_in.mix_id);
+            target_mix = mix_in.mix_id;
         } else {
             // Non dirty supported games just use i instead of the actual mix_id
             target_mix = i;
@@ -62,7 +62,7 @@ void MixContext::UpdateDistancesFromFinalMix() {
             distance_to_final_mix = AudioCommon::NO_FINAL_MIX;
             break;
         } else {
-            const auto& dest_mix = GetInfo(static_cast<u32>(mix_id));
+            const auto& dest_mix = GetInfo(mix_id);
             const auto dest_mix_distance = dest_mix.GetInParams().final_mix_distance;

             if (dest_mix_distance == AudioCommon::NO_FINAL_MIX) {

@@ -129,7 +129,7 @@ bool MixContext::TsortInfo(SplitterContext& splitter_context) {
     std::size_t info_id{};
     for (auto itr = sorted_list.rbegin(); itr != sorted_list.rend(); ++itr) {
         // Set our sorted info
-        sorted_info[info_id++] = &GetInfo(static_cast<u32>(*itr));
+        sorted_info[info_id++] = &GetInfo(*itr);
     }

     // Calculate the mix buffer offset
@@ -218,8 +218,7 @@ bool ServerMixInfo::Update(EdgeMatrix& edge_matrix, const MixInfo::InParams& mix
     for (std::size_t i = 0; i < effect_count; i++) {
         auto* effect_info = effect_context.GetInfo(i);
         if (effect_info->GetMixID() == in_params.mix_id) {
-            const auto processing_order = static_cast<u32>(effect_info->GetProcessingOrder());
-            effect_processing_order[processing_order] = static_cast<s32>(i);
+            effect_processing_order[effect_info->GetProcessingOrder()] = static_cast<s32>(i);
         }
     }

@@ -266,7 +265,7 @@ bool ServerMixInfo::UpdateConnection(EdgeMatrix& edge_matrix, const MixInfo::InP
     if (in_params.dest_mix_id == mix_in.dest_mix_id &&
         in_params.splitter_id == mix_in.splitter_id &&
         ((in_params.splitter_id == AudioCommon::NO_SPLITTER) ||
-         !splitter_context.GetInfo(static_cast<u32>(in_params.splitter_id)).HasNewConnection())) {
+         !splitter_context.GetInfo(in_params.splitter_id).HasNewConnection())) {
         return false;
     }
     // Remove current edges for mix id
@@ -276,11 +275,11 @@ bool ServerMixInfo::UpdateConnection(EdgeMatrix& edge_matrix, const MixInfo::InP
         edge_matrix.Connect(in_params.mix_id, mix_in.dest_mix_id);
     } else if (mix_in.splitter_id != AudioCommon::NO_SPLITTER) {
         // Recurse our splitter linked and set our edges
-        auto& splitter_info = splitter_context.GetInfo(static_cast<u32>(mix_in.splitter_id));
-        const auto length = static_cast<size_t>(splitter_info.GetLength());
-        for (size_t i = 0; i < length; i++) {
+        auto& splitter_info = splitter_context.GetInfo(mix_in.splitter_id);
+        const auto length = splitter_info.GetLength();
+        for (s32 i = 0; i < length; i++) {
             const auto* splitter_destination =
-                splitter_context.GetDestinationData(static_cast<u32>(mix_in.splitter_id), i);
+                splitter_context.GetDestinationData(mix_in.splitter_id, i);
             if (splitter_destination == nullptr) {
                 continue;
             }
@@ -23,9 +23,8 @@ bool SinkContext::InUse() const {
 }

 std::vector<u8> SinkContext::OutputBuffers() const {
-    const auto output_use_count = static_cast<size_t>(use_count);
-    std::vector<u8> buffer_ret(output_use_count);
-    std::memcpy(buffer_ret.data(), buffers.data(), output_use_count);
+    std::vector<u8> buffer_ret(use_count);
+    std::memcpy(buffer_ret.data(), buffers.data(), use_count);
     return buffer_ret;
 }

@@ -109,7 +109,7 @@ std::size_t ServerSplitterInfo::Update(SplitterInfo::InInfoPrams& header) {
     new_connection = true;
     // We need to update the size here due to the splitter bug being present and providing an
     // incorrect size. We're suppose to also update the header here but we just ignore and continue
-    return (sizeof(s32_le) * static_cast<size_t>(header.length - 1)) + (sizeof(s32_le) * 3);
+    return (sizeof(s32_le) * (header.length - 1)) + (sizeof(s32_le) * 3);
 }

 ServerSplitterDestinationData* ServerSplitterInfo::GetHead() {

@@ -306,14 +306,13 @@ bool SplitterContext::UpdateInfo(const std::vector<u8>& input, std::size_t& inpu
             break;
         }

-        const auto send_id = static_cast<std::size_t>(header.send_id);
-        if (header.send_id < 0 || send_id > info_count) {
+        if (header.send_id < 0 || static_cast<std::size_t>(header.send_id) > info_count) {
             LOG_ERROR(Audio, "Bad splitter data id");
             break;
         }

         UpdateOffsets(sizeof(SplitterInfo::InInfoPrams));
-        auto& info = GetInfo(send_id);
+        auto& info = GetInfo(header.send_id);
         if (!RecomposeDestination(info, header, input, input_offset)) {
             LOG_ERROR(Audio, "Failed to recompose destination for splitter!");
             return false;
@@ -349,12 +348,11 @@ bool SplitterContext::UpdateData(const std::vector<u8>& input, std::size_t& inpu
             break;
         }

-        const auto splitter_id = static_cast<std::size_t>(header.splitter_id);
-        if (header.splitter_id < 0 || splitter_id > data_count) {
+        if (header.splitter_id < 0 || static_cast<std::size_t>(header.splitter_id) > data_count) {
             LOG_ERROR(Audio, "Bad splitter data id");
             break;
         }
-        GetData(splitter_id).Update(header);
+        GetData(header.splitter_id).Update(header);
     }
     return true;
 }

@@ -388,9 +386,9 @@ bool SplitterContext::RecomposeDestination(ServerSplitterInfo& info,
         return true;
     }

-    auto* start_head = &GetData(static_cast<u32>(header.resource_id_base));
+    auto* start_head = &GetData(header.resource_id_base);
     current_head = start_head;
-    std::vector<s32_le> resource_ids(static_cast<size_t>(size - 1));
+    std::vector<s32_le> resource_ids(size - 1);
     if (!AudioCommon::CanConsumeBuffer(input.size(), input_offset,
                                        resource_ids.size() * sizeof(s32_le))) {
         LOG_ERROR(Audio, "Buffer is an invalid size!");
@@ -399,8 +397,8 @@ bool SplitterContext::RecomposeDestination(ServerSplitterInfo& info,
     std::memcpy(resource_ids.data(), input.data() + input_offset,
                 resource_ids.size() * sizeof(s32_le));

-    for (const auto resource_id : resource_ids) {
-        auto* head = &GetData(static_cast<u32>(resource_id));
+    for (auto resource_id : resource_ids) {
+        auto* head = &GetData(resource_id);
         current_head->SetNextDestination(head);
         current_head = head;
     }

@@ -446,7 +444,7 @@ bool NodeStates::DepthFirstSearch(EdgeMatrix& edge_matrix) {
         const auto node_id = static_cast<s32>(i);

         // If we don't have a state, send to our index stack for work
-        if (GetState(i) == State::NoState) {
+        if (GetState(i) == NodeStates::State::NoState) {
            index_stack.push(node_id);
         }

@@ -455,19 +453,19 @@ bool NodeStates::DepthFirstSearch(EdgeMatrix& edge_matrix) {
         // Get the current node
         const auto current_stack_index = index_stack.top();
         // Check if we've seen the node yet
-        const auto index_state = GetState(static_cast<u32>(current_stack_index));
-        if (index_state == State::NoState) {
+        const auto index_state = GetState(current_stack_index);
+        if (index_state == NodeStates::State::NoState) {
             // Mark the node as seen
-            UpdateState(State::InFound, static_cast<u32>(current_stack_index));
-        } else if (index_state == State::InFound) {
+            UpdateState(NodeStates::State::InFound, current_stack_index);
+        } else if (index_state == NodeStates::State::InFound) {
             // We've seen this node before, mark it as completed
-            UpdateState(State::InCompleted, static_cast<u32>(current_stack_index));
+            UpdateState(NodeStates::State::InCompleted, current_stack_index);
             // Update our index list
             PushTsortResult(current_stack_index);
             // Pop the stack
             index_stack.pop();
             continue;
-        } else if (index_state == State::InCompleted) {
+        } else if (index_state == NodeStates::State::InCompleted) {
             // If our node is already sorted, clear it
             index_stack.pop();
             continue;

@@ -481,11 +479,11 @@ bool NodeStates::DepthFirstSearch(EdgeMatrix& edge_matrix) {
             }

             // Check if our node exists
-            const auto node_state = GetState(static_cast<u32>(j));
-            if (node_state == State::NoState) {
+            const auto node_state = GetState(j);
+            if (node_state == NodeStates::State::NoState) {
                 // Add more work
                 index_stack.push(j);
-            } else if (node_state == State::InFound) {
+            } else if (node_state == NodeStates::State::InFound) {
                 UNREACHABLE_MSG("Node start marked as found");
                 ResetState();
                 return false;
@@ -509,17 +507,17 @@ void NodeStates::ResetState() {
     }
 }

-void NodeStates::UpdateState(State state, std::size_t i) {
+void NodeStates::UpdateState(NodeStates::State state, std::size_t i) {
     switch (state) {
-    case State::NoState:
+    case NodeStates::State::NoState:
         was_node_found[i] = false;
         was_node_completed[i] = false;
         break;
-    case State::InFound:
+    case NodeStates::State::InFound:
         was_node_found[i] = true;
         was_node_completed[i] = false;
         break;
-    case State::InCompleted:
+    case NodeStates::State::InCompleted:
         was_node_found[i] = false;
         was_node_completed[i] = true;
         break;

@@ -530,13 +528,13 @@ NodeStates::State NodeStates::GetState(std::size_t i) {
     ASSERT(i < node_count);
     if (was_node_found[i]) {
         // If our node exists in our found list
-        return State::InFound;
+        return NodeStates::State::InFound;
     } else if (was_node_completed[i]) {
         // If node is in the completed list
-        return State::InCompleted;
+        return NodeStates::State::InCompleted;
     } else {
         // If in neither
-        return State::NoState;
+        return NodeStates::State::NoState;
     }
 }

@@ -603,16 +601,16 @@ std::size_t EdgeMatrix::GetNodeCount() const {

 void EdgeMatrix::SetState(s32 a, s32 b, bool state) {
     ASSERT(InRange(a, b));
-    edge_matrix.at(static_cast<u32>(a) * node_count + static_cast<u32>(b)) = state;
+    edge_matrix.at(a * node_count + b) = state;
 }

 bool EdgeMatrix::GetState(s32 a, s32 b) {
     ASSERT(InRange(a, b));
-    return edge_matrix.at(static_cast<u32>(a) * node_count + static_cast<u32>(b));
+    return edge_matrix.at(a * node_count + b);
 }

 bool EdgeMatrix::InRange(s32 a, s32 b) const {
-    const std::size_t pos = static_cast<u32>(a) * node_count + static_cast<u32>(b);
+    const std::size_t pos = a * node_count + b;
     return pos < (node_count * node_count);
 }

@@ -5,16 +5,8 @@
 #pragma once

 #include <cstddef>
-#include "common/common_types.h"
-
-#if defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wsign-conversion"
-#endif
 #include <SoundTouch.h>
-#if defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif
+#include "common/common_types.h"

 namespace AudioCore {

@@ -98,7 +98,7 @@ void ServerVoiceInfo::UpdateParameters(const VoiceInfo::InParams& voice_in,
                                        BehaviorInfo& behavior_info) {
     in_params.in_use = voice_in.is_in_use;
     in_params.id = voice_in.id;
-    in_params.node_id = static_cast<s32>(voice_in.node_id);
+    in_params.node_id = voice_in.node_id;
     in_params.last_playstate = in_params.current_playstate;
     switch (voice_in.play_state) {
     case PlayState::Paused:

@@ -220,10 +220,8 @@ void ServerVoiceInfo::UpdateWaveBuffer(ServerWaveBuffer& out_wavebuffer,
     if (sample_format == SampleFormat::Pcm16) {
         const auto buffer_size = in_wave_buffer.buffer_size;
         if (in_wave_buffer.start_sample_offset < 0 || in_wave_buffer.end_sample_offset < 0 ||
-            (buffer_size <
-             (sizeof(s16) * static_cast<u32>(in_wave_buffer.start_sample_offset))) ||
-            (buffer_size <
-             (sizeof(s16) * static_cast<u32>(in_wave_buffer.end_sample_offset)))) {
+            (buffer_size < (sizeof(s16) * in_wave_buffer.start_sample_offset)) ||
+            (buffer_size < (sizeof(s16) * in_wave_buffer.end_sample_offset))) {
             // TODO(ogniK): Write error info
             return;
         }
@@ -256,8 +254,8 @@ void ServerVoiceInfo::WriteOutStatus(
         voice_out.played_sample_count = 0;
         voice_out.voice_dropped = false;
     } else if (!in_params.is_new) {
-        voice_out.wave_buffer_consumed = static_cast<u32>(voice_states[0]->wave_buffer_consumed);
-        voice_out.played_sample_count = static_cast<u64>(voice_states[0]->played_sample_count);
+        voice_out.wave_buffer_consumed = voice_states[0]->wave_buffer_consumed;
+        voice_out.played_sample_count = voice_states[0]->played_sample_count;
         voice_out.voice_dropped = in_params.voice_drop_flag;
     } else {
         voice_out.wave_buffer_consumed = 0;

@@ -295,8 +293,8 @@ bool ServerVoiceInfo::UpdateForCommandGeneration(VoiceContext& voice_context) {
         in_params.is_new = false;
     }

-    const auto channel_count = static_cast<size_t>(in_params.channel_count);
-    for (size_t i = 0; i < channel_count; i++) {
+    const s32 channel_count = in_params.channel_count;
+    for (s32 i = 0; i < channel_count; i++) {
         const auto channel_resource = in_params.voice_channel_resource_id[i];
         dsp_voice_states[i] =
             &voice_context.GetDspSharedState(static_cast<std::size_t>(channel_resource));
@@ -305,9 +303,8 @@ bool ServerVoiceInfo::UpdateForCommandGeneration(VoiceContext& voice_context) {
 }

 void ServerVoiceInfo::ResetResources(VoiceContext& voice_context) {
-    const auto channel_count = static_cast<size_t>(in_params.channel_count);
-
-    for (size_t i = 0; i < channel_count; i++) {
+    const s32 channel_count = in_params.channel_count;
+    for (s32 i = 0; i < channel_count; i++) {
         const auto channel_resource = in_params.voice_channel_resource_id[i];
         auto& dsp_state =
             voice_context.GetDspSharedState(static_cast<std::size_t>(channel_resource));

@@ -328,9 +325,9 @@ bool ServerVoiceInfo::UpdateParametersForCommandGeneration(

     switch (in_params.current_playstate) {
     case ServerPlayState::Play: {
-        for (size_t i = 0; i < AudioCommon::MAX_WAVE_BUFFERS; i++) {
+        for (std::size_t i = 0; i < AudioCommon::MAX_WAVE_BUFFERS; i++) {
             if (!in_params.wave_buffer[i].sent_to_dsp) {
-                for (size_t channel = 0; channel < static_cast<size_t>(channel_count); channel++) {
+                for (s32 channel = 0; channel < channel_count; channel++) {
                     dsp_voice_states[channel]->is_wave_buffer_valid[i] = true;
                 }
                 in_params.wave_buffer[i].sent_to_dsp = true;
@@ -347,13 +344,12 @@ bool ServerVoiceInfo::UpdateParametersForCommandGeneration(
     case ServerPlayState::RequestStop: {
         for (std::size_t i = 0; i < AudioCommon::MAX_WAVE_BUFFERS; i++) {
             in_params.wave_buffer[i].sent_to_dsp = true;
-            for (std::size_t channel = 0; channel < static_cast<size_t>(channel_count); channel++) {
+            for (s32 channel = 0; channel < channel_count; channel++) {
                 auto* dsp_state = dsp_voice_states[channel];

                 if (dsp_state->is_wave_buffer_valid[i]) {
                     dsp_state->wave_buffer_index =
-                        static_cast<s32>(static_cast<u32>(dsp_state->wave_buffer_index + 1) %
-                                         AudioCommon::MAX_WAVE_BUFFERS);
+                        (dsp_state->wave_buffer_index + 1) % AudioCommon::MAX_WAVE_BUFFERS;
                     dsp_state->wave_buffer_consumed++;
                 }

@@ -361,7 +357,7 @@ bool ServerVoiceInfo::UpdateParametersForCommandGeneration(
             }
         }

-        for (size_t channel = 0; channel < static_cast<size_t>(channel_count); channel++) {
+        for (s32 channel = 0; channel < channel_count; channel++) {
             auto* dsp_state = dsp_voice_states[channel];
             dsp_state->offset = 0;
             dsp_state->played_sample_count = 0;
@@ -387,16 +383,15 @@ void ServerVoiceInfo::FlushWaveBuffers(
     auto wave_head = in_params.wave_bufffer_head;

     for (u8 i = 0; i < flush_count; i++) {
-        in_params.wave_buffer[static_cast<u16>(wave_head)].sent_to_dsp = true;
-        for (size_t channel = 0; channel < static_cast<size_t>(channel_count); channel++) {
+        in_params.wave_buffer[wave_head].sent_to_dsp = true;
+        for (s32 channel = 0; channel < channel_count; channel++) {
             auto* dsp_state = dsp_voice_states[channel];
             dsp_state->wave_buffer_consumed++;
-            dsp_state->is_wave_buffer_valid[static_cast<u16>(wave_head)] = false;
-            dsp_state->wave_buffer_index = static_cast<s32>(
-                static_cast<u32>(dsp_state->wave_buffer_index + 1) % AudioCommon::MAX_WAVE_BUFFERS);
+            dsp_state->is_wave_buffer_valid[wave_head] = false;
+            dsp_state->wave_buffer_index =
+                (dsp_state->wave_buffer_index + 1) % AudioCommon::MAX_WAVE_BUFFERS;
         }
-        wave_head =
-            static_cast<s16>(static_cast<u32>(wave_head + 1) % AudioCommon::MAX_WAVE_BUFFERS);
+        wave_head = (wave_head + 1) % AudioCommon::MAX_WAVE_BUFFERS;
     }
 }

@@ -488,7 +483,7 @@ s32 VoiceContext::DecodePcm16(s32* output_buffer, ServerWaveBuffer* wave_buffer,
     const auto samples_remaining =
         (wave_buffer->end_sample_offset - wave_buffer->start_sample_offset) - buffer_offset;
     const auto start_offset = (wave_buffer->start_sample_offset + buffer_offset) * channel_count;
-    const auto buffer_pos = wave_buffer->buffer_address + static_cast<VAddr>(start_offset);
+    const auto buffer_pos = wave_buffer->buffer_address + start_offset;

     s16* buffer_data = reinterpret_cast<s16*>(memory.GetPointer(buffer_pos));

@@ -41,8 +41,8 @@ public:
     Fiber(const Fiber&) = delete;
     Fiber& operator=(const Fiber&) = delete;

-    Fiber(Fiber&&) = delete;
-    Fiber& operator=(Fiber&&) = delete;
+    Fiber(Fiber&&) = default;
+    Fiber& operator=(Fiber&&) = default;

     /// Yields control from Fiber 'from' to Fiber 'to'
     /// Fiber 'from' must be the currently running fiber.
@@ -189,8 +189,7 @@ template <typename T>
         return {};
     }
     last = std::min<std::size_t>(last, vector.size());
-    return std::vector<T>(vector.begin() + static_cast<std::ptrdiff_t>(first),
-                          vector.begin() + static_cast<std::ptrdiff_t>(first + last));
+    return std::vector<T>(vector.begin() + first, vector.begin() + first + last);
 }

 enum class DirectorySeparator {

@@ -27,7 +27,7 @@ struct Rectangle {
         if constexpr (std::is_floating_point_v<T>) {
             return std::abs(right - left);
         } else {
-            return static_cast<T>(std::abs(static_cast<std::make_signed_t<T>>(right - left)));
+            return std::abs(static_cast<std::make_signed_t<T>>(right - left));
         }
     }

@@ -35,7 +35,7 @@ struct Rectangle {
         if constexpr (std::is_floating_point_v<T>) {
             return std::abs(bottom - top);
         } else {
-            return static_cast<T>(std::abs(static_cast<std::make_signed_t<T>>(bottom - top)));
+            return std::abs(static_cast<std::make_signed_t<T>>(bottom - top));
         }
     }

@@ -320,7 +320,7 @@ private:
         }

         const auto begin_range = list.begin();
-        const auto end_range = std::next(begin_range, static_cast<std::ptrdiff_t>(shift));
+        const auto end_range = std::next(begin_range, shift);
         list.splice(list.end(), list, begin_range, end_range);
     }

@@ -15,14 +15,6 @@ namespace Common {
  */
 class SpinLock {
 public:
-    SpinLock() = default;
-
-    SpinLock(const SpinLock&) = delete;
-    SpinLock& operator=(const SpinLock&) = delete;
-
-    SpinLock(SpinLock&&) = delete;
-    SpinLock& operator=(SpinLock&&) = delete;
-
     void lock();
     void unlock();
     [[nodiscard]] bool try_lock();
@ -504,35 +504,35 @@ bool operator==(const S& p, const swap_struct_t<T, F> v) {
|
|||
template <typename T>
|
||||
struct swap_64_t {
|
||||
static T swap(T x) {
|
||||
return static_cast<T>(Common::swap64(static_cast<u64>(x)));
|
||||
return static_cast<T>(Common::swap64(x));
|
||||
}
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
struct swap_32_t {
|
||||
static T swap(T x) {
|
||||
return static_cast<T>(Common::swap32(static_cast<u32>(x)));
|
||||
return static_cast<T>(Common::swap32(x));
|
||||
}
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
struct swap_16_t {
|
||||
static T swap(T x) {
|
||||
return static_cast<T>(Common::swap16(static_cast<u16>(x)));
|
||||
return static_cast<T>(Common::swap16(x));
|
||||
}
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
struct swap_float_t {
|
||||
static T swap(T x) {
|
||||
return static_cast<T>(Common::swapf(static_cast<float>(x)));
|
||||
return static_cast<T>(Common::swapf(x));
|
||||
}
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
struct swap_double_t {
|
||||
static T swap(T x) {
|
||||
return static_cast<T>(Common::swapd(static_cast<double>(x)));
return static_cast<T>(Common::swapd(x));
}
};
@ -33,7 +33,7 @@ struct ThreadQueueList {
}
}
return static_cast<Priority>(-1);
return -1;
}
[[nodiscard]] T get_first() const {
@ -156,7 +156,7 @@ private:
void link(Priority priority) {
Queue* cur = &queues[priority];
for (auto i = static_cast<int>(priority - 1); i >= 0; --i) {
for (int i = priority - 1; i >= 0; --i) {
if (queues[i].next_nonempty != UnlinkedTag()) {
cur->next_nonempty = queues[i].next_nonempty;
queues[i].next_nonempty = cur;
@ -630,9 +630,8 @@ else()
-Werror=implicit-fallthrough
-Werror=reorder
-Werror=sign-compare
-Werror=sign-conversion
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
-Werror=unused-but-set-parameter
-Werror=unused-but-set-variable
-Werror=unused-variable
)
endif()
@ -147,18 +147,10 @@ std::vector<ARM_Interface::BacktraceEntry> ARM_Interface::GetBacktraceFromContex
auto fp = ctx.cpu_registers[29];
auto lr = ctx.cpu_registers[30];
while (true) {
out.push_back({
.module = "",
.address = 0,
.original_address = lr,
.offset = 0,
.name = "",
});
if (fp == 0) {
out.push_back({"", 0, lr, 0});
if (!fp) {
break;
}
lr = memory.Read64(fp + 8) - 4;
fp = memory.Read64(fp);
}
@ -211,18 +203,10 @@ std::vector<ARM_Interface::BacktraceEntry> ARM_Interface::GetBacktrace() const {
auto fp = GetReg(29);
auto lr = GetReg(30);
while (true) {
out.push_back({
.module = "",
.address = 0,
.original_address = lr,
.offset = 0,
.name = "",
});
if (fp == 0) {
out.push_back({"", 0, lr, 0, ""});
if (!fp) {
break;
}
lr = memory.Read64(fp + 8) - 4;
fp = memory.Read64(fp);
}
@ -93,14 +93,14 @@ public:
* @param index Register index
* @return Returns the value in the register
*/
virtual u64 GetReg(std::size_t index) const = 0;
virtual u64 GetReg(int index) const = 0;
/**
* Set an ARM register
* @param index Register index
* @param value Value to set register to
*/
virtual void SetReg(std::size_t index, u64 value) = 0;
virtual void SetReg(int index, u64 value) = 0;
/**
* Gets the value of a specified vector register.
@ -108,7 +108,7 @@ public:
* @param index The index of the vector register.
* @return the value within the vector register.
*/
virtual u128 GetVectorReg(std::size_t index) const = 0;
virtual u128 GetVectorReg(int index) const = 0;
/**
* Sets a given value into a vector register.
@ -116,7 +116,7 @@ public:
* @param index The index of the vector register.
* @param value The new value to place in the register.
*/
virtual void SetVectorReg(std::size_t index, u128 value) = 0;
virtual void SetVectorReg(int index, u128 value) = 0;
/**
* Get the current PSTATE register
@ -21,8 +21,8 @@ public:
CPUInterruptHandler(const CPUInterruptHandler&) = delete;
CPUInterruptHandler& operator=(const CPUInterruptHandler&) = delete;
CPUInterruptHandler(CPUInterruptHandler&&) = delete;
CPUInterruptHandler& operator=(CPUInterruptHandler&&) = delete;
CPUInterruptHandler(CPUInterruptHandler&&) = default;
CPUInterruptHandler& operator=(CPUInterruptHandler&&) = default;
bool IsInterrupted() const {
return is_interrupted;
@ -111,7 +111,7 @@ public:
}
return 0U;
}
return static_cast<u64>(std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0));
return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
}
ARM_Dynarmic_32& parent;
@ -210,19 +210,19 @@ u64 ARM_Dynarmic_32::GetPC() const {
return jit->Regs()[15];
}
u64 ARM_Dynarmic_32::GetReg(std::size_t index) const {
u64 ARM_Dynarmic_32::GetReg(int index) const {
return jit->Regs()[index];
}
void ARM_Dynarmic_32::SetReg(std::size_t index, u64 value) {
void ARM_Dynarmic_32::SetReg(int index, u64 value) {
jit->Regs()[index] = static_cast<u32>(value);
}
u128 ARM_Dynarmic_32::GetVectorReg(std::size_t index) const {
u128 ARM_Dynarmic_32::GetVectorReg(int index) const {
return {};
}
void ARM_Dynarmic_32::SetVectorReg(std::size_t index, u128 value) {}
void ARM_Dynarmic_32::SetVectorReg(int index, u128 value) {}
u32 ARM_Dynarmic_32::GetPSTATE() const {
return jit->Cpsr();
@ -35,10 +35,10 @@ public:
void SetPC(u64 pc) override;
u64 GetPC() const override;
u64 GetReg(std::size_t index) const override;
void SetReg(std::size_t index, u64 value) override;
u128 GetVectorReg(std::size_t index) const override;
void SetVectorReg(std::size_t index, u128 value) override;
u64 GetReg(int index) const override;
void SetReg(int index, u64 value) override;
u128 GetVectorReg(int index) const override;
void SetVectorReg(int index, u128 value) override;
u32 GetPSTATE() const override;
void SetPSTATE(u32 pstate) override;
void Run() override;
@ -148,7 +148,7 @@ public:
|
|||
}
|
||||
return 0U;
|
||||
}
|
||||
return static_cast<u64>(std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0));
|
||||
return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
|
||||
}
|
||||
|
||||
u64 GetCNTPCT() override {
|
||||
|
@ -265,19 +265,19 @@ u64 ARM_Dynarmic_64::GetPC() const {
|
|||
return jit->GetPC();
|
||||
}
|
||||
|
||||
u64 ARM_Dynarmic_64::GetReg(std::size_t index) const {
|
||||
u64 ARM_Dynarmic_64::GetReg(int index) const {
|
||||
return jit->GetRegister(index);
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_64::SetReg(std::size_t index, u64 value) {
|
||||
void ARM_Dynarmic_64::SetReg(int index, u64 value) {
|
||||
jit->SetRegister(index, value);
|
||||
}
|
||||
|
||||
u128 ARM_Dynarmic_64::GetVectorReg(std::size_t index) const {
|
||||
u128 ARM_Dynarmic_64::GetVectorReg(int index) const {
|
||||
return jit->GetVector(index);
|
||||
}
|
||||
|
||||
void ARM_Dynarmic_64::SetVectorReg(std::size_t index, u128 value) {
|
||||
void ARM_Dynarmic_64::SetVectorReg(int index, u128 value) {
|
||||
jit->SetVector(index, value);
|
||||
}
|
||||
|
||||
|
|
|
@ -33,10 +33,10 @@ public:
|
|||
|
||||
void SetPC(u64 pc) override;
|
||||
u64 GetPC() const override;
|
||||
u64 GetReg(std::size_t index) const override;
|
||||
void SetReg(std::size_t index, u64 value) override;
|
||||
u128 GetVectorReg(std::size_t index) const override;
|
||||
void SetVectorReg(std::size_t index, u128 value) override;
|
||||
u64 GetReg(int index) const override;
|
||||
void SetReg(int index, u64 value) override;
|
||||
u128 GetVectorReg(int index) const override;
|
||||
void SetVectorReg(int index, u128 value) override;
|
||||
u32 GetPSTATE() const override;
|
||||
void SetPSTATE(u32 pstate) override;
|
||||
void Run() override;
|
||||
|
|
|
@ -96,35 +96,35 @@ u64 ARM_Unicorn::GetPC() const {
|
|||
return val;
|
||||
}
|
||||
|
||||
u64 ARM_Unicorn::GetReg(std::size_t index) const {
|
||||
u64 ARM_Unicorn::GetReg(int regn) const {
|
||||
u64 val{};
|
||||
auto treg = UC_ARM64_REG_SP;
|
||||
if (index <= 28) {
|
||||
treg = static_cast<uc_arm64_reg>(UC_ARM64_REG_X0 + static_cast<int>(index));
|
||||
} else if (index < 31) {
|
||||
treg = static_cast<uc_arm64_reg>(UC_ARM64_REG_X29 + static_cast<int>(index) - 29);
|
||||
if (regn <= 28) {
|
||||
treg = (uc_arm64_reg)(UC_ARM64_REG_X0 + regn);
|
||||
} else if (regn < 31) {
|
||||
treg = (uc_arm64_reg)(UC_ARM64_REG_X29 + regn - 29);
|
||||
}
|
||||
CHECKED(uc_reg_read(uc, treg, &val));
|
||||
return val;
|
||||
}
|
||||
|
||||
void ARM_Unicorn::SetReg(std::size_t index, u64 value) {
|
||||
void ARM_Unicorn::SetReg(int regn, u64 val) {
|
||||
auto treg = UC_ARM64_REG_SP;
|
||||
if (index <= 28) {
|
||||
treg = static_cast<uc_arm64_reg>(UC_ARM64_REG_X0 + static_cast<int>(index));
|
||||
} else if (index < 31) {
|
||||
treg = static_cast<uc_arm64_reg>(UC_ARM64_REG_X29 + static_cast<int>(index) - 29);
|
||||
if (regn <= 28) {
|
||||
treg = (uc_arm64_reg)(UC_ARM64_REG_X0 + regn);
|
||||
} else if (regn < 31) {
|
||||
treg = (uc_arm64_reg)(UC_ARM64_REG_X29 + regn - 29);
|
||||
}
|
||||
CHECKED(uc_reg_write(uc, treg, &value));
|
||||
CHECKED(uc_reg_write(uc, treg, &val));
|
||||
}
|
||||
|
||||
u128 ARM_Unicorn::GetVectorReg(std::size_t /*index*/) const {
|
||||
u128 ARM_Unicorn::GetVectorReg(int /*index*/) const {
|
||||
UNIMPLEMENTED();
|
||||
static constexpr u128 res{};
|
||||
return res;
|
||||
}
|
||||
|
||||
void ARM_Unicorn::SetVectorReg(std::size_t /*index*/, u128 /*value*/) {
|
||||
void ARM_Unicorn::SetVectorReg(int /*index*/, u128 /*value*/) {
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
||||
|
@ -217,8 +217,8 @@ void ARM_Unicorn::SaveContext(ThreadContext64& ctx) {
|
|||
CHECKED(uc_reg_read(uc, UC_ARM64_REG_PC, &ctx.pc));
|
||||
CHECKED(uc_reg_read(uc, UC_ARM64_REG_NZCV, &ctx.pstate));
|
||||
|
||||
for (std::size_t i = 0; i < 29; ++i) {
|
||||
uregs[i] = UC_ARM64_REG_X0 + static_cast<int>(i);
|
||||
for (auto i = 0; i < 29; ++i) {
|
||||
uregs[i] = UC_ARM64_REG_X0 + i;
|
||||
tregs[i] = &ctx.cpu_registers[i];
|
||||
}
|
||||
uregs[29] = UC_ARM64_REG_X29;
|
||||
|
@ -228,8 +228,8 @@ void ARM_Unicorn::SaveContext(ThreadContext64& ctx) {
|
|||
|
||||
CHECKED(uc_reg_read_batch(uc, uregs, tregs, 31));
|
||||
|
||||
for (std::size_t i = 0; i < 32; ++i) {
|
||||
uregs[i] = UC_ARM64_REG_Q0 + static_cast<int>(i);
|
||||
for (int i = 0; i < 32; ++i) {
|
||||
uregs[i] = UC_ARM64_REG_Q0 + i;
|
||||
tregs[i] = &ctx.vector_registers[i];
|
||||
}
|
||||
|
||||
|
@ -244,8 +244,8 @@ void ARM_Unicorn::LoadContext(const ThreadContext64& ctx) {
|
|||
CHECKED(uc_reg_write(uc, UC_ARM64_REG_PC, &ctx.pc));
|
||||
CHECKED(uc_reg_write(uc, UC_ARM64_REG_NZCV, &ctx.pstate));
|
||||
|
||||
for (std::size_t i = 0; i < 29; ++i) {
|
||||
uregs[i] = UC_ARM64_REG_X0 + static_cast<int>(i);
|
||||
for (int i = 0; i < 29; ++i) {
|
||||
uregs[i] = UC_ARM64_REG_X0 + i;
|
||||
tregs[i] = (void*)&ctx.cpu_registers[i];
|
||||
}
|
||||
uregs[29] = UC_ARM64_REG_X29;
|
||||
|
@ -255,8 +255,8 @@ void ARM_Unicorn::LoadContext(const ThreadContext64& ctx) {
|
|||
|
||||
CHECKED(uc_reg_write_batch(uc, uregs, tregs, 31));
|
||||
|
||||
for (std::size_t i = 0; i < 32; ++i) {
|
||||
uregs[i] = UC_ARM64_REG_Q0 + static_cast<int>(i);
|
||||
for (auto i = 0; i < 32; ++i) {
|
||||
uregs[i] = UC_ARM64_REG_Q0 + i;
|
||||
tregs[i] = (void*)&ctx.vector_registers[i];
|
||||
}
|
||||
|
||||
|
|
|
@ -26,10 +26,10 @@ public:
|
|||
|
||||
void SetPC(u64 pc) override;
|
||||
u64 GetPC() const override;
|
||||
u64 GetReg(std::size_t index) const override;
|
||||
void SetReg(std::size_t index, u64 value) override;
|
||||
u128 GetVectorReg(std::size_t index) const override;
|
||||
void SetVectorReg(std::size_t index, u128 value) override;
|
||||
u64 GetReg(int index) const override;
|
||||
void SetReg(int index, u64 value) override;
|
||||
u128 GetVectorReg(int index) const override;
|
||||
void SetVectorReg(int index, u128 value) override;
|
||||
u32 GetPSTATE() const override;
|
||||
void SetPSTATE(u32 pstate) override;
|
||||
VAddr GetTlsAddress() const override;
|
||||
|
|
|
@ -140,8 +140,7 @@ void CoreTiming::AddTicks(u64 ticks) {
|
|||
void CoreTiming::Idle() {
|
||||
if (!event_queue.empty()) {
|
||||
const u64 next_event_time = event_queue.front().time;
|
||||
const u64 next_ticks =
|
||||
static_cast<u64>(nsToCycles(std::chrono::nanoseconds(next_event_time))) + 10;
|
||||
const u64 next_ticks = nsToCycles(std::chrono::nanoseconds(next_event_time)) + 10U;
|
||||
if (next_ticks > ticks) {
|
||||
ticks = next_ticks;
|
||||
}
|
||||
|
@ -188,7 +187,7 @@ void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
|
|||
|
||||
std::optional<s64> CoreTiming::Advance() {
|
||||
std::scoped_lock lock{advance_lock, basic_lock};
|
||||
global_timer = static_cast<u64>(GetGlobalTimeNs().count());
|
||||
global_timer = GetGlobalTimeNs().count();
|
||||
|
||||
while (!event_queue.empty() && event_queue.front().time <= global_timer) {
|
||||
Event evt = std::move(event_queue.front());
|
||||
|
@ -202,11 +201,11 @@ std::optional<s64> CoreTiming::Advance() {
|
|||
}
|
||||
|
||||
basic_lock.lock();
|
||||
global_timer = static_cast<u64>(GetGlobalTimeNs().count());
|
||||
global_timer = GetGlobalTimeNs().count();
|
||||
}
|
||||
|
||||
if (!event_queue.empty()) {
|
||||
const auto next_time = static_cast<s64>(event_queue.front().time - global_timer);
|
||||
const s64 next_time = event_queue.front().time - global_timer;
|
||||
return next_time;
|
||||
} else {
|
||||
return std::nullopt;
|
||||
|
@ -241,14 +240,14 @@ std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
|
|||
if (is_multicore) {
|
||||
return clock->GetTimeNS();
|
||||
}
|
||||
return CyclesToNs(static_cast<s64>(ticks));
|
||||
return CyclesToNs(ticks);
|
||||
}
|
||||
|
||||
std::chrono::microseconds CoreTiming::GetGlobalTimeUs() const {
|
||||
if (is_multicore) {
|
||||
return clock->GetTimeUS();
|
||||
}
|
||||
return CyclesToUs(static_cast<s64>(ticks));
|
||||
return CyclesToUs(ticks);
|
||||
}
|
||||
|
||||
} // namespace Core::Timing
|
||||
|
|
|
@ -21,9 +21,9 @@ s64 msToCycles(std::chrono::milliseconds ms) {
|
|||
}
|
||||
if (static_cast<u64>(ms.count()) > MAX_VALUE_TO_MULTIPLY) {
|
||||
LOG_DEBUG(Core_Timing, "Time very big, do rounding");
|
||||
return static_cast<s64>(Hardware::BASE_CLOCK_RATE * static_cast<u64>(ms.count() / 1000));
|
||||
return Hardware::BASE_CLOCK_RATE * (ms.count() / 1000);
|
||||
}
|
||||
return static_cast<s64>((Hardware::BASE_CLOCK_RATE * static_cast<u64>(ms.count())) / 1000);
|
||||
return (Hardware::BASE_CLOCK_RATE * ms.count()) / 1000;
|
||||
}
|
||||
|
||||
s64 usToCycles(std::chrono::microseconds us) {
|
||||
|
@ -33,55 +33,51 @@ s64 usToCycles(std::chrono::microseconds us) {
|
|||
}
|
||||
if (static_cast<u64>(us.count()) > MAX_VALUE_TO_MULTIPLY) {
|
||||
LOG_DEBUG(Core_Timing, "Time very big, do rounding");
|
||||
return static_cast<s64>(Hardware::BASE_CLOCK_RATE * static_cast<u64>(us.count() / 1000000));
|
||||
return Hardware::BASE_CLOCK_RATE * (us.count() / 1000000);
|
||||
}
|
||||
return static_cast<s64>((Hardware::BASE_CLOCK_RATE * static_cast<u64>(us.count())) / 1000000);
|
||||
return (Hardware::BASE_CLOCK_RATE * us.count()) / 1000000;
|
||||
}
|
||||
|
||||
s64 nsToCycles(std::chrono::nanoseconds ns) {
|
||||
const u128 temp =
|
||||
Common::Multiply64Into128(static_cast<u64>(ns.count()), Hardware::BASE_CLOCK_RATE);
|
||||
return static_cast<s64>(Common::Divide128On32(temp, static_cast<u32>(1000000000)).first);
|
||||
const u128 temporal = Common::Multiply64Into128(ns.count(), Hardware::BASE_CLOCK_RATE);
|
||||
return Common::Divide128On32(temporal, static_cast<u32>(1000000000)).first;
|
||||
}
|
||||
|
||||
u64 msToClockCycles(std::chrono::milliseconds ms) {
|
||||
const auto count = static_cast<u64>(ms.count());
|
||||
const u128 temp = Common::Multiply64Into128(count, Hardware::CNTFREQ);
|
||||
u64 msToClockCycles(std::chrono::milliseconds ns) {
|
||||
const u128 temp = Common::Multiply64Into128(ns.count(), Hardware::CNTFREQ);
|
||||
return Common::Divide128On32(temp, 1000).first;
|
||||
}
|
||||
|
||||
u64 usToClockCycles(std::chrono::microseconds us) {
|
||||
const auto count = static_cast<u64>(us.count());
|
||||
const u128 temp = Common::Multiply64Into128(count, Hardware::CNTFREQ);
|
||||
u64 usToClockCycles(std::chrono::microseconds ns) {
|
||||
const u128 temp = Common::Multiply64Into128(ns.count(), Hardware::CNTFREQ);
|
||||
return Common::Divide128On32(temp, 1000000).first;
|
||||
}
|
||||
|
||||
u64 nsToClockCycles(std::chrono::nanoseconds ns) {
|
||||
const auto count = static_cast<u64>(ns.count());
|
||||
const u128 temp = Common::Multiply64Into128(count, Hardware::CNTFREQ);
|
||||
const u128 temp = Common::Multiply64Into128(ns.count(), Hardware::CNTFREQ);
|
||||
return Common::Divide128On32(temp, 1000000000).first;
|
||||
}
|
||||
|
||||
u64 CpuCyclesToClockCycles(u64 ticks) {
|
||||
const u128 temp = Common::Multiply64Into128(ticks, Hardware::CNTFREQ);
|
||||
return Common::Divide128On32(temp, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
|
||||
const u128 temporal = Common::Multiply64Into128(ticks, Hardware::CNTFREQ);
|
||||
return Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
|
||||
}
|
||||
|
||||
std::chrono::milliseconds CyclesToMs(s64 cycles) {
|
||||
const u128 temp = Common::Multiply64Into128(static_cast<u64>(cycles), 1000);
|
||||
const u64 ms = Common::Divide128On32(temp, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
|
||||
const u128 temporal = Common::Multiply64Into128(cycles, 1000);
|
||||
u64 ms = Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
|
||||
return std::chrono::milliseconds(ms);
|
||||
}
|
||||
|
||||
std::chrono::nanoseconds CyclesToNs(s64 cycles) {
|
||||
const u128 temp = Common::Multiply64Into128(static_cast<u64>(cycles), 1000000000);
|
||||
const u64 ns = Common::Divide128On32(temp, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
|
||||
const u128 temporal = Common::Multiply64Into128(cycles, 1000000000);
|
||||
u64 ns = Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
|
||||
return std::chrono::nanoseconds(ns);
|
||||
}
|
||||
|
||||
std::chrono::microseconds CyclesToUs(s64 cycles) {
|
||||
const u128 temp = Common::Multiply64Into128(static_cast<u64>(cycles), 1000000);
|
||||
const u64 us = Common::Divide128On32(temp, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
|
||||
const u128 temporal = Common::Multiply64Into128(cycles, 1000000);
|
||||
u64 us = Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;
|
||||
return std::chrono::microseconds(us);
|
||||
}
|
||||
|
||||
|
|
|
@ -12,8 +12,8 @@ namespace Core::Timing {
|
|||
s64 msToCycles(std::chrono::milliseconds ms);
|
||||
s64 usToCycles(std::chrono::microseconds us);
|
||||
s64 nsToCycles(std::chrono::nanoseconds ns);
|
||||
u64 msToClockCycles(std::chrono::milliseconds ms);
|
||||
u64 usToClockCycles(std::chrono::microseconds us);
|
||||
u64 msToClockCycles(std::chrono::milliseconds ns);
|
||||
u64 usToClockCycles(std::chrono::microseconds ns);
|
||||
u64 nsToClockCycles(std::chrono::nanoseconds ns);
|
||||
std::chrono::milliseconds CyclesToMs(s64 cycles);
|
||||
std::chrono::nanoseconds CyclesToNs(s64 cycles);
|
||||
|
|
|
@ -143,7 +143,6 @@ u64 GetSignatureTypeDataSize(SignatureType type) {
|
|||
return 0x3C;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
|
||||
u64 GetSignatureTypePaddingSize(SignatureType type) {
|
||||
|
@ -158,7 +157,6 @@ u64 GetSignatureTypePaddingSize(SignatureType type) {
|
|||
return 0x40;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
|
||||
SignatureType Ticket::GetSignatureType() const {
|
||||
|
@ -173,7 +171,6 @@ SignatureType Ticket::GetSignatureType() const {
|
|||
}
|
||||
|
||||
UNREACHABLE();
|
||||
return {};
|
||||
}
|
||||
|
||||
TicketData& Ticket::GetData() {
|
||||
|
@ -351,7 +348,7 @@ std::optional<Key128> DeriveSDSeed() {
|
|||
std::array<u8, 0x10> buffer{};
|
||||
std::size_t offset = 0;
|
||||
for (; offset + 0x10 < save_43.GetSize(); ++offset) {
|
||||
if (!save_43.Seek(static_cast<s64>(offset), SEEK_SET)) {
|
||||
if (!save_43.Seek(offset, SEEK_SET)) {
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
|
@ -361,7 +358,7 @@ std::optional<Key128> DeriveSDSeed() {
|
|||
}
|
||||
}
|
||||
|
||||
if (!save_43.Seek(static_cast<s64>(offset + 0x10), SEEK_SET)) {
|
||||
if (!save_43.Seek(offset + 0x10, SEEK_SET)) {
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
|
|
|
@ -161,7 +161,7 @@ static constexpr u8 CalculateMaxKeyblobSourceHash() {
|
|||
return true;
|
||||
};
|
||||
|
||||
for (std::size_t i = 0x1F; i <= 0x1F; --i) {
|
||||
for (s8 i = 0x1F; i >= 0; --i) {
|
||||
if (!is_zero(keyblob_source_hashes[i])) {
|
||||
return static_cast<u8>(i + 1);
|
||||
}
|
||||
|
|
|
@ -201,9 +201,9 @@ bool NCA::HandlePotentialHeaderDecryption() {
|
|||
}
|
||||
|
||||
std::vector<NCASectionHeader> NCA::ReadSectionHeaders() const {
|
||||
const auto number_sections = static_cast<std::size_t>(
|
||||
const std::ptrdiff_t number_sections =
|
||||
std::count_if(std::begin(header.section_tables), std::end(header.section_tables),
|
||||
[](NCASectionTableEntry entry) { return entry.media_offset > 0; }));
|
||||
[](NCASectionTableEntry entry) { return entry.media_offset > 0; });
|
||||
|
||||
std::vector<NCASectionHeader> sections(number_sections);
|
||||
const auto length_sections = SECTION_HEADER_SIZE * number_sections;
|
||||
|
|
|
@ -103,7 +103,7 @@ static u32 romfs_calc_path_hash(u32 parent, std::string_view path, u32 start,
|
|||
u32 hash = parent ^ 123456789;
|
||||
for (u32 i = 0; i < path_len; i++) {
|
||||
hash = (hash >> 5) | (hash << 27);
|
||||
hash ^= static_cast<u32>(path[start + i]);
|
||||
hash ^= path[start + i];
|
||||
}
|
||||
|
||||
return hash;
|
||||
|
|
|
@ -66,14 +66,12 @@ static bool IsEOF(IPSFileType type, const std::vector<u8>& data) {
|
|||
}
|
||||
|
||||
VirtualFile PatchIPS(const VirtualFile& in, const VirtualFile& ips) {
|
||||
if (in == nullptr || ips == nullptr) {
|
||||
if (in == nullptr || ips == nullptr)
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
const auto type = IdentifyMagic(ips->ReadBytes(0x5));
|
||||
if (type == IPSFileType::Error) {
|
||||
if (type == IPSFileType::Error)
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
auto in_data = in->ReadAllBytes();
|
||||
|
||||
|
@ -86,46 +84,37 @@ VirtualFile PatchIPS(const VirtualFile& in, const VirtualFile& ips) {
|
|||
}
|
||||
|
||||
u32 real_offset{};
|
||||
if (type == IPSFileType::IPS32) {
|
||||
real_offset = static_cast<u32>(temp[0] << 24) | static_cast<u32>(temp[1] << 16) |
|
||||
static_cast<u32>(temp[2] << 8) | temp[3];
|
||||
} else {
|
||||
real_offset =
|
||||
static_cast<u32>(temp[0] << 16) | static_cast<u32>(temp[1] << 8) | temp[2];
|
||||
}
|
||||
if (type == IPSFileType::IPS32)
|
||||
real_offset = (temp[0] << 24) | (temp[1] << 16) | (temp[2] << 8) | temp[3];
|
||||
else
|
||||
real_offset = (temp[0] << 16) | (temp[1] << 8) | temp[2];
|
||||
|
||||
u16 data_size{};
|
||||
if (ips->ReadObject(&data_size, offset) != sizeof(u16)) {
|
||||
if (ips->ReadObject(&data_size, offset) != sizeof(u16))
|
||||
return nullptr;
|
||||
}
|
||||
data_size = Common::swap16(data_size);
|
||||
offset += sizeof(u16);
|
||||
|
||||
if (data_size == 0) { // RLE
|
||||
u16 rle_size{};
|
||||
if (ips->ReadObject(&rle_size, offset) != sizeof(u16)) {
|
||||
if (ips->ReadObject(&rle_size, offset) != sizeof(u16))
|
||||
return nullptr;
|
||||
}
|
||||
rle_size = Common::swap16(rle_size);
|
||||
offset += sizeof(u16);
|
||||
|
||||
const auto data = ips->ReadByte(offset++);
|
||||
if (!data) {
|
||||
if (!data)
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (real_offset + rle_size > in_data.size()) {
|
||||
if (real_offset + rle_size > in_data.size())
|
||||
rle_size = static_cast<u16>(in_data.size() - real_offset);
|
||||
}
|
||||
std::memset(in_data.data() + real_offset, *data, rle_size);
|
||||
} else { // Standard Patch
|
||||
auto read = data_size;
|
||||
if (real_offset + read > in_data.size()) {
|
||||
if (real_offset + read > in_data.size())
|
||||
read = static_cast<u16>(in_data.size() - real_offset);
|
||||
}
|
||||
if (ips->Read(in_data.data() + real_offset, read, offset) != data_size) {
|
||||
if (ips->Read(in_data.data() + real_offset, read, offset) != data_size)
|
||||
return nullptr;
|
||||
}
|
||||
offset += data_size;
|
||||
}
|
||||
}
|
||||
|
@ -193,16 +182,14 @@ void IPSwitchCompiler::ParseFlag(const std::string& line) {
|
|||
void IPSwitchCompiler::Parse() {
|
||||
const auto bytes = patch_text->ReadAllBytes();
|
||||
std::stringstream s;
|
||||
s.write(reinterpret_cast<const char*>(bytes.data()),
|
||||
static_cast<std::streamsize>(bytes.size()));
|
||||
s.write(reinterpret_cast<const char*>(bytes.data()), bytes.size());
|
||||
|
||||
std::vector<std::string> lines;
|
||||
std::string stream_line;
|
||||
while (std::getline(s, stream_line)) {
|
||||
// Remove a trailing \r
|
||||
if (!stream_line.empty() && stream_line.back() == '\r') {
|
||||
if (!stream_line.empty() && stream_line.back() == '\r')
|
||||
stream_line.pop_back();
|
||||
}
|
||||
lines.push_back(std::move(stream_line));
|
||||
}
|
||||
|
||||
|
|
|
@ -36,14 +36,14 @@ bool DecompressBLZ(std::vector<u8>& data) {
|
|||
while (out_index > 0) {
|
||||
--index;
|
||||
auto control = data[index + start_offset];
|
||||
for (std::size_t i = 0; i < 8; ++i) {
|
||||
for (size_t i = 0; i < 8; ++i) {
|
||||
if (((control << i) & 0x80) > 0) {
|
||||
if (index < 2) {
|
||||
return false;
|
||||
}
|
||||
index -= 2;
|
||||
std::size_t segment_offset = static_cast<u32>(data[index + start_offset]) |
|
||||
static_cast<u32>(data[index + start_offset + 1] << 8);
|
||||
std::size_t segment_offset =
|
||||
data[index + start_offset] | data[index + start_offset + 1] << 8;
|
||||
std::size_t segment_size = ((segment_offset >> 12) & 0xF) + 3;
|
||||
segment_offset &= 0xFFF;
|
||||
segment_offset += 3;
|
||||
|
|
|
@ -25,9 +25,9 @@ std::pair<std::size_t, std::size_t> SearchBucketEntry(u64 offset, const BlockTyp
|
|||
ASSERT_MSG(offset <= block.size, "Offset is out of bounds in BKTR relocation block.");
|
||||
}
|
||||
|
||||
const auto bucket_id = static_cast<std::size_t>(std::count_if(
|
||||
std::size_t bucket_id = std::count_if(
|
||||
block.base_offsets.begin() + 1, block.base_offsets.begin() + block.number_buckets,
|
||||
[&offset](u64 base_offset) { return base_offset <= offset; }));
|
||||
[&offset](u64 base_offset) { return base_offset <= offset; });
|
||||
|
||||
const auto& bucket = buckets[bucket_id];
|
||||
|
||||
|
@ -53,7 +53,6 @@ std::pair<std::size_t, std::size_t> SearchBucketEntry(u64 offset, const BlockTyp
|
|||
}
|
||||
|
||||
UNREACHABLE_MSG("Offset could not be found in BKTR block.");
|
||||
return {};
|
||||
}
|
||||
} // Anonymous namespace
|
||||
|
||||
|
@ -137,7 +136,7 @@ std::size_t BKTR::Read(u8* data, std::size_t length, std::size_t offset) const {
|
|||
|
||||
const auto block_offset = section_offset & 0xF;
|
||||
if (block_offset != 0) {
|
||||
auto block = bktr_romfs->ReadBytes(0x10, section_offset & ~0xFU);
|
||||
auto block = bktr_romfs->ReadBytes(0x10, section_offset & ~0xF);
|
||||
cipher.Transcode(block.data(), block.size(), block.data(), Core::Crypto::Op::Decrypt);
|
||||
if (length + block_offset < 0x10) {
|
||||
std::memcpy(data, block.data() + block_offset, std::min(length, block.size()));
|
||||
|
|
|
@ -30,7 +30,7 @@ void DefaultControllerApplet::ReconfigureControllers(std::function<void()> callb
|
|||
auto& players = Settings::values.players;
|
||||
|
||||
const std::size_t min_supported_players =
|
||||
parameters.enable_single_mode ? 1 : static_cast<std::size_t>(parameters.min_players);
|
||||
parameters.enable_single_mode ? 1 : parameters.min_players;
|
||||
|
||||
// Disconnect Handheld first.
|
||||
npad.DisconnectNPadAtIndex(8);
|
||||
|
|
|
@ -12,9 +12,8 @@ ProfileSelectApplet::~ProfileSelectApplet() = default;
|
|||
|
||||
void DefaultProfileSelectApplet::SelectProfile(
|
||||
std::function<void(std::optional<Common::UUID>)> callback) const {
|
||||
const auto user_index = static_cast<std::size_t>(Settings::values.current_user);
|
||||
Service::Account::ProfileManager manager;
|
||||
callback(manager.GetUser(user_index).value_or(Common::UUID{}));
|
||||
callback(manager.GetUser(Settings::values.current_user).value_or(Common::UUID{}));
|
||||
LOG_INFO(Service_ACC, "called, selecting current user instead of prompting...");
|
||||
}
|
||||
|
||||
|
|
|
@ -205,7 +205,7 @@ static Kernel::Thread* FindThreadById(s64 id) {
|
|||
const auto& threads = Core::System::GetInstance().GlobalScheduler().GetThreadList();
|
||||
for (auto& thread : threads) {
|
||||
if (thread->GetThreadID() == static_cast<u64>(id)) {
|
||||
current_core = static_cast<u32>(thread->GetProcessorID());
|
||||
current_core = thread->GetProcessorID();
|
||||
return thread.get();
|
||||
}
|
||||
}
|
||||
|
@ -457,14 +457,7 @@ static u128 GdbHexToU128(const u8* src) {
|
|||
/// Read a byte from the gdb client.
|
||||
static u8 ReadByte() {
|
||||
u8 c;
|
||||
|
||||
#ifdef WIN32
|
||||
const auto socket_id = static_cast<SOCKET>(gdbserver_socket);
|
||||
#else
|
||||
const auto socket_id = gdbserver_socket;
|
||||
#endif
|
||||
|
||||
const auto received_size = recv(socket_id, reinterpret_cast<char*>(&c), 1, MSG_WAITALL);
|
||||
std::size_t received_size = recv(gdbserver_socket, reinterpret_cast<char*>(&c), 1, MSG_WAITALL);
|
||||
if (received_size != 1) {
|
||||
LOG_ERROR(Debug_GDBStub, "recv failed: {}", received_size);
|
||||
Shutdown();
|
||||
|
@ -581,13 +574,7 @@ bool CheckBreakpoint(VAddr addr, BreakpointType type) {
|
|||
* @param packet Packet to be sent to client.
|
||||
*/
|
||||
static void SendPacket(const char packet) {
|
||||
#ifdef WIN32
|
||||
const auto socket_id = static_cast<SOCKET>(gdbserver_socket);
|
||||
#else
|
||||
const auto socket_id = gdbserver_socket;
|
||||
#endif
|
||||
|
||||
const auto sent_size = send(socket_id, &packet, 1, 0);
|
||||
std::size_t sent_size = send(gdbserver_socket, &packet, 1, 0);
|
||||
if (sent_size != 1) {
|
||||
LOG_ERROR(Debug_GDBStub, "send failed");
|
||||
}
|
||||
|
@ -624,13 +611,7 @@ static void SendReply(const char* reply) {
|
|||
u8* ptr = command_buffer;
|
||||
u32 left = command_length + 4;
|
||||
while (left > 0) {
|
||||
#ifdef WIN32
|
||||
const auto socket_id = static_cast<SOCKET>(gdbserver_socket);
|
||||
#else
|
||||
const auto socket_id = gdbserver_socket;
|
||||
#endif
|
||||
const auto sent_size =
|
||||
send(socket_id, reinterpret_cast<char*>(ptr), static_cast<socklen_t>(left), 0);
|
||||
const auto sent_size = send(gdbserver_socket, reinterpret_cast<char*>(ptr), left, 0);
|
||||
if (sent_size < 0) {
|
||||
LOG_ERROR(Debug_GDBStub, "gdb: send failed");
|
||||
return Shutdown();
|
||||
|
@ -1313,13 +1294,8 @@ static void Init(u16 port) {
|
|||
WSAStartup(MAKEWORD(2, 2), &InitData);
|
||||
#endif
|
||||
|
||||
#ifdef WIN32
|
||||
using socket_type = SOCKET;
|
||||
#else
|
||||
using socket_type = int;
|
||||
#endif
|
||||
const auto tmpsock = static_cast<socket_type>(socket(PF_INET, SOCK_STREAM, 0));
|
||||
if (tmpsock == static_cast<socket_type>(-1)) {
|
||||
int tmpsock = static_cast<int>(socket(PF_INET, SOCK_STREAM, 0));
|
||||
if (tmpsock == -1) {
|
||||
LOG_ERROR(Debug_GDBStub, "Failed to create gdb socket");
|
||||
}
|
||||
|
||||
|
@ -1359,7 +1335,7 @@ static void Init(u16 port) {
|
|||
}
|
||||
|
||||
// Clean up temporary socket if it's still alive at this point.
|
||||
if (tmpsock != static_cast<socket_type>(-1)) {
|
||||
if (tmpsock != -1) {
|
||||
shutdown(tmpsock, SHUT_RDWR);
|
||||
}
|
||||
}
|
||||
|
@ -1376,12 +1352,7 @@ void Shutdown() {
|
|||
|
||||
LOG_INFO(Debug_GDBStub, "Stopping GDB ...");
|
||||
if (gdbserver_socket != -1) {
|
||||
#ifdef WIN32
|
||||
const auto tmpsock = static_cast<SOCKET>(socket(PF_INET, SOCK_STREAM, 0));
|
||||
#else
|
||||
const auto tmpsock = static_cast<int>(socket(PF_INET, SOCK_STREAM, 0));
|
||||
#endif
|
||||
shutdown(tmpsock, SHUT_RDWR);
|
||||
shutdown(gdbserver_socket, SHUT_RDWR);
|
||||
gdbserver_socket = -1;
|
||||
}
|
||||
|
||||
|
@ -1412,7 +1383,7 @@ void SetCpuStepFlag(bool is_step) {
|
|||
step_loop = is_step;
|
||||
}
|
||||
|
||||
void SendTrap(Kernel::Thread* thread, u32 trap) {
|
||||
void SendTrap(Kernel::Thread* thread, int trap) {
|
||||
if (!send_trap) {
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -110,5 +110,5 @@ void SetCpuStepFlag(bool is_step);
|
|||
* @param thread Sending thread.
|
||||
* @param trap Trap no.
|
||||
*/
|
||||
void SendTrap(Kernel::Thread* thread, u32 trap);
|
||||
void SendTrap(Kernel::Thread* thread, int trap);
|
||||
} // namespace GDBStub
|
||||
|
|
|
@ -233,7 +233,7 @@ void ResponseBuilder::PushRaw(const T& value) {
|
|||
static_assert(std::is_trivially_copyable_v<T>,
|
||||
"It's undefined behavior to use memcpy with non-trivially copyable objects");
|
||||
std::memcpy(cmdbuf + index, &value, sizeof(T));
|
||||
index += static_cast<std::ptrdiff_t>((sizeof(T) + 3) / 4); // round up to word length
|
||||
index += (sizeof(T) + 3) / 4; // round up to word length
|
||||
}
|
||||
|
||||
template <>
|
||||
|
@ -390,7 +390,7 @@ void RequestParser::PopRaw(T& value) {
|
|||
static_assert(std::is_trivially_copyable_v<T>,
|
||||
"It's undefined behavior to use memcpy with non-trivially copyable objects");
|
||||
std::memcpy(&value, cmdbuf + index, sizeof(T));
|
||||
index += static_cast<std::ptrdiff_t>((sizeof(T) + 3) / 4); // round up to word length
|
||||
index += (sizeof(T) + 3) / 4; // round up to word length
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
|
|
|
@ -108,7 +108,7 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
|
|||
auto& monitor = system.Monitor();
|
||||
s32 updated_value;
|
||||
do {
|
||||
updated_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
|
||||
updated_value = monitor.ExclusiveRead32(current_core, address);
|
||||
|
||||
if (updated_value != value) {
|
||||
return ERR_INVALID_STATE;
|
||||
|
@ -129,7 +129,7 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
|
|||
updated_value = value;
|
||||
}
|
||||
}
|
||||
} while (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(updated_value)));
|
||||
} while (!monitor.ExclusiveWrite32(current_core, address, updated_value));
|
||||
|
||||
WakeThreads(waiting_threads, num_to_wake);
|
||||
return RESULT_SUCCESS;
|
||||
|
|
|
@ -68,7 +68,7 @@ ResultVal<Handle> HandleTable::Create(std::shared_ptr<Object> obj) {
|
|||
generations[slot] = generation;
|
||||
objects[slot] = std::move(obj);
|
||||
|
||||
const auto handle = static_cast<Handle>(generation | static_cast<u16>(slot << 15));
|
||||
Handle handle = generation | (slot << 15);
|
||||
return MakeResult<Handle>(handle);
|
||||
}
|
||||
|
||||
|
|
|
@ -58,7 +58,7 @@ std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
|
|||
|
||||
{
|
||||
Handle event_handle = InvalidHandle;
|
||||
SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), static_cast<s64>(timeout));
|
||||
SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout);
|
||||
thread->SetHLECallback(
|
||||
[context = *this, callback](std::shared_ptr<Thread> thread) mutable -> bool {
|
||||
ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT
|
||||
|
|
|
@ -171,7 +171,7 @@ struct KernelCore::Impl {
|
|||
const auto type =
|
||||
static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND);
|
||||
auto thread_res =
|
||||
Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast<s32>(i), 0,
|
||||
Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast<u32>(i), 0,
|
||||
nullptr, std::move(init_func), init_func_parameter);
|
||||
|
||||
suspend_threads[i] = std::move(thread_res).Unwrap();
|
||||
|
|
|
@ -96,7 +96,6 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
|
|||
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
|
||||
std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
|
||||
|
@ -113,7 +112,6 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
|
|||
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
|
||||
} // namespace Kernel::Memory
|
||||
|
|
|
@ -71,7 +71,7 @@ VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align
|
|||
}
|
||||
|
||||
// If we allocated more than we need, free some
|
||||
const auto allocated_pages{PageHeap::GetBlockNumPages(static_cast<u32>(heap_index))};
|
||||
const auto allocated_pages{PageHeap::GetBlockNumPages(heap_index)};
|
||||
if (allocated_pages > num_pages) {
|
||||
chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
|
||||
}
|
||||
|
@ -112,7 +112,7 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa
|
|||
|
||||
// Keep allocating until we've allocated all our pages
|
||||
for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
|
||||
const auto pages_per_alloc{PageHeap::GetBlockNumPages(static_cast<u32>(index))};
|
||||
const auto pages_per_alloc{PageHeap::GetBlockNumPages(index)};
|
||||
|
||||
while (num_pages >= pages_per_alloc) {
|
||||
// Allocate a block
|
||||
|
|
|
@ -33,12 +33,11 @@ void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_
|
|||
}
|
||||
|
||||
VAddr PageHeap::AllocateBlock(s32 index) {
|
||||
const auto u_index = static_cast<std::size_t>(index);
|
||||
const auto needed_size{blocks[u_index].GetSize()};
|
||||
const std::size_t needed_size{blocks[index].GetSize()};
|
||||
|
||||
for (auto i = u_index; i < MemoryBlockPageShifts.size(); i++) {
|
||||
if (const VAddr addr = blocks[i].PopBlock(); addr != 0) {
|
||||
if (const std::size_t allocated_size = blocks[i].GetSize();
|
||||
for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
|
||||
if (const VAddr addr{blocks[i].PopBlock()}; addr) {
|
||||
if (const std::size_t allocated_size{blocks[i].GetSize()};
|
||||
allocated_size > needed_size) {
|
||||
Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
|
||||
}
|
||||
|
@ -51,7 +50,7 @@ VAddr PageHeap::AllocateBlock(s32 index) {
|
|||
|
||||
void PageHeap::FreeBlock(VAddr block, s32 index) {
|
||||
do {
|
||||
block = blocks[static_cast<std::size_t>(index++)].PushBlock(block);
|
||||
block = blocks[index++].PushBlock(block);
|
||||
} while (block != 0);
|
||||
}
|
||||
|
||||
|
@ -70,7 +69,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
|
|||
VAddr after_start{end};
|
||||
VAddr after_end{end};
|
||||
while (big_index >= 0) {
|
||||
const std::size_t block_size{blocks[static_cast<std::size_t>(big_index)].GetSize()};
|
||||
const std::size_t block_size{blocks[big_index].GetSize()};
|
||||
const VAddr big_start{Common::AlignUp((start), block_size)};
|
||||
const VAddr big_end{Common::AlignDown((end), block_size)};
|
||||
if (big_start < big_end) {
|
||||
|
@ -88,7 +87,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
|
|||
|
||||
// Free space before the big blocks
|
||||
for (s32 i{big_index - 1}; i >= 0; i--) {
|
||||
const std::size_t block_size{blocks[static_cast<size_t>(i)].GetSize()};
|
||||
const std::size_t block_size{blocks[i].GetSize()};
|
||||
while (before_start + block_size <= before_end) {
|
||||
before_end -= block_size;
|
||||
FreeBlock(before_end, i);
|
||||
|
@ -97,7 +96,7 @@ void PageHeap::Free(VAddr addr, std::size_t num_pages) {
|
|||
|
||||
// Free space after the big blocks
|
||||
for (s32 i{big_index - 1}; i >= 0; i--) {
|
||||
const std::size_t block_size{blocks[static_cast<size_t>(i)].GetSize()};
|
||||
const std::size_t block_size{blocks[i].GetSize()};
|
||||
while (after_start + block_size <= after_end) {
|
||||
FreeBlock(after_start, i);
|
||||
after_start += block_size;
|
||||
|
|
|
@ -34,9 +34,7 @@ public:
|
|||
|
||||
static constexpr s32 GetBlockIndex(std::size_t num_pages) {
|
||||
for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
|
||||
const auto shift_index = static_cast<std::size_t>(i);
|
||||
if (num_pages >=
|
||||
(static_cast<std::size_t>(1) << MemoryBlockPageShifts[shift_index]) / PageSize) {
|
||||
if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
@ -88,7 +86,7 @@ private:
|
|||
|
||||
// Set the bitmap pointers
|
||||
for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) {
|
||||
bit_storages[static_cast<std::size_t>(depth)] = storage;
|
||||
bit_storages[depth] = storage;
|
||||
size = Common::AlignUp(size, 64) / 64;
|
||||
storage += size;
|
||||
}
|
||||
|
@ -101,7 +99,7 @@ private:
|
|||
s32 depth{};
|
||||
|
||||
do {
|
||||
const u64 v{bit_storages[static_cast<std::size_t>(depth)][offset]};
|
||||
const u64 v{bit_storages[depth][offset]};
|
||||
if (v == 0) {
|
||||
// Non-zero depth indicates that a previous level had a free block
|
||||
ASSERT(depth == 0);
|
||||
|
@ -127,7 +125,7 @@ private:
|
|||
constexpr bool ClearRange(std::size_t offset, std::size_t count) {
|
||||
const s32 depth{GetHighestDepthIndex()};
|
||||
const auto bit_ind{offset / 64};
|
||||
u64* bits{bit_storages[static_cast<std::size_t>(depth)]};
|
||||
u64* bits{bit_storages[depth]};
|
||||
if (count < 64) {
|
||||
const auto shift{offset % 64};
|
||||
ASSERT(shift + count <= 64);
|
||||
|
@ -179,11 +177,11 @@ private:
|
|||
const auto which{offset % 64};
|
||||
const u64 mask{1ULL << which};
|
||||
|
||||
u64* bit{std::addressof(bit_storages[static_cast<std::size_t>(depth)][ind])};
|
||||
u64* bit{std::addressof(bit_storages[depth][ind])};
|
||||
const u64 v{*bit};
|
||||
ASSERT((v & mask) == 0);
|
||||
*bit = v | mask;
|
||||
if (v != 0) {
|
||||
if (v) {
|
||||
break;
|
||||
}
|
||||
offset = ind;
|
||||
|
@ -197,12 +195,12 @@ private:
|
|||
const auto which{offset % 64};
|
||||
const u64 mask{1ULL << which};
|
||||
|
||||
u64* bit{std::addressof(bit_storages[static_cast<std::size_t>(depth)][ind])};
|
||||
u64* bit{std::addressof(bit_storages[depth][ind])};
|
||||
u64 v{*bit};
|
||||
ASSERT((v & mask) != 0);
|
||||
v &= ~mask;
|
||||
*bit = v;
|
||||
if (v != 0) {
|
||||
if (v) {
|
||||
break;
|
||||
}
|
||||
offset = ind;
|
||||
|
|
|
@ -414,8 +414,7 @@ ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
|
|||
const std::size_t remaining_pages{remaining_size / PageSize};
|
||||
|
||||
if (process->GetResourceLimit() &&
|
||||
!process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory,
|
||||
static_cast<s64>(remaining_size))) {
|
||||
!process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, remaining_size)) {
|
||||
return ERR_RESOURCE_LIMIT_EXCEEDED;
|
||||
}
|
||||
|
||||
|
@ -779,8 +778,7 @@ ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {
|
|||
|
||||
auto process{system.Kernel().CurrentProcess()};
|
||||
if (process->GetResourceLimit() && delta != 0 &&
|
||||
!process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory,
|
||||
static_cast<s64>(delta))) {
|
||||
!process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, delta)) {
|
||||
return ERR_RESOURCE_LIMIT_EXCEEDED;
|
||||
}
|
||||
|
||||
|
|
|
@ -34,7 +34,7 @@ public:
|
|||
PhysicalCore& operator=(const PhysicalCore&) = delete;
|
||||
|
||||
PhysicalCore(PhysicalCore&&) = default;
|
||||
PhysicalCore& operator=(PhysicalCore&&) = delete;
|
||||
PhysicalCore& operator=(PhysicalCore&&) = default;
|
||||
|
||||
void Idle();
|
||||
/// Interrupt this physical core.
|
||||
|
|
|
@ -137,8 +137,7 @@ std::shared_ptr<ResourceLimit> Process::GetResourceLimit() const {
|
|||
}
|
||||
|
||||
u64 Process::GetTotalPhysicalMemoryAvailable() const {
|
||||
const u64 capacity{
|
||||
static_cast<u64>(resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory)) +
|
||||
const u64 capacity{resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory) +
|
||||
page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
|
||||
main_thread_stack_size};
|
||||
|
||||
|
@ -280,12 +279,12 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
|
|||
// Set initial resource limits
|
||||
resource_limit->SetLimitValue(
|
||||
ResourceType::PhysicalMemory,
|
||||
static_cast<s64>(kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application)));
|
||||
kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application));
|
||||
resource_limit->SetLimitValue(ResourceType::Threads, 608);
|
||||
resource_limit->SetLimitValue(ResourceType::Events, 700);
|
||||
resource_limit->SetLimitValue(ResourceType::TransferMemory, 128);
|
||||
resource_limit->SetLimitValue(ResourceType::Sessions, 894);
|
||||
ASSERT(resource_limit->Reserve(ResourceType::PhysicalMemory, static_cast<s64>(code_size)));
|
||||
ASSERT(resource_limit->Reserve(ResourceType::PhysicalMemory, code_size));
|
||||
|
||||
// Create TLS region
|
||||
tls_region_address = CreateTLSRegion();
|
||||
|
@ -301,9 +300,9 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) {
|
|||
|
||||
ChangeStatus(ProcessStatus::Running);
|
||||
|
||||
SetupMainThread(system, *this, static_cast<u32>(main_thread_priority), main_thread_stack_top);
|
||||
SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top);
|
||||
resource_limit->Reserve(ResourceType::Threads, 1);
|
||||
resource_limit->Reserve(ResourceType::PhysicalMemory, static_cast<s64>(main_thread_stack_size));
|
||||
resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size);
|
||||
}
|
||||
|
||||
void Process::PrepareForTermination() {
|
||||
|
@ -364,7 +363,7 @@ VAddr Process::CreateTLSRegion() {
|
|||
->AllocateAndMapMemory(1, Memory::PageSize, true, start, size / Memory::PageSize,
|
||||
Memory::MemoryState::ThreadLocal,
|
||||
Memory::MemoryPermission::ReadAndWrite, tls_map_addr)
|
||||
.ValueOr(0U)};
|
||||
.ValueOr(0)};
|
||||
|
||||
ASSERT(tls_page_addr);
|
||||
|
||||
|
|
|
@ -43,8 +43,8 @@ void ResourceLimit::Release(ResourceType resource, u64 amount) {
|
|||
void ResourceLimit::Release(ResourceType resource, u64 used_amount, u64 available_amount) {
|
||||
const std::size_t index{ResourceTypeToIndex(resource)};
|
||||
|
||||
current[index] -= static_cast<s64>(used_amount);
|
||||
available[index] -= static_cast<s64>(available_amount);
|
||||
current[index] -= used_amount;
|
||||
available[index] -= available_amount;
|
||||
}
|
||||
|
||||
std::shared_ptr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) {
|
||||
|
|
|
@ -89,11 +89,9 @@ u32 GlobalScheduler::SelectThreads() {
|
|||
while (iter != suggested_queue[core_id].end()) {
|
||||
suggested = *iter;
|
||||
iter++;
|
||||
const s32 suggested_core_id = suggested->GetProcessorID();
|
||||
Thread* top_thread = suggested_core_id >= 0
|
||||
? top_threads[static_cast<u32>(suggested_core_id)]
|
||||
: nullptr;
|
||||
|
||||
s32 suggested_core_id = suggested->GetProcessorID();
|
||||
Thread* top_thread =
|
||||
suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr;
|
||||
if (top_thread != suggested) {
|
||||
if (top_thread != nullptr &&
|
||||
top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
|
||||
|
@ -104,19 +102,16 @@ u32 GlobalScheduler::SelectThreads() {
|
|||
TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested);
|
||||
break;
|
||||
}
|
||||
|
||||
suggested = nullptr;
|
||||
migration_candidates[num_candidates++] = suggested_core_id;
|
||||
}
|
||||
|
||||
// Step 3: Select a suggested thread from another core
|
||||
if (suggested == nullptr) {
|
||||
for (std::size_t i = 0; i < num_candidates; i++) {
|
||||
const auto candidate_core = static_cast<u32>(migration_candidates[i]);
|
||||
s32 candidate_core = migration_candidates[i];
|
||||
suggested = top_threads[candidate_core];
|
||||
auto it = scheduled_queue[candidate_core].begin();
|
||||
++it;
|
||||
|
||||
it++;
|
||||
Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
|
||||
if (next != nullptr) {
|
||||
TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
|
||||
|
@ -133,8 +128,7 @@ u32 GlobalScheduler::SelectThreads() {
|
|||
|
||||
idle_cores &= ~(1U << core_id);
|
||||
}
|
||||
|
||||
u32 cores_needing_context_switch = 0;
|
||||
u32 cores_needing_context_switch{};
|
||||
for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
|
||||
Scheduler& sched = kernel.Scheduler(core);
|
||||
ASSERT(top_threads[core] == nullptr ||
|
||||
|
@ -192,16 +186,13 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
|
|||
for (auto& thread : suggested_queue[core_id]) {
|
||||
const s32 source_core = thread->GetProcessorID();
|
||||
if (source_core >= 0) {
|
||||
const auto sanitized_source_core = static_cast<u32>(source_core);
|
||||
|
||||
if (current_threads[sanitized_source_core] != nullptr) {
|
||||
if (thread == current_threads[sanitized_source_core] ||
|
||||
current_threads[sanitized_source_core]->GetPriority() < min_regular_priority) {
|
||||
if (current_threads[source_core] != nullptr) {
|
||||
if (thread == current_threads[source_core] ||
|
||||
current_threads[source_core]->GetPriority() < min_regular_priority) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
|
||||
next_thread->GetPriority() < thread->GetPriority()) {
|
||||
if (thread->GetPriority() <= priority) {
|
||||
|
@ -249,25 +240,17 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
|
|||
for (std::size_t i = 0; i < current_threads.size(); i++) {
|
||||
current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
|
||||
}
|
||||
|
||||
for (auto& thread : suggested_queue[core_id]) {
|
||||
const s32 source_core = thread->GetProcessorID();
|
||||
if (source_core < 0) {
|
||||
if (source_core < 0 || thread == current_threads[source_core]) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const auto sanitized_source_core = static_cast<u32>(source_core);
|
||||
if (thread == current_threads[sanitized_source_core]) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (current_threads[sanitized_source_core] == nullptr ||
|
||||
current_threads[sanitized_source_core]->GetPriority() >= min_regular_priority) {
|
||||
if (current_threads[source_core] == nullptr ||
|
||||
current_threads[source_core]->GetPriority() >= min_regular_priority) {
|
||||
winner = thread;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (winner != nullptr) {
|
||||
if (winner != yielding_thread) {
|
||||
TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
|
||||
|
@ -309,22 +292,17 @@ void GlobalScheduler::PreemptThreads() {
|
|||
if (thread->GetPriority() != priority) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (source_core >= 0) {
|
||||
const auto sanitized_source_core = static_cast<u32>(source_core);
|
||||
Thread* next_thread = scheduled_queue[sanitized_source_core].empty()
|
||||
Thread* next_thread = scheduled_queue[source_core].empty()
|
||||
? nullptr
|
||||
: scheduled_queue[sanitized_source_core].front();
|
||||
|
||||
: scheduled_queue[source_core].front();
|
||||
if (next_thread != nullptr && next_thread->GetPriority() < 2) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (next_thread == thread) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (current_thread != nullptr &&
|
||||
current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
|
||||
winner = thread;
|
||||
|
@ -344,22 +322,17 @@ void GlobalScheduler::PreemptThreads() {
|
|||
if (thread->GetPriority() < priority) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (source_core >= 0) {
|
||||
const auto sanitized_source_core = static_cast<u32>(source_core);
|
||||
Thread* next_thread = scheduled_queue[sanitized_source_core].empty()
|
||||
Thread* next_thread = scheduled_queue[source_core].empty()
|
||||
? nullptr
|
||||
: scheduled_queue[sanitized_source_core].front();
|
||||
|
||||
: scheduled_queue[source_core].front();
|
||||
if (next_thread != nullptr && next_thread->GetPriority() < 2) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (next_thread == thread) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (current_thread != nullptr &&
|
||||
current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
|
||||
winner = thread;
|
||||
|
@ -379,11 +352,11 @@ void GlobalScheduler::PreemptThreads() {
|
|||
|
||||
void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
|
||||
Core::EmuThreadHandle global_thread) {
|
||||
const u32 current_core = global_thread.host_handle;
|
||||
u32 current_core = global_thread.host_handle;
|
||||
bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
|
||||
(current_core < Core::Hardware::NUM_CPU_CORES);
|
||||
while (cores_pending_reschedule != 0) {
|
||||
const u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
|
||||
u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
|
||||
ASSERT(core < Core::Hardware::NUM_CPU_CORES);
|
||||
if (!must_context_switch || core != current_core) {
|
||||
auto& phys_core = kernel.PhysicalCore(core);
|
||||
|
@ -393,7 +366,6 @@ void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
|
|||
}
|
||||
cores_pending_reschedule &= ~(1U << core);
|
||||
}
|
||||
|
||||
if (must_context_switch) {
|
||||
auto& core_scheduler = kernel.CurrentScheduler();
|
||||
kernel.ExitSVCProfile();
|
||||
|
@ -831,11 +803,9 @@ void Scheduler::Initialize() {
|
|||
std::string name = "Idle Thread Id:" + std::to_string(core_id);
|
||||
std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
|
||||
void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
|
||||
const auto type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
|
||||
auto thread_res =
|
||||
Thread::Create(system, type, std::move(name), 0, 64, 0, static_cast<s32>(core_id), 0,
|
||||
ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
|
||||
auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
|
||||
nullptr, std::move(init_func), init_func_parameter);
|
||||
|
||||
idle_thread = std::move(thread_res).Unwrap();
|
||||
}
|
||||
|
||||
|
|
|
@ -482,8 +482,7 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr
|
|||
static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
|
||||
s32 handle_count, u32 timeout_high, Handle* index) {
|
||||
const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
|
||||
return WaitSynchronization(system, index, handles_address, static_cast<u32>(handle_count),
|
||||
nano_seconds);
|
||||
return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds);
|
||||
}
|
||||
|
||||
/// Resumes a thread waiting on WaitSynchronization
|
||||
|
@ -2003,7 +2002,7 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
|
|||
return ERR_INVALID_HANDLE;
|
||||
}
|
||||
|
||||
*core = static_cast<u32>(thread->GetIdealCore());
|
||||
*core = thread->GetIdealCore();
|
||||
*mask = thread->GetAffinityMask();
|
||||
|
||||
return RESULT_SUCCESS;
|
||||
|
@ -2071,7 +2070,7 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
|
|||
return ERR_INVALID_HANDLE;
|
||||
}
|
||||
|
||||
return thread->SetCoreAndAffinityMask(static_cast<s32>(core), affinity_mask);
|
||||
return thread->SetCoreAndAffinityMask(core, affinity_mask);
|
||||
}
|
||||
|
||||
static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, u32 core,
|
||||
|
|
|
@ -11,11 +11,11 @@

namespace Kernel {

static inline u64 Param(const Core::System& system, std::size_t n) {
static inline u64 Param(const Core::System& system, int n) {
return system.CurrentArmInterface().GetReg(n);
}

static inline u32 Param32(const Core::System& system, std::size_t n) {
static inline u32 Param32(const Core::System& system, int n) {
return static_cast<u32>(system.CurrentArmInterface().GetReg(n));
}

@ -29,7 +29,7 @@ static inline void FuncReturn(Core::System& system, u64 result) {
}

static inline void FuncReturn32(Core::System& system, u32 result) {
system.CurrentArmInterface().SetReg(0, static_cast<u64>(result));
system.CurrentArmInterface().SetReg(0, (u64)result);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

@ -386,9 +386,8 @@ template <ResultCode func(Core::System&, Handle*, u32, u32, u32, u32, s32)>
void SvcWrap32(Core::System& system) {
Handle param_1 = 0;

const u32 retval =
func(system, &param_1, Param32(system, 0), Param32(system, 1), Param32(system, 2),
Param32(system, 3), static_cast<s32>(Param32(system, 4)))
const u32 retval = func(system, &param_1, Param32(system, 0), Param32(system, 1),
Param32(system, 2), Param32(system, 3), Param32(system, 4))
.raw;

system.CurrentArmInterface().SetReg(1, param_1);

@ -543,8 +542,8 @@ void SvcWrap32(Core::System& system) {
template <ResultCode func(Core::System&, u32, u32, s32, u32, Handle*)>
void SvcWrap32(Core::System& system) {
u32 param_1 = 0;
const u32 retval = func(system, Param32(system, 0), Param32(system, 1),
static_cast<s32>(Param32(system, 2)), Param32(system, 3), &param_1)
const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2),
Param32(system, 3), &param_1)
.raw;
system.CurrentArmInterface().SetReg(1, param_1);
FuncReturn(system, retval);

@ -51,7 +51,7 @@ std::pair<ResultCode, Handle> Synchronization::WaitFor(
// We found a ready object, acquire it and set the result value
SynchronizationObject* object = itr->get();
object->Acquire(thread);
const auto index = static_cast<u32>(std::distance(sync_objects.begin(), itr));
const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
lock.CancelSleep();
return {RESULT_SUCCESS, index};
}

@ -105,7 +105,7 @@ std::pair<ResultCode, Handle> Synchronization::WaitFor(
});
ASSERT(itr != sync_objects.end());
signaling_object->Acquire(thread);
const auto index = static_cast<u32>(std::distance(sync_objects.begin(), itr));
const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
return {signaling_result, index};
}
return {signaling_result, -1};

@ -525,7 +525,7 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
if (old_affinity_mask != new_affinity_mask) {
const s32 old_core = processor_id;
if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
if (ideal_core < 0) {
if (static_cast<s32>(ideal_core) < 0) {
processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES);
} else {
processor_id = ideal_core;

@ -470,7 +470,7 @@ public:

bool InvokeHLECallback(std::shared_ptr<Thread> thread);

s32 GetIdealCore() const {
u32 GetIdealCore() const {
return ideal_core;
}

@ -654,8 +654,8 @@ private:

Scheduler* scheduler = nullptr;

s32 ideal_core = -1;
u64 affinity_mask = 1;
u32 ideal_core{0xFFFFFFFF};
u64 affinity_mask{0x1};

s32 ideal_core_override = -1;
u64 affinity_mask_override = 0x1;

@ -41,15 +41,12 @@ constexpr char ACC_SAVE_AVATORS_BASE_PATH[] = "/system/save/8000000000000010/su/
ProfileManager::ProfileManager() {
ParseUserSaveFile();

if (user_count == 0) {
if (user_count == 0)
CreateNewUser(UUID::Generate(), "yuzu");
}

auto current = static_cast<size_t>(
std::clamp(Settings::values.current_user, 0, static_cast<s32>(MAX_USERS - 1)));
if (UserExistsIndex(current)) {
auto current = std::clamp<int>(Settings::values.current_user, 0, MAX_USERS - 1);
if (UserExistsIndex(current))
current = 0;
}

OpenUser(*GetUser(current));
}

@ -192,8 +189,8 @@ std::size_t ProfileManager::GetUserCount() const {
/// booting

std::size_t ProfileManager::GetOpenUserCount() const {
return static_cast<std::size_t>(std::count_if(profiles.begin(), profiles.end(),
[](const ProfileInfo& p) { return p.is_open; }));
return std::count_if(profiles.begin(), profiles.end(),
[](const ProfileInfo& p) { return p.is_open; });
}

/// Checks if a user id exists in our profile manager

@ -1311,7 +1311,7 @@ void IApplicationFunctions::PopLaunchParameter(Kernel::HLERequestContext& ctx) {
params.is_account_selected = 1;

Account::ProfileManager profile_manager{};
const auto uuid = profile_manager.GetUser(static_cast<u32>(Settings::values.current_user));
const auto uuid = profile_manager.GetUser(Settings::values.current_user);
ASSERT(uuid);
params.current_user = uuid->uuid;

@ -178,23 +178,23 @@ void Controller::Execute() {
}

void Controller::ConfigurationComplete() {
ControllerSupportResultInfo result_info{};

const auto& players = Settings::values.players;

const s8 player_count =
is_single_mode
? 1
: static_cast<s8>(std::count_if(players.begin(), players.end() - 2,
[](const auto& player) { return player.connected; }));

const auto index = static_cast<u32>(std::distance(
players.begin(), std::find_if(players.begin(), players.end(),
[](const auto& player) { return player.connected; })));

// If enable_single_mode is enabled, player_count is 1 regardless of any other parameters.
// Otherwise, only count connected players from P1-P8.
ControllerSupportResultInfo result_info{};
result_info.player_count = player_count;
result_info.selected_id = HID::Controller_NPad::IndexToNPad(index);
result_info.player_count =
is_single_mode ? 1
: static_cast<s8>(std::count_if(
players.begin(), players.end() - 2,
[](Settings::PlayerInput player) { return player.connected; }));

result_info.selected_id = HID::Controller_NPad::IndexToNPad(
std::distance(players.begin(),
std::find_if(players.begin(), players.end(),
[](Settings::PlayerInput player) { return player.connected; })));

result_info.result = 0;

LOG_DEBUG(Service_HID, "Result Info: player_count={}, selected_id={}, result={}",

@ -69,8 +69,7 @@ public:
buffer_event =
Kernel::WritableEvent::CreateEventPair(system.Kernel(), "IAudioOutBufferReleased");

stream =
audio_core.OpenStream(system.CoreTiming(), static_cast<u32>(audio_params.sample_rate),
stream = audio_core.OpenStream(system.CoreTiming(), audio_params.sample_rate,
audio_params.channel_count, std::move(unique_name),
[this] { buffer_event.writable->Signal(); });
}

@ -50,8 +50,8 @@ public:
Enabled,
};

explicit OpusDecoderState(OpusDecoderPtr decoder_, s32 sample_rate_, u32 channel_count_)
: decoder{std::move(decoder_)}, sample_rate{sample_rate_}, channel_count{channel_count_} {}
explicit OpusDecoderState(OpusDecoderPtr decoder, u32 sample_rate, u32 channel_count)
: decoder{std::move(decoder)}, sample_rate{sample_rate}, channel_count{channel_count} {}

// Decodes interleaved Opus packets. Optionally allows reporting time taken to
// perform the decoding, as well as any relevant extra behavior.

@ -113,16 +113,15 @@ private:
return false;
}

const auto* const frame = input.data() + sizeof(OpusPacketHeader);
const auto frame = input.data() + sizeof(OpusPacketHeader);
const auto decoded_sample_count = opus_packet_get_nb_samples(
frame, static_cast<opus_int32>(input.size() - sizeof(OpusPacketHeader)), sample_rate);
const auto decoded_size =
static_cast<u32>(decoded_sample_count) * channel_count * sizeof(u16);
if (decoded_size > raw_output_sz) {
frame, static_cast<opus_int32>(input.size() - sizeof(OpusPacketHeader)),
static_cast<opus_int32>(sample_rate));
if (decoded_sample_count * channel_count * sizeof(u16) > raw_output_sz) {
LOG_ERROR(
Audio,
"Decoded data does not fit into the output data, decoded_sz={}, raw_output_sz={}",
decoded_size, raw_output_sz);
decoded_sample_count * channel_count * sizeof(u16), raw_output_sz);
return false;
}

@ -138,11 +137,11 @@ private:
}

const auto end_time = std::chrono::high_resolution_clock::now() - start_time;
sample_count = static_cast<u32>(out_sample_count);
sample_count = out_sample_count;
consumed = static_cast<u32>(sizeof(OpusPacketHeader) + hdr.size);
if (out_performance_time != nullptr) {
*out_performance_time = static_cast<u64>(
std::chrono::duration_cast<std::chrono::milliseconds>(end_time).count());
*out_performance_time =
std::chrono::duration_cast<std::chrono::milliseconds>(end_time).count();
}

return true;

@ -155,7 +154,7 @@ private:
}

OpusDecoderPtr decoder;
s32 sample_rate;
u32 sample_rate;
u32 channel_count;
};

@ -213,7 +212,7 @@ std::size_t WorkerBufferSize(u32 channel_count) {
ASSERT_MSG(channel_count == 1 || channel_count == 2, "Invalid channel count");
constexpr int num_streams = 1;
const int num_stereo_streams = channel_count == 2 ? 1 : 0;
return static_cast<size_t>(opus_multistream_decoder_get_size(num_streams, num_stereo_streams));
return opus_multistream_decoder_get_size(num_streams, num_stereo_streams);
}

// Creates the mapping table that maps the input channels to the particular

@ -245,7 +244,7 @@ void HwOpus::GetWorkBufferSize(Kernel::HLERequestContext& ctx) {
"Invalid sample rate");
ASSERT_MSG(channel_count == 1 || channel_count == 2, "Invalid channel count");

const auto worker_buffer_sz = static_cast<u32>(WorkerBufferSize(channel_count));
const u32 worker_buffer_sz = static_cast<u32>(WorkerBufferSize(channel_count));
LOG_DEBUG(Audio, "worker_buffer_sz={}", worker_buffer_sz);

IPC::ResponseBuilder rb{ctx, 3};

@ -255,7 +254,7 @@ void HwOpus::GetWorkBufferSize(Kernel::HLERequestContext& ctx) {

void HwOpus::OpenOpusDecoder(Kernel::HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto sample_rate = rp.Pop<s32>();
const auto sample_rate = rp.Pop<u32>();
const auto channel_count = rp.Pop<u32>();
const auto buffer_sz = rp.Pop<u32>();

@ -53,10 +53,10 @@ struct DeliveryCacheProgressImpl {
ResultCode result = RESULT_SUCCESS;
DirectoryName current_directory;
FileName current_file;
u64 current_downloaded_bytes; ///< Bytes downloaded on current file.
u64 current_total_bytes; ///< Bytes total on current file.
u64 total_downloaded_bytes; ///< Bytes downloaded on overall download.
u64 total_bytes; ///< Bytes total on overall download.
s64 current_downloaded_bytes; ///< Bytes downloaded on current file.
s64 current_total_bytes; ///< Bytes total on current file.
s64 total_downloaded_bytes; ///< Bytes downloaded on overall download.
s64 total_bytes; ///< Bytes total on overall download.
INSERT_PADDING_BYTES(
0x198); ///< Appears to be unused in official code, possibly reserved for future use.
};

@ -3,16 +3,7 @@
// Refer to the license.txt file included.

#include <fmt/ostream.h>

#if defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
#endif
#include <httplib.h>
#if defined(__GNUC__)
#pragma GCC diagnostic pop
#endif

#include <mbedtls/sha256.h>
#include <nlohmann/json.hpp>
#include "common/hex_util.h"

@ -454,8 +454,7 @@ private:
write_size = std::min<u64>(write_size, files.size());
std::vector<DeliveryCacheDirectoryEntry> entries(write_size);
std::transform(
files.begin(), files.begin() + static_cast<s64>(write_size), entries.begin(),
[](const auto& file) {
files.begin(), files.begin() + write_size, entries.begin(), [](const auto& file) {
FileName name{};
std::memcpy(name.data(), file->GetName().data(),
std::min(file->GetName().size(), name.size()));

@ -94,8 +94,7 @@ private:
}

// Read the data from the Storage backend
const auto output = backend->ReadBytes(static_cast<u64>(length), static_cast<u64>(offset));

std::vector<u8> output = backend->ReadBytes(length, offset);
// Write the data to memory
ctx.WriteBuffer(output);

@ -152,7 +151,7 @@ private:
}

// Read the data from the Storage backend
const auto output = backend->ReadBytes(static_cast<u64>(length), static_cast<u64>(offset));
std::vector<u8> output = backend->ReadBytes(length, offset);

// Write the data to memory
ctx.WriteBuffer(output);

@ -195,8 +194,7 @@ private:
// Write the data to the Storage backend
const auto write_size =
static_cast<std::size_t>(std::distance(data.begin(), data.begin() + length));
const std::size_t written =
backend->Write(data.data(), write_size, static_cast<u64>(offset));
const std::size_t written = backend->Write(data.data(), write_size, offset);

ASSERT_MSG(static_cast<s64>(written) == length,
"Could not write all bytes to file (requested={:016X}, actual={:016X}).", length,

@ -23,7 +23,7 @@ void Controller_DebugPad::OnRelease() {}

void Controller_DebugPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
std::size_t size) {
shared_memory.header.timestamp = static_cast<s64>(core_timing.GetCPUTicks());
shared_memory.header.timestamp = core_timing.GetCPUTicks();
shared_memory.header.total_entry_count = 17;

if (!IsControllerActivated()) {

@ -33,11 +33,9 @@ void Controller_DebugPad::OnUpdate(const Core::Timing::CoreTiming& core_timing,
}
shared_memory.header.entry_count = 16;

const auto& last_entry =
shared_memory.pad_states[static_cast<u64>(shared_memory.header.last_entry_index)];
const auto& last_entry = shared_memory.pad_states[shared_memory.header.last_entry_index];
shared_memory.header.last_entry_index = (shared_memory.header.last_entry_index + 1) % 17;
auto& cur_entry =
shared_memory.pad_states[static_cast<u64>(shared_memory.header.last_entry_index)];
auto& cur_entry = shared_memory.pad_states[shared_memory.header.last_entry_index];

cur_entry.sampling_number = last_entry.sampling_number + 1;
cur_entry.sampling_number2 = cur_entry.sampling_number;

@ -19,7 +19,7 @@ void Controller_Gesture::OnRelease() {}

void Controller_Gesture::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
std::size_t size) {
shared_memory.header.timestamp = static_cast<s64>(core_timing.GetCPUTicks());
shared_memory.header.timestamp = core_timing.GetCPUTicks();
shared_memory.header.total_entry_count = 17;

if (!IsControllerActivated()) {

@ -29,11 +29,9 @@ void Controller_Gesture::OnUpdate(const Core::Timing::CoreTiming& core_timing, u
}
shared_memory.header.entry_count = 16;

const auto& last_entry =
shared_memory.gesture_states[static_cast<u64>(shared_memory.header.last_entry_index)];
const auto& last_entry = shared_memory.gesture_states[shared_memory.header.last_entry_index];
shared_memory.header.last_entry_index = (shared_memory.header.last_entry_index + 1) % 17;
auto& cur_entry =
shared_memory.gesture_states[static_cast<u64>(shared_memory.header.last_entry_index)];
auto& cur_entry = shared_memory.gesture_states[shared_memory.header.last_entry_index];

cur_entry.sampling_number = last_entry.sampling_number + 1;
cur_entry.sampling_number2 = cur_entry.sampling_number;

@ -21,7 +21,7 @@ void Controller_Keyboard::OnRelease() {}

void Controller_Keyboard::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
std::size_t size) {
shared_memory.header.timestamp = static_cast<s64>(core_timing.GetCPUTicks());
shared_memory.header.timestamp = core_timing.GetCPUTicks();
shared_memory.header.total_entry_count = 17;

if (!IsControllerActivated()) {

@ -31,11 +31,9 @@ void Controller_Keyboard::OnUpdate(const Core::Timing::CoreTiming& core_timing,
}
shared_memory.header.entry_count = 16;

const auto& last_entry =
shared_memory.pad_states[static_cast<u64>(shared_memory.header.last_entry_index)];
const auto& last_entry = shared_memory.pad_states[shared_memory.header.last_entry_index];
shared_memory.header.last_entry_index = (shared_memory.header.last_entry_index + 1) % 17;
auto& cur_entry =
shared_memory.pad_states[static_cast<u64>(shared_memory.header.last_entry_index)];
auto& cur_entry = shared_memory.pad_states[shared_memory.header.last_entry_index];

cur_entry.sampling_number = last_entry.sampling_number + 1;
cur_entry.sampling_number2 = cur_entry.sampling_number;

@ -19,7 +19,7 @@ void Controller_Mouse::OnRelease() {}

void Controller_Mouse::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
std::size_t size) {
shared_memory.header.timestamp = static_cast<s64>(core_timing.GetCPUTicks());
shared_memory.header.timestamp = core_timing.GetCPUTicks();
shared_memory.header.total_entry_count = 17;

if (!IsControllerActivated()) {

@ -29,11 +29,9 @@ void Controller_Mouse::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8*
}
shared_memory.header.entry_count = 16;

auto& last_entry =
shared_memory.mouse_states[static_cast<u64>(shared_memory.header.last_entry_index)];
auto& last_entry = shared_memory.mouse_states[shared_memory.header.last_entry_index];
shared_memory.header.last_entry_index = (shared_memory.header.last_entry_index + 1) % 17;
auto& cur_entry =
shared_memory.mouse_states[static_cast<u64>(shared_memory.header.last_entry_index)];
auto& cur_entry = shared_memory.mouse_states[shared_memory.header.last_entry_index];

cur_entry.sampling_number = last_entry.sampling_number + 1;
cur_entry.sampling_number2 = cur_entry.sampling_number;

@ -341,29 +341,26 @@ void Controller_NPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8*
}
for (std::size_t i = 0; i < shared_memory_entries.size(); i++) {
auto& npad = shared_memory_entries[i];
const std::array controller_npads{
&npad.main_controller_states,
const std::array<NPadGeneric*, 7> controller_npads{&npad.main_controller_states,
&npad.handheld_states,
&npad.dual_states,
&npad.left_joy_states,
&npad.right_joy_states,
&npad.pokeball_states,
&npad.libnx,
};
&npad.libnx};

for (auto* main_controller : controller_npads) {
main_controller->common.entry_count = 16;
main_controller->common.total_entry_count = 17;

const auto& last_entry =
main_controller->npad[static_cast<u64>(main_controller->common.last_entry_index)];
main_controller->npad[main_controller->common.last_entry_index];

main_controller->common.timestamp = static_cast<s64>(core_timing.GetCPUTicks());
main_controller->common.timestamp = core_timing.GetCPUTicks();
main_controller->common.last_entry_index =
(main_controller->common.last_entry_index + 1) % 17;

auto& cur_entry =
main_controller->npad[static_cast<u64>(main_controller->common.last_entry_index)];
auto& cur_entry = main_controller->npad[main_controller->common.last_entry_index];

cur_entry.timestamp = last_entry.timestamp + 1;
cur_entry.timestamp2 = cur_entry.timestamp;

@ -374,29 +371,22 @@ void Controller_NPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8*
if (controller_type == NPadControllerType::None || !connected_controllers[i].is_connected) {
continue;
}
const auto npad_index = static_cast<u32>(i);
const u32 npad_index = static_cast<u32>(i);

RequestPadStateUpdate(npad_index);
auto& pad_state = npad_pad_states[npad_index];

auto& main_controller =
npad.main_controller_states
.npad[static_cast<u64>(npad.main_controller_states.common.last_entry_index)];
npad.main_controller_states.npad[npad.main_controller_states.common.last_entry_index];
auto& handheld_entry =
npad.handheld_states
.npad[static_cast<u64>(npad.handheld_states.common.last_entry_index)];
auto& dual_entry =
npad.dual_states.npad[static_cast<u64>(npad.dual_states.common.last_entry_index)];
auto& left_entry =
npad.left_joy_states
.npad[static_cast<u64>(npad.left_joy_states.common.last_entry_index)];
npad.handheld_states.npad[npad.handheld_states.common.last_entry_index];
auto& dual_entry = npad.dual_states.npad[npad.dual_states.common.last_entry_index];
auto& left_entry = npad.left_joy_states.npad[npad.left_joy_states.common.last_entry_index];
auto& right_entry =
npad.right_joy_states
.npad[static_cast<u64>(npad.right_joy_states.common.last_entry_index)];
npad.right_joy_states.npad[npad.right_joy_states.common.last_entry_index];
auto& pokeball_entry =
npad.pokeball_states
.npad[static_cast<u64>(npad.pokeball_states.common.last_entry_index)];
auto& libnx_entry = npad.libnx.npad[static_cast<u64>(npad.libnx.common.last_entry_index)];
npad.pokeball_states.npad[npad.pokeball_states.common.last_entry_index];
auto& libnx_entry = npad.libnx.npad[npad.libnx.common.last_entry_index];

libnx_entry.connection_status.raw = 0;
libnx_entry.connection_status.IsConnected.Assign(1);

@ -510,14 +500,13 @@ void Controller_NPad::OnMotionUpdate(const Core::Timing::CoreTiming& core_timing
sixaxis_sensor->common.total_entry_count = 17;

const auto& last_entry =
sixaxis_sensor->sixaxis[static_cast<u64>(sixaxis_sensor->common.last_entry_index)];
sixaxis_sensor->sixaxis[sixaxis_sensor->common.last_entry_index];

sixaxis_sensor->common.timestamp = static_cast<s64>(core_timing.GetCPUTicks());
sixaxis_sensor->common.timestamp = core_timing.GetCPUTicks();
sixaxis_sensor->common.last_entry_index =
(sixaxis_sensor->common.last_entry_index + 1) % 17;

auto& cur_entry =
sixaxis_sensor->sixaxis[static_cast<u64>(sixaxis_sensor->common.last_entry_index)];
auto& cur_entry = sixaxis_sensor->sixaxis[sixaxis_sensor->common.last_entry_index];

cur_entry.timestamp = last_entry.timestamp + 1;
cur_entry.timestamp2 = cur_entry.timestamp;

@ -540,21 +529,17 @@ void Controller_NPad::OnMotionUpdate(const Core::Timing::CoreTiming& core_timing
}

auto& full_sixaxis_entry =
npad.sixaxis_full.sixaxis[static_cast<u64>(npad.sixaxis_full.common.last_entry_index)];
npad.sixaxis_full.sixaxis[npad.sixaxis_full.common.last_entry_index];
auto& handheld_sixaxis_entry =
npad.sixaxis_handheld
.sixaxis[static_cast<u64>(npad.sixaxis_handheld.common.last_entry_index)];
npad.sixaxis_handheld.sixaxis[npad.sixaxis_handheld.common.last_entry_index];
auto& dual_left_sixaxis_entry =
npad.sixaxis_dual_left
.sixaxis[static_cast<u64>(npad.sixaxis_dual_left.common.last_entry_index)];
npad.sixaxis_dual_left.sixaxis[npad.sixaxis_dual_left.common.last_entry_index];
auto& dual_right_sixaxis_entry =
npad.sixaxis_dual_right
.sixaxis[static_cast<u64>(npad.sixaxis_dual_right.common.last_entry_index)];
npad.sixaxis_dual_right.sixaxis[npad.sixaxis_dual_right.common.last_entry_index];
auto& left_sixaxis_entry =
npad.sixaxis_left.sixaxis[static_cast<u64>(npad.sixaxis_left.common.last_entry_index)];
npad.sixaxis_left.sixaxis[npad.sixaxis_left.common.last_entry_index];
auto& right_sixaxis_entry =
npad.sixaxis_right
.sixaxis[static_cast<u64>(npad.sixaxis_right.common.last_entry_index)];
npad.sixaxis_right.sixaxis[npad.sixaxis_right.common.last_entry_index];

switch (controller_type) {
case NPadControllerType::None:

@ -22,12 +22,12 @@ void Controller_Stubbed::OnUpdate(const Core::Timing::CoreTiming& core_timing, u
|
|||
return;
|
||||
}
|
||||
|
||||
const CommonHeader header{
|
||||
.timestamp = static_cast<s64>(core_timing.GetCPUTicks()),
|
||||
.total_entry_count = 17,
|
||||
.last_entry_index = 0,
|
||||
.entry_count = 0,
|
||||
};
|
||||
CommonHeader header{};
|
||||
header.timestamp = core_timing.GetCPUTicks();
|
||||
header.total_entry_count = 17;
|
||||
header.entry_count = 0;
|
||||
header.last_entry_index = 0;
|
||||
|
||||
std::memcpy(data + common_offset, &header, sizeof(CommonHeader));
|
||||
}
|
||||
|
||||
|
|
|
@ -22,7 +22,7 @@ void Controller_Touchscreen::OnRelease() {}
|
|||
|
||||
void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
|
||||
std::size_t size) {
|
||||
shared_memory.header.timestamp = static_cast<s64>(core_timing.GetCPUTicks());
|
||||
shared_memory.header.timestamp = core_timing.GetCPUTicks();
|
||||
shared_memory.header.total_entry_count = 17;
|
||||
|
||||
if (!IsControllerActivated()) {
|
||||
|
@ -33,12 +33,9 @@ void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timin
|
|||
shared_memory.header.entry_count = 16;
|
||||
|
||||
const auto& last_entry =
|
||||
shared_memory
|
||||
.shared_memory_entries[static_cast<u64>(shared_memory.header.last_entry_index)];
|
||||
shared_memory.shared_memory_entries[shared_memory.header.last_entry_index];
|
||||
shared_memory.header.last_entry_index = (shared_memory.header.last_entry_index + 1) % 17;
|
||||
auto& cur_entry =
|
||||
shared_memory
|
||||
.shared_memory_entries[static_cast<u64>(shared_memory.header.last_entry_index)];
|
||||
auto& cur_entry = shared_memory.shared_memory_entries[shared_memory.header.last_entry_index];
|
||||
|
||||
cur_entry.sampling_number = last_entry.sampling_number + 1;
|
||||
cur_entry.sampling_number2 = cur_entry.sampling_number;
|
||||
|
|
|
@ -69,6 +69,6 @@ private:
|
|||
TouchScreenSharedMemory shared_memory{};
|
||||
std::unique_ptr<Input::TouchDevice> touch_device;
|
||||
std::unique_ptr<Input::TouchDevice> touch_btn_device;
|
||||
u64_le last_touch{};
|
||||
s64_le last_touch{};
|
||||
};
|
||||
} // namespace Service::HID
|
||||
|
|
|
@ -20,7 +20,7 @@ void Controller_XPad::OnRelease() {}
|
|||
void Controller_XPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* data,
|
||||
std::size_t size) {
|
||||
for (auto& xpad_entry : shared_memory.shared_memory_entries) {
|
||||
xpad_entry.header.timestamp = static_cast<s64>(core_timing.GetCPUTicks());
|
||||
xpad_entry.header.timestamp = core_timing.GetCPUTicks();
|
||||
xpad_entry.header.total_entry_count = 17;
|
||||
|
||||
if (!IsControllerActivated()) {
|
||||
|
@ -30,11 +30,9 @@ void Controller_XPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8*
|
|||
}
|
||||
xpad_entry.header.entry_count = 16;
|
||||
|
||||
const auto& last_entry =
|
||||
xpad_entry.pad_states[static_cast<u64>(xpad_entry.header.last_entry_index)];
|
||||
const auto& last_entry = xpad_entry.pad_states[xpad_entry.header.last_entry_index];
|
||||
xpad_entry.header.last_entry_index = (xpad_entry.header.last_entry_index + 1) % 17;
|
||||
auto& cur_entry =
|
||||
xpad_entry.pad_states[static_cast<u64>(xpad_entry.header.last_entry_index)];
|
||||
auto& cur_entry = xpad_entry.pad_states[xpad_entry.header.last_entry_index];
|
||||
|
||||
cur_entry.sampling_number = last_entry.sampling_number + 1;
|
||||
cur_entry.sampling_number2 = cur_entry.sampling_number;
|
||||
|
|
|
@ -23,7 +23,7 @@ namespace Service::LDR {
|
|||
|
||||
constexpr ResultCode ERROR_INSUFFICIENT_ADDRESS_SPACE{ErrorModule::RO, 2};
|
||||
|
||||
[[maybe_unused]] constexpr ResultCode ERROR_INVALID_MEMORY_STATE{ErrorModule::Loader, 51};
|
||||
constexpr ResultCode ERROR_INVALID_MEMORY_STATE{ErrorModule::Loader, 51};
|
||||
constexpr ResultCode ERROR_INVALID_NRO{ErrorModule::Loader, 52};
|
||||
constexpr ResultCode ERROR_INVALID_NRR{ErrorModule::Loader, 53};
|
||||
constexpr ResultCode ERROR_MISSING_NRR_HASH{ErrorModule::Loader, 54};
|
||||
|
@ -33,7 +33,7 @@ constexpr ResultCode ERROR_ALREADY_LOADED{ErrorModule::Loader, 57};
|
|||
constexpr ResultCode ERROR_INVALID_ALIGNMENT{ErrorModule::Loader, 81};
|
||||
constexpr ResultCode ERROR_INVALID_SIZE{ErrorModule::Loader, 82};
|
||||
constexpr ResultCode ERROR_INVALID_NRO_ADDRESS{ErrorModule::Loader, 84};
|
||||
[[maybe_unused]] constexpr ResultCode ERROR_INVALID_NRR_ADDRESS{ErrorModule::Loader, 85};
|
||||
constexpr ResultCode ERROR_INVALID_NRR_ADDRESS{ErrorModule::Loader, 85};
|
||||
constexpr ResultCode ERROR_NOT_INITIALIZED{ErrorModule::Loader, 87};
|
||||
|
||||
constexpr std::size_t MAXIMUM_LOADED_RO{0x40};
|
||||
|
|
|
@ -240,10 +240,10 @@ MiiStoreData BuildRandomStoreData(Age age, Gender gender, Race race, const Commo
|
|||
bf.eye_type.Assign(
|
||||
eye_type_info.values[GetRandomValue<std::size_t>(eye_type_info.values_count)]);
|
||||
|
||||
const auto eye_rotate_1{gender != Gender::Male ? 4U : 2U};
|
||||
const auto eye_rotate_2{gender != Gender::Male ? 3U : 4U};
|
||||
const auto eye_rotate_offset{32U - EyeRotateLookup[eye_rotate_1] + eye_rotate_2};
|
||||
const auto eye_rotate{32U - EyeRotateLookup[bf.eye_type]};
|
||||
const auto eye_rotate_1{gender != Gender::Male ? 4 : 2};
|
||||
const auto eye_rotate_2{gender != Gender::Male ? 3 : 4};
|
||||
const auto eye_rotate_offset{32 - EyeRotateLookup[eye_rotate_1] + eye_rotate_2};
|
||||
const auto eye_rotate{32 - EyeRotateLookup[bf.eye_type]};
|
||||
|
||||
bf.eye_color.Assign(
|
||||
EyeColorLookup[eye_color_info
|
||||
|
@ -257,11 +257,11 @@ MiiStoreData BuildRandomStoreData(Age age, Gender gender, Race race, const Commo
|
|||
bf.eyebrow_type.Assign(
|
||||
eyebrow_type_info.values[GetRandomValue<std::size_t>(eyebrow_type_info.values_count)]);
|
||||
|
||||
const auto eyebrow_rotate_1{race == Race::Asian ? 6U : 0U};
|
||||
const auto eyebrow_y{race == Race::Asian ? 9U : 10U};
|
||||
const auto eyebrow_rotate_offset{32U - EyebrowRotateLookup[eyebrow_rotate_1] + 6};
|
||||
const auto eyebrow_rotate_1{race == Race::Asian ? 6 : 0};
|
||||
const auto eyebrow_y{race == Race::Asian ? 9 : 10};
|
||||
const auto eyebrow_rotate_offset{32 - EyebrowRotateLookup[eyebrow_rotate_1] + 6};
|
||||
const auto eyebrow_rotate{
|
||||
32U - EyebrowRotateLookup[static_cast<std::size_t>(bf.eyebrow_type.Value())]};
|
||||
32 - EyebrowRotateLookup[static_cast<std::size_t>(bf.eyebrow_type.Value())]};
|
||||
|
||||
bf.eyebrow_color.Assign(bf.hair_color);
|
||||
bf.eyebrow_scale.Assign(4);
|
||||
|
@ -270,14 +270,14 @@ MiiStoreData BuildRandomStoreData(Age age, Gender gender, Race race, const Commo
|
|||
bf.eyebrow_x.Assign(2);
|
||||
bf.eyebrow_y.Assign(axis_y + eyebrow_y);
|
||||
|
||||
const auto nose_scale{gender == Gender::Female ? 3U : 4U};
|
||||
const auto nose_scale{gender == Gender::Female ? 3 : 4};
|
||||
|
||||
bf.nose_type.Assign(
|
||||
nose_type_info.values[GetRandomValue<std::size_t>(nose_type_info.values_count)]);
|
||||
bf.nose_scale.Assign(nose_scale);
|
||||
bf.nose_y.Assign(axis_y + 9);
|
||||
|
||||
const auto mouth_color{gender == Gender::Female ? GetRandomValue<u32>(4) : 0U};
|
||||
const auto mouth_color{gender == Gender::Female ? GetRandomValue<int>(4) : 0};
|
||||
|
||||
bf.mouth_type.Assign(
|
||||
mouth_type_info.values[GetRandomValue<std::size_t>(mouth_type_info.values_count)]);
|
||||
|
|
|
@ -217,7 +217,7 @@ private:
|
|||
const auto& amiibo = nfp_interface.GetAmiiboBuffer();
|
||||
const TagInfo tag_info{
|
||||
.uuid = amiibo.uuid,
|
||||
.uuid_length = static_cast<u8>(amiibo.uuid.size()),
|
||||
.uuid_length = static_cast<u8>(tag_info.uuid.size()),
|
||||
.padding_1 = {},
|
||||
.protocol = 1, // TODO(ogniK): Figure out actual values
|
||||
.tag_type = 2,
|
||||
|
|
|
@ -368,7 +368,7 @@ ResultVal<u8> IApplicationManagerInterface::GetApplicationDesiredLanguage(
|
|||
|
||||
// Get language code from settings
|
||||
const auto language_code =
|
||||
Set::GetLanguageCodeFromIndex(static_cast<u32>(Settings::values.language_index.GetValue()));
|
||||
Set::GetLanguageCodeFromIndex(Settings::values.language_index.GetValue());
|
||||
|
||||
// Convert to application language, get priority list
|
||||
const auto application_language = ConvertToApplicationLanguage(language_code);
|
||||
|
|
|
@ -50,9 +50,19 @@ constexpr std::array<std::pair<FontArchives, const char*>, 7> SHARED_FONTS{
|
|||
std::make_pair(FontArchives::Extension, "nintendo_ext2_003.bfttf"),
|
||||
};
|
||||
|
||||
constexpr std::array<const char*, 7> SHARED_FONTS_TTF{
|
||||
"FontStandard.ttf",
|
||||
"FontChineseSimplified.ttf",
|
||||
"FontExtendedChineseSimplified.ttf",
|
||||
"FontChineseTraditional.ttf",
|
||||
"FontKorean.ttf",
|
||||
"FontNintendoExtended.ttf",
|
||||
"FontNintendoExtended2.ttf",
|
||||
};
|
||||
|
||||
// The below data is specific to shared font data dumped from Switch on f/w 2.2
|
||||
// Virtual address and offsets/sizes likely will vary by dump
|
||||
[[maybe_unused]] constexpr VAddr SHARED_FONT_MEM_VADDR{0x00000009d3016000ULL};
|
||||
constexpr VAddr SHARED_FONT_MEM_VADDR{0x00000009d3016000ULL};
|
||||
constexpr u32 EXPECTED_RESULT{0x7f9a0218}; // What we expect the decrypted bfttf first 4 bytes to be
|
||||
constexpr u32 EXPECTED_MAGIC{0x36f81a1e}; // What we expect the encrypted bfttf first 4 bytes to be
|
||||
constexpr u64 SHARED_FONT_MEM_SIZE{0x1100000};
|
||||
|
|
|
@ -155,7 +155,7 @@ u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& ou
|
|||
|
||||
const auto object{nvmap_dev->GetObject(params.nvmap_handle)};
|
||||
if (!object) {
|
||||
LOG_ERROR(Service_NVDRV, "invalid nvmap_handle={:X}", params.nvmap_handle);
|
||||
LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", params.nvmap_handle);
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
return NvErrCodes::InvalidInput;
|
||||
}
|
||||
|
@ -167,21 +167,18 @@ u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& ou
|
|||
auto& gpu = system.GPU();
|
||||
|
||||
u64 page_size{params.page_size};
|
||||
if (page_size == 0) {
|
||||
if (!page_size) {
|
||||
page_size = object->align;
|
||||
}
|
||||
|
||||
if ((params.flags & AddressSpaceFlags::Remap) != AddressSpaceFlags::None) {
|
||||
const auto buffer_map = FindBufferMap(static_cast<GPUVAddr>(params.offset));
|
||||
|
||||
if (buffer_map) {
|
||||
const auto cpu_addr{
|
||||
static_cast<VAddr>(buffer_map->CpuAddr() + static_cast<u64>(params.buffer_offset))};
|
||||
if (const auto buffer_map{FindBufferMap(params.offset)}; buffer_map) {
|
||||
const auto cpu_addr{static_cast<VAddr>(buffer_map->CpuAddr() + params.buffer_offset)};
|
||||
const auto gpu_addr{static_cast<GPUVAddr>(params.offset + params.buffer_offset)};
|
||||
|
||||
if (!gpu.MemoryManager().Map(cpu_addr, gpu_addr, params.mapping_size)) {
|
||||
LOG_ERROR(Service_NVDRV,
|
||||
"Remap failed, flags={:X}, nvmap_handle={:X}, buffer_offset={}, "
|
||||
LOG_CRITICAL(Service_NVDRV,
|
||||
"remap failed, flags={:X}, nvmap_handle={:X}, buffer_offset={}, "
|
||||
"mapping_size = {}, offset={}",
|
||||
params.flags, params.nvmap_handle, params.buffer_offset,
|
||||
params.mapping_size, params.offset);
|
||||
|
@ -193,7 +190,7 @@ u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& ou
|
|||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
return NvErrCodes::Success;
|
||||
} else {
|
||||
LOG_ERROR(Service_NVDRV, "Address not mapped. offset={}", params.offset);
|
||||
LOG_CRITICAL(Service_NVDRV, "address not mapped offset={}", params.offset);
|
||||
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
return NvErrCodes::InvalidInput;
|
||||
|
@ -203,27 +200,25 @@ u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& ou
|
|||
// We can only map objects that have already been assigned a CPU address.
|
||||
ASSERT(object->status == nvmap::Object::Status::Allocated);
|
||||
|
||||
const auto physical_address{object->addr + static_cast<VAddr>(params.buffer_offset)};
|
||||
const auto physical_address{object->addr + params.buffer_offset};
|
||||
u64 size{params.mapping_size};
|
||||
if (size == 0) {
|
||||
if (!size) {
|
||||
size = object->size;
|
||||
}
|
||||
|
||||
const bool is_alloc{(params.flags & AddressSpaceFlags::FixedOffset) == AddressSpaceFlags::None};
|
||||
if (is_alloc) {
|
||||
params.offset =
|
||||
static_cast<s64>(gpu.MemoryManager().MapAllocate(physical_address, size, page_size));
|
||||
params.offset = gpu.MemoryManager().MapAllocate(physical_address, size, page_size);
|
||||
} else {
|
||||
params.offset = static_cast<s64>(
|
||||
gpu.MemoryManager().Map(physical_address, static_cast<GPUVAddr>(params.offset), size));
|
||||
params.offset = gpu.MemoryManager().Map(physical_address, params.offset, size);
|
||||
}
|
||||
|
||||
auto result{NvErrCodes::Success};
|
||||
if (params.offset == 0) {
|
||||
LOG_ERROR(Service_NVDRV, "Failed to map size={}", size);
|
||||
if (!params.offset) {
|
||||
LOG_CRITICAL(Service_NVDRV, "failed to map size={}", size);
|
||||
result = NvErrCodes::InvalidInput;
|
||||
} else {
|
||||
AddBufferMap(static_cast<GPUVAddr>(params.offset), size, physical_address, is_alloc);
|
||||
AddBufferMap(params.offset, size, physical_address, is_alloc);
|
||||
}
|
||||
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
|
@ -234,13 +229,12 @@ u32 nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& ou
|
|||
IoctlUnmapBuffer params{};
|
||||
std::memcpy(¶ms, input.data(), input.size());
|
||||
|
||||
const auto offset = static_cast<GPUVAddr>(params.offset);
|
||||
LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", offset);
|
||||
LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset);
|
||||
|
||||
if (const auto size{RemoveBufferMap(offset)}; size) {
|
||||
system.GPU().MemoryManager().Unmap(offset, *size);
|
||||
if (const auto size{RemoveBufferMap(params.offset)}; size) {
|
||||
system.GPU().MemoryManager().Unmap(params.offset, *size);
|
||||
} else {
|
||||
LOG_ERROR(Service_NVDRV, "invalid offset=0x{:X}", offset);
|
||||
LOG_ERROR(Service_NVDRV, "invalid offset=0x{:X}", params.offset);
|
||||
}
|
||||
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
|
|
|
@ -63,7 +63,8 @@ u32 nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>&
|
|||
return NvResult::BadParameter;
|
||||
}
|
||||
|
||||
const u32 event_id = params.value & 0x00FF;
|
||||
u32 event_id = params.value & 0x00FF;
|
||||
|
||||
if (event_id >= MaxNvEvents) {
|
||||
std::memcpy(output.data(), ¶ms, sizeof(params));
|
||||
return NvResult::BadParameter;
|
||||
|
@ -77,17 +78,16 @@ u32 nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>&
|
|||
event.writable->Signal();
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
auto lock = gpu.LockSync();
|
||||
const u32 current_syncpoint_value = gpu.GetSyncpointValue(params.syncpt_id);
|
||||
const s32 diff = static_cast<s32>(current_syncpoint_value - params.threshold);
|
||||
const s32 diff = current_syncpoint_value - params.threshold;
|
||||
if (diff >= 0) {
|
||||
event.writable->Signal();
|
||||
params.value = current_syncpoint_value;
|
||||
std::memcpy(output.data(), ¶ms, sizeof(params));
|
||||
return NvResult::Success;
|
||||
}
|
||||
const u32 target_value = current_syncpoint_value - static_cast<u32>(diff);
|
||||
const u32 target_value = current_syncpoint_value - diff;
|
||||
|
||||
if (!is_async) {
|
||||
params.value = 0;
|
||||
|
@ -98,7 +98,7 @@ u32 nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>&
|
|||
return NvResult::Timeout;
|
||||
}
|
||||
|
||||
const EventState status = events_interface.status[event_id];
|
||||
EventState status = events_interface.status[event_id];
|
||||
if (event_id < MaxNvEvents || status == EventState::Free || status == EventState::Registered) {
|
||||
events_interface.SetEventStatus(event_id, EventState::Waiting);
|
||||
events_interface.assigned_syncpt[event_id] = params.syncpt_id;
|
||||
|
@ -114,7 +114,7 @@ u32 nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>&
|
|||
if (!is_async && ctrl.fresh_call) {
|
||||
ctrl.must_delay = true;
|
||||
ctrl.timeout = params.timeout;
|
||||
ctrl.event_id = static_cast<s32>(event_id);
|
||||
ctrl.event_id = event_id;
|
||||
return NvResult::Timeout;
|
||||
}
|
||||
std::memcpy(output.data(), ¶ms, sizeof(params));
|
||||
|
|
|
@ -127,7 +127,7 @@ u32 nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8>& ou
|
|||
params.unk3);
|
||||
|
||||
auto& gpu = system.GPU();
|
||||
params.fence_out.id = static_cast<s32>(assigned_syncpoints);
|
||||
params.fence_out.id = assigned_syncpoints;
|
||||
params.fence_out.value = gpu.GetSyncpointValue(assigned_syncpoints);
|
||||
assigned_syncpoints++;
|
||||
std::memcpy(output.data(), ¶ms, output.size());
|
||||
|
@ -166,8 +166,7 @@ u32 nvhost_gpu::SubmitGPFIFO(const std::vector<u8>& input, std::vector<u8>& outp
|
|||
UNIMPLEMENTED_IF(params.flags.add_increment.Value() != 0);
|
||||
|
||||
auto& gpu = system.GPU();
|
||||
const u32 current_syncpoint_value =
|
||||
gpu.GetSyncpointValue(static_cast<u32>(params.fence_out.id));
|
||||
u32 current_syncpoint_value = gpu.GetSyncpointValue(params.fence_out.id);
|
||||
if (params.flags.increment.Value()) {
|
||||
params.fence_out.value += current_syncpoint_value;
|
||||
} else {
|
||||
|
@ -201,8 +200,7 @@ u32 nvhost_gpu::KickoffPB(const std::vector<u8>& input, std::vector<u8>& output,
|
|||
UNIMPLEMENTED_IF(params.flags.add_increment.Value() != 0);
|
||||
|
||||
auto& gpu = system.GPU();
|
||||
const u32 current_syncpoint_value =
|
||||
gpu.GetSyncpointValue(static_cast<u32>(params.fence_out.id));
|
||||
u32 current_syncpoint_value = gpu.GetSyncpointValue(params.fence_out.id);
|
||||
if (params.flags.increment.Value()) {
|
||||
params.fence_out.value += current_syncpoint_value;
|
||||
} else {
|
||||
|
|
|
@ -61,9 +61,9 @@ void NVDRV::IoctlBase(Kernel::HLERequestContext& ctx, IoctlVersion version) {
|
|||
if (ctrl.must_delay) {
|
||||
ctrl.fresh_call = false;
|
||||
ctx.SleepClientThread(
|
||||
"NVServices::DelayedResponse", static_cast<u64>(ctrl.timeout),
|
||||
[=, this](std::shared_ptr<Kernel::Thread>, Kernel::HLERequestContext& ctx_,
|
||||
Kernel::ThreadWakeupReason) {
|
||||
"NVServices::DelayedResponse", ctrl.timeout,
|
||||
[=, this](std::shared_ptr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx_,
|
||||
Kernel::ThreadWakeupReason reason) {
|
||||
IoctlCtrl ctrl2{ctrl};
|
||||
std::vector<u8> tmp_output = output;
|
||||
std::vector<u8> tmp_output2 = output2;
|
||||
|
@ -77,7 +77,7 @@ void NVDRV::IoctlBase(Kernel::HLERequestContext& ctx, IoctlVersion version) {
|
|||
rb.Push(RESULT_SUCCESS);
|
||||
rb.Push(ioctl_result);
|
||||
},
|
||||
nvdrv->GetEventWriteable(static_cast<u32>(ctrl.event_id)));
|
||||
nvdrv->GetEventWriteable(ctrl.event_id));
|
||||
} else {
|
||||
ctx.WriteBuffer(output);
|
||||
if (version == IoctlVersion::Version3) {
|
||||
|
|
|
@ -247,7 +247,7 @@ void NVFlinger::Compose() {
|
|||
guard->unlock();
|
||||
for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) {
|
||||
const auto& fence = multi_fence.fences[fence_id];
|
||||
gpu.WaitFence(static_cast<u32>(fence.id), fence.value);
|
||||
gpu.WaitFence(fence.id, fence.value);
|
||||
}
|
||||
guard->lock();
|
||||
|
||||
|
|
|
@ -80,10 +80,10 @@ namespace Service {
|
|||
std::string_view port_name,
|
||||
const u32* cmd_buff) {
|
||||
// Number of params == bits 0-5 + bits 6-11
|
||||
const u32 num_params = (cmd_buff[0] & 0x3F) + ((cmd_buff[0] >> 6) & 0x3F);
|
||||
int num_params = (cmd_buff[0] & 0x3F) + ((cmd_buff[0] >> 6) & 0x3F);
|
||||
|
||||
std::string function_string = fmt::format("function '{}': port={}", name, port_name);
|
||||
for (u32 i = 1; i <= num_params; ++i) {
|
||||
for (int i = 1; i <= num_params; ++i) {
|
||||
function_string += fmt::format(", cmd_buff[{}]=0x{:X}", i, cmd_buff[i]);
|
||||
}
|
||||
return function_string;
|
||||
|
|
|
@ -91,8 +91,7 @@ void GetAvailableLanguageCodesImpl(Kernel::HLERequestContext& ctx, std::size_t m
|
|||
}
|
||||
|
||||
void GetKeyCodeMapImpl(Kernel::HLERequestContext& ctx) {
|
||||
const auto language_code =
|
||||
available_language_codes[static_cast<u32>(Settings::values.language_index.GetValue())];
|
||||
const auto language_code = available_language_codes[Settings::values.language_index.GetValue()];
|
||||
const auto key_code =
|
||||
std::find_if(language_to_layout.cbegin(), language_to_layout.cend(),
|
||||
[=](const auto& element) { return element.first == language_code; });
|
||||
|
@ -168,8 +167,7 @@ void SET::GetLanguageCode(Kernel::HLERequestContext& ctx) {
|
|||
|
||||
IPC::ResponseBuilder rb{ctx, 4};
|
||||
rb.Push(RESULT_SUCCESS);
|
||||
rb.PushEnum(
|
||||
available_language_codes[static_cast<u32>(Settings::values.language_index.GetValue())]);
|
||||
rb.PushEnum(available_language_codes[Settings::values.language_index.GetValue()]);
|
||||
}
|
||||
|
||||
void SET::GetRegionCode(Kernel::HLERequestContext& ctx) {
|
||||
|
|
|
@ -437,9 +437,9 @@ std::pair<s32, Errno> BSD::SocketImpl(Domain domain, Type type, Protocol protoco
|
|||
UNIMPLEMENTED_MSG("SOCK_RAW errno management");
|
||||
}
|
||||
|
||||
[[maybe_unused]] const bool unk_flag = (static_cast<u32>(type) & 0x20000000U) != 0;
|
||||
[[maybe_unused]] const bool unk_flag = (static_cast<u32>(type) & 0x20000000) != 0;
|
||||
UNIMPLEMENTED_IF_MSG(unk_flag, "Unknown flag in type");
|
||||
type = static_cast<Type>(static_cast<u32>(type) & ~0x20000000U);
|
||||
type = static_cast<Type>(static_cast<u32>(type) & ~0x20000000);
|
||||
|
||||
const s32 fd = FindFreeFileDescriptorHandle();
|
||||
if (fd < 0) {
|
||||
|
@ -447,7 +447,7 @@ std::pair<s32, Errno> BSD::SocketImpl(Domain domain, Type type, Protocol protoco
|
|||
return {-1, Errno::MFILE};
|
||||
}
|
||||
|
||||
FileDescriptor& descriptor = GetFileDescriptor(fd).emplace();
|
||||
FileDescriptor& descriptor = file_descriptors[fd].emplace();
|
||||
// ENONMEM might be thrown here
|
||||
|
||||
LOG_INFO(Service, "New socket fd={}", fd);
|
||||
|
@ -461,7 +461,7 @@ std::pair<s32, Errno> BSD::SocketImpl(Domain domain, Type type, Protocol protoco
|
|||
|
||||
std::pair<s32, Errno> BSD::PollImpl(std::vector<u8>& write_buffer, std::vector<u8> read_buffer,
|
||||
s32 nfds, s32 timeout) {
|
||||
if (write_buffer.size() < static_cast<size_t>(nfds) * sizeof(PollFD)) {
|
||||
if (write_buffer.size() < nfds * sizeof(PollFD)) {
|
||||
return {-1, Errno::INVAL};
|
||||
}
|
||||
|
||||
|
@ -471,7 +471,7 @@ std::pair<s32, Errno> BSD::PollImpl(std::vector<u8>& write_buffer, std::vector<u
|
|||
}
|
||||
|
||||
const size_t length = std::min(read_buffer.size(), write_buffer.size());
|
||||
std::vector<PollFD> fds(static_cast<size_t>(nfds));
|
||||
std::vector<PollFD> fds(nfds);
|
||||
std::memcpy(fds.data(), read_buffer.data(), length);
|
||||
|
||||
if (timeout >= 0) {
|
||||
|
@ -497,7 +497,7 @@ std::pair<s32, Errno> BSD::PollImpl(std::vector<u8>& write_buffer, std::vector<u
|
|||
return {0, Errno::SUCCESS};
|
||||
}
|
||||
|
||||
const std::optional<FileDescriptor>& descriptor = GetFileDescriptor(pollfd.fd);
|
||||
const std::optional<FileDescriptor>& descriptor = file_descriptors[pollfd.fd];
|
||||
if (!descriptor) {
|
||||
LOG_ERROR(Service, "File descriptor handle={} is not allocated", pollfd.fd);
|
||||
pollfd.revents = POLL_NVAL;
|
||||
|
@ -508,7 +508,7 @@ std::pair<s32, Errno> BSD::PollImpl(std::vector<u8>& write_buffer, std::vector<u
|
|||
std::vector<Network::PollFD> host_pollfds(fds.size());
|
||||
std::transform(fds.begin(), fds.end(), host_pollfds.begin(), [this](PollFD pollfd) {
|
||||
Network::PollFD result;
|
||||
result.socket = GetFileDescriptor(pollfd.fd)->socket.get();
|
||||
result.socket = file_descriptors[pollfd.fd]->socket.get();
|
||||
result.events = TranslatePollEventsToHost(pollfd.events);
|
||||
result.revents = 0;
|
||||
return result;
|
||||
|
@ -536,13 +536,13 @@ std::pair<s32, Errno> BSD::AcceptImpl(s32 fd, std::vector<u8>& write_buffer) {
|
|||
return {-1, Errno::MFILE};
|
||||
}
|
||||
|
||||
FileDescriptor& descriptor = *GetFileDescriptor(fd);
|
||||
FileDescriptor& descriptor = *file_descriptors[fd];
|
||||
auto [result, bsd_errno] = descriptor.socket->Accept();
|
||||
if (bsd_errno != Network::Errno::SUCCESS) {
|
||||
return {-1, Translate(bsd_errno)};
|
||||
}
|
||||
|
||||
FileDescriptor& new_descriptor = GetFileDescriptor(new_fd).emplace();
|
||||
FileDescriptor& new_descriptor = file_descriptors[new_fd].emplace();
|
||||
new_descriptor.socket = std::move(result.socket);
|
||||
new_descriptor.is_connection_based = descriptor.is_connection_based;
|
||||
|
||||
|
@ -561,7 +561,7 @@ Errno BSD::BindImpl(s32 fd, const std::vector<u8>& addr) {
|
|||
SockAddrIn addr_in;
|
||||
std::memcpy(&addr_in, addr.data(), sizeof(addr_in));
|
||||
|
||||
return Translate(GetFileDescriptor(fd)->socket->Bind(Translate(addr_in)));
|
||||
return Translate(file_descriptors[fd]->socket->Bind(Translate(addr_in)));
|
||||
}
|
||||
|
||||
Errno BSD::ConnectImpl(s32 fd, const std::vector<u8>& addr) {
|
||||
|
@ -573,7 +573,7 @@ Errno BSD::ConnectImpl(s32 fd, const std::vector<u8>& addr) {
|
|||
SockAddrIn addr_in;
|
||||
std::memcpy(&addr_in, addr.data(), sizeof(addr_in));
|
||||
|
||||
return Translate(GetFileDescriptor(fd)->socket->Connect(Translate(addr_in)));
|
||||
return Translate(file_descriptors[fd]->socket->Connect(Translate(addr_in)));
|
||||
}
|
||||
|
||||
Errno BSD::GetPeerNameImpl(s32 fd, std::vector<u8>& write_buffer) {
|
||||
|
@ -581,7 +581,7 @@ Errno BSD::GetPeerNameImpl(s32 fd, std::vector<u8>& write_buffer) {
|
|||
return Errno::BADF;
|
||||
}
|
||||
|
||||
const auto [addr_in, bsd_errno] = GetFileDescriptor(fd)->socket->GetPeerName();
|
||||
const auto [addr_in, bsd_errno] = file_descriptors[fd]->socket->GetPeerName();
|
||||
if (bsd_errno != Network::Errno::SUCCESS) {
|
||||
return Translate(bsd_errno);
|
||||
}
|
||||
|
@ -597,7 +597,7 @@ Errno BSD::GetSockNameImpl(s32 fd, std::vector<u8>& write_buffer) {
|
|||
return Errno::BADF;
|
||||
}
|
||||
|
||||
const auto [addr_in, bsd_errno] = GetFileDescriptor(fd)->socket->GetSockName();
|
||||
const auto [addr_in, bsd_errno] = file_descriptors[fd]->socket->GetSockName();
|
||||
if (bsd_errno != Network::Errno::SUCCESS) {
|
||||
return Translate(bsd_errno);
|
||||
}
|
||||
|
@ -612,7 +612,7 @@ Errno BSD::ListenImpl(s32 fd, s32 backlog) {
|
|||
if (!IsFileDescriptorValid(fd)) {
|
||||
return Errno::BADF;
|
||||
}
|
||||
return Translate(GetFileDescriptor(fd)->socket->Listen(backlog));
|
||||
return Translate(file_descriptors[fd]->socket->Listen(backlog));
|
||||
}
|
||||
|
||||
std::pair<s32, Errno> BSD::FcntlImpl(s32 fd, FcntlCmd cmd, s32 arg) {
|
||||
|
@ -620,14 +620,14 @@ std::pair<s32, Errno> BSD::FcntlImpl(s32 fd, FcntlCmd cmd, s32 arg) {
|
|||
return {-1, Errno::BADF};
|
||||
}
|
||||
|
||||
FileDescriptor& descriptor = *GetFileDescriptor(fd);
|
||||
FileDescriptor& descriptor = *file_descriptors[fd];
|
||||
|
||||
switch (cmd) {
|
||||
case FcntlCmd::GETFL:
|
||||
ASSERT(arg == 0);
|
||||
return {descriptor.flags, Errno::SUCCESS};
|
||||
case FcntlCmd::SETFL: {
|
||||
const bool enable = (static_cast<u32>(arg) & FLAG_O_NONBLOCK) != 0;
|
||||
const bool enable = (arg & FLAG_O_NONBLOCK) != 0;
|
||||
const Errno bsd_errno = Translate(descriptor.socket->SetNonBlock(enable));
|
||||
if (bsd_errno != Errno::SUCCESS) {
|
||||
return {-1, bsd_errno};
|
||||
|
@ -648,7 +648,7 @@ Errno BSD::SetSockOptImpl(s32 fd, u32 level, OptName optname, size_t optlen, con
|
|||
return Errno::BADF;
|
||||
}
|
||||
|
||||
Network::Socket* const socket = GetFileDescriptor(fd)->socket.get();
|
||||
Network::Socket* const socket = file_descriptors[fd]->socket.get();
|
||||
|
||||
if (optname == OptName::LINGER) {
|
||||
ASSERT(optlen == sizeof(Linger));
|
||||
|
@ -689,14 +689,14 @@ Errno BSD::ShutdownImpl(s32 fd, s32 how) {
|
|||
return Errno::BADF;
|
||||
}
|
||||
const Network::ShutdownHow host_how = Translate(static_cast<ShutdownHow>(how));
|
||||
return Translate(GetFileDescriptor(fd)->socket->Shutdown(host_how));
|
||||
    return Translate(file_descriptors[fd]->socket->Shutdown(host_how));
}

std::pair<s32, Errno> BSD::RecvImpl(s32 fd, u32 flags, std::vector<u8>& message) {
    if (!IsFileDescriptorValid(fd)) {
        return {-1, Errno::BADF};
    }
    return Translate(GetFileDescriptor(fd)->socket->Recv(flags, message));
    return Translate(file_descriptors[fd]->socket->Recv(flags, message));
}

std::pair<s32, Errno> BSD::RecvFromImpl(s32 fd, u32 flags, std::vector<u8>& message,

@@ -705,7 +705,7 @@ std::pair<s32, Errno> BSD::RecvFromImpl(s32 fd, u32 flags, std::vector<u8>& mess
        return {-1, Errno::BADF};
    }

    FileDescriptor& descriptor = *GetFileDescriptor(fd);
    FileDescriptor& descriptor = *file_descriptors[fd];

    Network::SockAddrIn addr_in{};
    Network::SockAddrIn* p_addr_in = nullptr;

@@ -719,7 +719,7 @@ std::pair<s32, Errno> BSD::RecvFromImpl(s32 fd, u32 flags, std::vector<u8>& mess
    // Apply flags
    if ((flags & FLAG_MSG_DONTWAIT) != 0) {
        flags &= ~FLAG_MSG_DONTWAIT;
        if ((static_cast<u32>(descriptor.flags) & FLAG_O_NONBLOCK) == 0) {
        if ((descriptor.flags & FLAG_O_NONBLOCK) == 0) {
            descriptor.socket->SetNonBlock(true);
        }
    }

@@ -727,7 +727,7 @@ std::pair<s32, Errno> BSD::RecvFromImpl(s32 fd, u32 flags, std::vector<u8>& mess
    const auto [ret, bsd_errno] = Translate(descriptor.socket->RecvFrom(flags, message, p_addr_in));

    // Restore original state
    if ((static_cast<u32>(descriptor.flags) & FLAG_O_NONBLOCK) == 0) {
    if ((descriptor.flags & FLAG_O_NONBLOCK) == 0) {
        descriptor.socket->SetNonBlock(false);
    }

@@ -748,7 +748,7 @@ std::pair<s32, Errno> BSD::SendImpl(s32 fd, u32 flags, const std::vector<u8>& me
    if (!IsFileDescriptorValid(fd)) {
        return {-1, Errno::BADF};
    }
    return Translate(GetFileDescriptor(fd)->socket->Send(message, flags));
    return Translate(file_descriptors[fd]->socket->Send(message, flags));
}

std::pair<s32, Errno> BSD::SendToImpl(s32 fd, u32 flags, const std::vector<u8>& message,

@@ -767,8 +767,7 @@ std::pair<s32, Errno> BSD::SendToImpl(s32 fd, u32 flags, const std::vector<u8>&
        p_addr_in = &addr_in;
    }

    const auto& descriptor = GetFileDescriptor(fd);
    return Translate(descriptor->socket->SendTo(flags, message, p_addr_in));
    return Translate(file_descriptors[fd]->socket->SendTo(flags, message, p_addr_in));
}

Errno BSD::CloseImpl(s32 fd) {

@@ -776,21 +775,20 @@ Errno BSD::CloseImpl(s32 fd) {
        return Errno::BADF;
    }

    auto& descriptor = GetFileDescriptor(fd);
    const Errno bsd_errno = Translate(descriptor->socket->Close());
    const Errno bsd_errno = Translate(file_descriptors[fd]->socket->Close());
    if (bsd_errno != Errno::SUCCESS) {
        return bsd_errno;
    }

    LOG_INFO(Service, "Close socket fd={}", fd);

    descriptor.reset();
    file_descriptors[fd].reset();
    return bsd_errno;
}

s32 BSD::FindFreeFileDescriptorHandle() noexcept {
    for (s32 fd = 0; fd < static_cast<s32>(file_descriptors.size()); ++fd) {
        if (!GetFileDescriptor(fd)) {
        if (!file_descriptors[fd]) {
            return fd;
        }
    }

@@ -802,7 +800,7 @@ bool BSD::IsFileDescriptorValid(s32 fd) const noexcept {
        LOG_ERROR(Service, "Invalid file descriptor handle={}", fd);
        return false;
    }
    if (!GetFileDescriptor(fd)) {
    if (!file_descriptors[fd]) {
        LOG_ERROR(Service, "File descriptor handle={} is not allocated", fd);
        return false;
    }

@@ -815,12 +813,10 @@ bool BSD::IsBlockingSocket(s32 fd) const noexcept {
    if (fd > static_cast<s32>(MAX_FD) || fd < 0) {
        return false;
    }

    const auto& descriptor = GetFileDescriptor(fd);
    if (!descriptor) {
    if (!file_descriptors[fd]) {
        return false;
    }
    return (static_cast<u32>(descriptor->flags) & FLAG_O_NONBLOCK) != 0;
    return (file_descriptors[fd]->flags & FLAG_O_NONBLOCK) != 0;
}

void BSD::BuildErrnoResponse(Kernel::HLERequestContext& ctx, Errno bsd_errno) const noexcept {

@@ -831,14 +827,6 @@ void BSD::BuildErrnoResponse(Kernel::HLERequestContext& ctx, Errno bsd_errno) co
    rb.PushEnum(bsd_errno);
}

std::optional<BSD::FileDescriptor>& BSD::GetFileDescriptor(s32 fd) {
    return file_descriptors[static_cast<u32>(fd)];
}

const std::optional<BSD::FileDescriptor>& BSD::GetFileDescriptor(s32 fd) const {
    return file_descriptors[static_cast<u32>(fd)];
}

BSD::BSD(Core::System& system, const char* name)
    : ServiceFramework(name), worker_pool{system, this} {
    // clang-format off
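The hunks above replace every GetFileDescriptor(fd) call with direct file_descriptors[fd] indexing. As background, here is a minimal, self-contained sketch of the warning the removed accessor was working around; the array size and flag type are simplified assumptions, not taken from the diff:

    #include <array>
    #include <cstddef>
    #include <optional>

    struct FileDescriptor {
        unsigned flags = 0;
    };

    // Simplified stand-in for the service's descriptor table.
    std::array<std::optional<FileDescriptor>, 128> file_descriptors;

    std::optional<FileDescriptor>& GetFileDescriptor(int fd) {
        // std::array::operator[] takes an unsigned size_type, so indexing with a
        // signed fd trips -Werror=sign-conversion; the accessor keeps the cast in
        // one place instead of at every call site.
        return file_descriptors[static_cast<std::size_t>(fd)];
    }

Reverting to file_descriptors[fd] is shorter, but each such index becomes an implicit signed-to-unsigned conversion again.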
@@ -167,9 +167,6 @@ private:

    void BuildErrnoResponse(Kernel::HLERequestContext& ctx, Errno bsd_errno) const noexcept;

    std::optional<FileDescriptor>& GetFileDescriptor(s32 fd);
    const std::optional<FileDescriptor>& GetFileDescriptor(s32 fd) const;

    std::array<std::optional<FileDescriptor>, MAX_FD> file_descriptors;

    BlockingWorkerPool<BSD, PollWork, AcceptWork, ConnectWork, RecvWork, RecvFromWork, SendWork,
@@ -64,7 +64,6 @@ Network::Type Translate(Type type) {
        return Network::Type::DGRAM;
    default:
        UNIMPLEMENTED_MSG("Unimplemented type={}", static_cast<int>(type));
        return {};
    }
}
@@ -117,8 +117,7 @@ static constexpr int GetMonthLength(bool is_leap_year, int month) {
    constexpr std::array<int, 12> month_lengths{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
    constexpr std::array<int, 12> month_lengths_leap{31, 29, 31, 30, 31, 30,
                                                     31, 31, 30, 31, 30, 31};
    const auto month_index = static_cast<u32>(month);
    return is_leap_year ? month_lengths_leap[month_index] : month_lengths[month_index];
    return is_leap_year ? month_lengths_leap[month] : month_lengths[month];
}

static constexpr bool IsDigit(char value) {

@@ -321,7 +320,7 @@ static bool ParsePosixName(const char* name, TimeZoneRule& rule) {
        int dest_len{};
        int dest_offset{};
        const char* dest_name{name + offset};
        if (rule.chars.size() < static_cast<std::size_t>(char_count)) {
        if (rule.chars.size() < std::size_t(char_count)) {
            return {};
        }

@@ -344,7 +343,7 @@ static bool ParsePosixName(const char* name, TimeZoneRule& rule) {
            return {};
        }
        char_count += dest_len + 1;
        if (rule.chars.size() < static_cast<std::size_t>(char_count)) {
        if (rule.chars.size() < std::size_t(char_count)) {
            return {};
        }
        if (name[offset] != '\0' && name[offset] != ',' && name[offset] != ';') {

@@ -387,7 +386,7 @@ static bool ParsePosixName(const char* name, TimeZoneRule& rule) {
        rule.default_type = 0;

        s64 jan_first{};
        u32 time_count{};
        int time_count{};
        int jan_offset{};
        int year_beginning{epoch_year};
        do {

@@ -415,7 +414,7 @@ static bool ParsePosixName(const char* name, TimeZoneRule& rule) {
                if (is_reversed ||
                    (start_time < end_time &&
                     (end_time - start_time < (year_seconds + (std_offset - dest_offset))))) {
                    if (rule.ats.size() - 2 < time_count) {
                    if (rule.ats.size() - 2 < std::size_t(time_count)) {
                        break;
                    }

@@ -439,7 +438,7 @@ static bool ParsePosixName(const char* name, TimeZoneRule& rule) {
            }
            jan_offset = 0;
        }
        rule.time_count = static_cast<s32>(time_count);
        rule.time_count = time_count;
        if (time_count == 0) {
            rule.type_count = 1;
        } else if (years_per_repeat < year - year_beginning) {

@@ -452,30 +451,26 @@ static bool ParsePosixName(const char* name, TimeZoneRule& rule) {
        }

        s64 their_std_offset{};
        for (u32 index = 0; index < static_cast<u32>(rule.time_count); ++index) {
        for (int index{}; index < rule.time_count; ++index) {
            const s8 type{rule.types[index]};
            const auto& tti = rule.ttis[static_cast<u8>(type)];

            if (tti.is_standard_time_daylight) {
                their_std_offset = -tti.gmt_offset;
            if (rule.ttis[type].is_standard_time_daylight) {
                their_std_offset = -rule.ttis[type].gmt_offset;
            }
        }

        s64 their_offset{their_std_offset};
        for (u32 index = 0; index < static_cast<u32>(rule.time_count); ++index) {
        for (int index{}; index < rule.time_count; ++index) {
            const s8 type{rule.types[index]};
            const auto& tti = rule.ttis[static_cast<u8>(type)];

            rule.types[index] = tti.is_dst ? 1 : 0;
            if (!tti.is_gmt) {
                if (!tti.is_standard_time_daylight) {
            rule.types[index] = rule.ttis[type].is_dst ? 1 : 0;
            if (!rule.ttis[type].is_gmt) {
                if (!rule.ttis[type].is_standard_time_daylight) {
                    rule.ats[index] += dest_offset - their_std_offset;
                } else {
                    rule.ats[index] += std_offset - their_std_offset;
                }
            }
            their_offset = -tti.gmt_offset;
            if (!tti.is_dst) {
            their_offset = -rule.ttis[type].gmt_offset;
            if (!rule.ttis[type].is_dst) {
                their_std_offset = their_offset;
            }
        }

@@ -499,16 +494,16 @@ static bool ParsePosixName(const char* name, TimeZoneRule& rule) {
    }

    rule.char_count = char_count;
    for (std::size_t index = 0; index < static_cast<std::size_t>(std_len); ++index) {
    for (int index{}; index < std_len; ++index) {
        rule.chars[index] = std_name[index];
    }

    rule.chars[static_cast<size_t>(std_len++)] = '\0';
    rule.chars[std_len++] = '\0';
    if (dest_len != 0) {
        for (int index = 0; index < dest_len; ++index) {
            rule.chars[static_cast<std::size_t>(std_len + index)] = dest_name[index];
        for (int index{}; index < dest_len; ++index) {
            rule.chars[std_len + index] = dest_name[index];
        }
        rule.chars[static_cast<std::size_t>(std_len + dest_len)] = '\0';
        rule.chars[std_len + dest_len] = '\0';
    }

    return true;
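In the loops above, the version being removed cached rule.ttis[static_cast<u8>(type)] in a local reference instead of repeating rule.ttis[type] with a signed s8 index. A small sketch of that pattern, with simplified types assumed purely for illustration:

    #include <array>
    #include <cstddef>
    #include <cstdint>

    struct TimeTypeInfo {
        std::int64_t gmt_offset = 0;
        bool is_dst = false;
    };

    struct Rule {
        std::array<std::int8_t, 128> types{};
        std::array<TimeTypeInfo, 128> ttis{};
    };

    std::int64_t GmtOffsetAt(const Rule& rule, std::size_t index) {
        const std::int8_t type = rule.types[index];
        // Casting the small signed type through u8 gives an explicitly unsigned
        // index, so -Werror=sign-conversion stays quiet and the entry is looked
        // up only once.
        const TimeTypeInfo& tti = rule.ttis[static_cast<std::uint8_t>(type)];
        return tti.gmt_offset;
    }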
@@ -536,33 +531,33 @@ static bool ParseTimeZoneBinary(TimeZoneRule& time_zone_rule, FileSys::VirtualFi

    int time_count{};
    u64 read_offset = sizeof(TzifHeader);
    for (size_t index = 0; index < static_cast<size_t>(time_zone_rule.time_count); ++index) {
    for (int index{}; index < time_zone_rule.time_count; ++index) {
        s64_be at{};
        vfs_file->ReadObject<s64_be>(&at, read_offset);
        time_zone_rule.types[index] = 1;
        if (time_count != 0 && at <= time_zone_rule.ats[static_cast<size_t>(time_count) - 1]) {
            if (at < time_zone_rule.ats[static_cast<size_t>(time_count) - 1]) {
        if (time_count != 0 && at <= time_zone_rule.ats[time_count - 1]) {
            if (at < time_zone_rule.ats[time_count - 1]) {
                return {};
            }
            time_zone_rule.types[index - 1] = 0;
            time_count--;
        }
        time_zone_rule.ats[static_cast<size_t>(time_count++)] = at;
        time_zone_rule.ats[time_count++] = at;
        read_offset += sizeof(s64_be);
    }
    time_count = 0;
    for (size_t index = 0; index < static_cast<size_t>(time_zone_rule.time_count); ++index) {
        const auto type{static_cast<s8>(*vfs_file->ReadByte(read_offset))};
        read_offset += sizeof(s8);
    for (int index{}; index < time_zone_rule.time_count; ++index) {
        const u8 type{*vfs_file->ReadByte(read_offset)};
        read_offset += sizeof(u8);
        if (time_zone_rule.time_count <= type) {
            return {};
        }
        if (time_zone_rule.types[index] != 0) {
            time_zone_rule.types[static_cast<size_t>(time_count++)] = type;
            time_zone_rule.types[time_count++] = type;
        }
    }
    time_zone_rule.time_count = time_count;
    for (size_t index = 0; index < static_cast<size_t>(time_zone_rule.type_count); ++index) {
    for (int index{}; index < time_zone_rule.type_count; ++index) {
        TimeTypeInfo& ttis{time_zone_rule.ttis[index]};
        u32_be gmt_offset{};
        vfs_file->ReadObject<u32_be>(&gmt_offset, read_offset);

@@ -584,11 +579,10 @@ static bool ParseTimeZoneBinary(TimeZoneRule& time_zone_rule, FileSys::VirtualFi
        ttis.abbreviation_list_index = abbreviation_list_index;
    }

    vfs_file->ReadArray(time_zone_rule.chars.data(), static_cast<u32>(time_zone_rule.char_count),
                        read_offset);
    time_zone_rule.chars[static_cast<u32>(time_zone_rule.char_count)] = '\0';
    read_offset += static_cast<u64>(time_zone_rule.char_count);
    for (size_t index = 0; index < static_cast<size_t>(time_zone_rule.type_count); ++index) {
    vfs_file->ReadArray(time_zone_rule.chars.data(), time_zone_rule.char_count, read_offset);
    time_zone_rule.chars[time_zone_rule.char_count] = '\0';
    read_offset += time_zone_rule.char_count;
    for (int index{}; index < time_zone_rule.type_count; ++index) {
        if (header.ttis_std_count == 0) {
            time_zone_rule.ttis[index].is_standard_time_daylight = false;
        } else {

@@ -601,7 +595,7 @@ static bool ParseTimeZoneBinary(TimeZoneRule& time_zone_rule, FileSys::VirtualFi
        }
    }

    for (size_t index = 0; index < static_cast<size_t>(time_zone_rule.type_count); ++index) {
    for (int index{}; index < time_zone_rule.type_count; ++index) {
        if (header.ttis_std_count == 0) {
            time_zone_rule.ttis[index].is_gmt = false;
        } else {

@@ -625,14 +619,13 @@ static bool ParseTimeZoneBinary(TimeZoneRule& time_zone_rule, FileSys::VirtualFi
    }

    std::array<char, time_zone_name_max + 1> temp_name{};
    vfs_file->ReadArray(temp_name.data(), static_cast<size_t>(bytes_read), read_offset);
    if (bytes_read > 2 && temp_name[0] == '\n' &&
        temp_name[static_cast<u64>(bytes_read - 1)] == '\n' &&
        static_cast<std::size_t>(time_zone_rule.type_count) + 2 <= time_zone_rule.ttis.size()) {
        temp_name[static_cast<u64>(bytes_read - 1)] = '\0';
    vfs_file->ReadArray(temp_name.data(), bytes_read, read_offset);
    if (bytes_read > 2 && temp_name[0] == '\n' && temp_name[bytes_read - 1] == '\n' &&
        std::size_t(time_zone_rule.type_count) + 2 <= time_zone_rule.ttis.size()) {
        temp_name[bytes_read - 1] = '\0';

        std::array<char, time_zone_name_max> name{};
        std::memcpy(name.data(), temp_name.data() + 1, static_cast<std::size_t>(bytes_read - 1));
        std::memcpy(name.data(), temp_name.data() + 1, std::size_t(bytes_read - 1));

        TimeZoneRule temp_rule;
        if (ParsePosixName(name.data(), temp_rule)) {

@@ -649,24 +642,24 @@ static bool ParseTimeZoneBinary(TimeZoneRule& time_zone_rule, FileSys::VirtualFi
    s32 default_type{};

    for (default_type = 0; default_type < time_zone_rule.time_count; default_type++) {
        if (time_zone_rule.types[static_cast<u32>(default_type)] == 0) {
        if (time_zone_rule.types[default_type] == 0) {
            break;
        }
    }

    default_type = default_type < time_zone_rule.time_count ? -1 : 0;
    if (default_type < 0 && time_zone_rule.time_count > 0 &&
        time_zone_rule.ttis[static_cast<u8>(time_zone_rule.types[0])].is_dst) {
        time_zone_rule.ttis[time_zone_rule.types[0]].is_dst) {
        default_type = time_zone_rule.types[0];
        while (--default_type >= 0) {
            if (!time_zone_rule.ttis[static_cast<u32>(default_type)].is_dst) {
            if (!time_zone_rule.ttis[default_type].is_dst) {
                break;
            }
        }
    }
    if (default_type < 0) {
        default_type = 0;
        while (time_zone_rule.ttis[static_cast<u32>(default_type)].is_dst) {
        while (time_zone_rule.ttis[default_type].is_dst) {
            if (++default_type >= time_zone_rule.type_count) {
                default_type = 0;
                break;
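The first loop of ParseTimeZoneBinary above reads the transition timestamps and drops entries that would make the sequence non-increasing. A hedged, self-contained sketch of that filtering step; the container type and failure handling here are simplified assumptions:

    #include <cstdint>
    #include <optional>
    #include <vector>

    // Returns the strictly increasing transition times, or nullopt if the data
    // is out of order; an equal timestamp replaces the previous entry, mirroring
    // the time_count-- / overwrite step in the hunk above.
    std::optional<std::vector<std::int64_t>> FilterTransitions(const std::vector<std::int64_t>& raw) {
        std::vector<std::int64_t> ats;
        for (const std::int64_t at : raw) {
            if (!ats.empty() && at <= ats.back()) {
                if (at < ats.back()) {
                    return std::nullopt; // timestamps went backwards: corrupt data
                }
                ats.pop_back(); // equal timestamp: keep only the newer entry
            }
            ats.push_back(at);
        }
        return ats;
    }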
@@ -756,12 +749,12 @@ static ResultCode ToCalendarTimeInternal(const TimeZoneRule& rules, s64 time,
                                         CalendarTimeInternal& calendar_time,
                                         CalendarAdditionalInfo& calendar_additional_info) {
    if ((rules.go_ahead && time < rules.ats[0]) ||
        (rules.go_back && time > rules.ats[static_cast<size_t>(rules.time_count - 1)])) {
        (rules.go_back && time > rules.ats[rules.time_count - 1])) {
        s64 seconds{};
        if (time < rules.ats[0]) {
            seconds = rules.ats[0] - time;
        } else {
            seconds = time - rules.ats[static_cast<size_t>(rules.time_count - 1)];
            seconds = time - rules.ats[rules.time_count - 1];
        }
        seconds--;

@@ -774,8 +767,7 @@ static ResultCode ToCalendarTimeInternal(const TimeZoneRule& rules, s64 time,
        } else {
            new_time -= seconds;
        }
        if (new_time < rules.ats[0] &&
            new_time > rules.ats[static_cast<size_t>(rules.time_count - 1)]) {
        if (new_time < rules.ats[0] && new_time > rules.ats[rules.time_count - 1]) {
            return ERROR_TIME_NOT_FOUND;
        }
        if (const ResultCode result{

@@ -799,27 +791,25 @@ static ResultCode ToCalendarTimeInternal(const TimeZoneRule& rules, s64 time,
        s32 low{1};
        s32 high{rules.time_count};
        while (low < high) {
            const s32 mid{(low + high) >> 1};
            if (time < rules.ats[static_cast<size_t>(mid)]) {
            s32 mid{(low + high) >> 1};
            if (time < rules.ats[mid]) {
                high = mid;
            } else {
                low = mid + 1;
            }
        }
        tti_index = rules.types[static_cast<size_t>(low - 1)];
        tti_index = rules.types[low - 1];
    }

    if (const ResultCode result{
            CreateCalendarTime(time, rules.ttis[static_cast<u32>(tti_index)].gmt_offset,
    if (const ResultCode result{CreateCalendarTime(time, rules.ttis[tti_index].gmt_offset,
                                                   calendar_time, calendar_additional_info)};
        result != RESULT_SUCCESS) {
        return result;
    }

    const auto& tti = rules.ttis[static_cast<size_t>(tti_index)];
    calendar_additional_info.is_dst = tti.is_dst;
    const char* time_zone{&rules.chars[static_cast<size_t>(tti.abbreviation_list_index)]};
    for (size_t index = 0; time_zone[index] != '\0'; ++index) {
    calendar_additional_info.is_dst = rules.ttis[tti_index].is_dst;
    const char* time_zone{&rules.chars[rules.ttis[tti_index].abbreviation_list_index]};
    for (int index{}; time_zone[index] != '\0'; ++index) {
        calendar_additional_info.timezone_name[index] = time_zone[index];
    }
    return RESULT_SUCCESS;
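The lookup being edited above is a plain binary search over the sorted transition times: find the last transition at or before time and take its type. A minimal sketch under those assumptions (container types simplified, at least one transition at or before time assumed, names follow the diff):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    int FindTimeTypeIndex(const std::vector<std::int64_t>& ats,
                          const std::vector<std::int8_t>& types, std::int64_t time) {
        std::size_t low = 1;
        std::size_t high = ats.size();
        while (low < high) {
            const std::size_t mid = (low + high) >> 1;
            if (time < ats[mid]) {
                high = mid; // transition at mid is still in the future
            } else {
                low = mid + 1; // transition at mid has already happened
            }
        }
        return types[low - 1]; // type that is active at 'time'
    }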
@@ -49,12 +49,12 @@ void ITimeZoneService::LoadTimeZoneRule(Kernel::HLERequestContext& ctx) {
    const auto raw_location_name{rp.PopRaw<std::array<u8, 0x24>>()};

    std::string location_name;
    for (const auto byte : raw_location_name) {
    for (const auto& byte : raw_location_name) {
        // Strip extra bytes
        if (byte == '\0') {
            break;
        }
        location_name.push_back(static_cast<char>(byte));
        location_name.push_back(byte);
    }

    LOG_DEBUG(Service_Time, "called, location_name={}", location_name);
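The loop above turns a fixed 0x24-byte request field into a string, stopping at the first NUL. A self-contained sketch of the same unpacking; the buffer size follows the diff, everything else is illustrative:

    #include <array>
    #include <cstdint>
    #include <string>

    std::string UnpackLocationName(const std::array<std::uint8_t, 0x24>& raw) {
        std::string name;
        for (const std::uint8_t byte : raw) {
            if (byte == '\0') {
                break; // the rest of the field is zero padding
            }
            // The explicit u8 -> char cast is what the clang-fix version added.
            name.push_back(static_cast<char>(byte));
        }
        return name;
    }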
Some files were not shown because too many files have changed in this diff.