// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <array>
#include <memory>

#include "audio_core/hle/dsp.h"
#include "audio_core/hle/mixers.h"
#include "audio_core/hle/pipe.h"
#include "audio_core/hle/source.h"
#include "audio_core/sink.h"
#include "audio_core/time_stretch.h"

namespace DSP {
namespace HLE {
// Region management

std::array<SharedMemory, 2> g_regions;
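
// Returns the index (0 or 1) of the region whose frame counter is ahead, i.e. the region
// holding the most recent data; a counter that has wrapped past 0xFFFF back to zero still
// counts as the newer one. The counters are presumably advanced by the emulated application.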
static size_t CurrentRegionIndex() {
    // The region with the higher frame counter is chosen unless there is wraparound.
    // This function only returns a 0 or 1.

    if (g_regions[0].frame_counter == 0xFFFFu && g_regions[1].frame_counter != 0xFFFEu) {
        // Wraparound has occurred.
        return 1;
    }

    if (g_regions[1].frame_counter == 0xFFFFu && g_regions[0].frame_counter != 0xFFFEu) {
        // Wraparound has occurred.
        return 0;
    }

    return (g_regions[0].frame_counter > g_regions[1].frame_counter) ? 0 : 1;
}
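
// The region to read the application's configuration and input data from: the one
// CurrentRegionIndex() identifies as most recently updated.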
static SharedMemory& ReadRegion() {
    return g_regions[CurrentRegionIndex()];
}
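
// The region to write DSP status and output samples into: the other one.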
static SharedMemory& WriteRegion() {
    return g_regions[1 - CurrentRegionIndex()];
}

// Audio processing and mixing
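
// One Source object per DSP source (num_sources of them), each constructed with its index,
// plus the Mixers instance that folds the three intermediate mixes into the final output.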
static std::array<Source, num_sources> sources = {
    Source(0),  Source(1),  Source(2),  Source(3),  Source(4),  Source(5),
    Source(6),  Source(7),  Source(8),  Source(9),  Source(10), Source(11),
    Source(12), Source(13), Source(14), Source(15), Source(16), Source(17),
    Source(18), Source(19), Source(20), Source(21), Source(22), Source(23)
};

static Mixers mixers;
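
// Generates one audio frame: ticks every source against the current configuration,
// accumulates them into three intermediate mixes, runs the mixers for the final mix,
// writes the resulting PCM16 samples back to the shared-memory write region, and
// returns the frame for host-side output.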
static StereoFrame16 GenerateCurrentFrame() {
    SharedMemory& read = ReadRegion();
    SharedMemory& write = WriteRegion();

    std::array<QuadFrame32, 3> intermediate_mixes = {};

    // Generate intermediate mixes
    for (size_t i = 0; i < num_sources; i++) {
        write.source_statuses.status[i] =
            sources[i].Tick(read.source_configurations.config[i], read.adpcm_coefficients.coeff[i]);
        for (size_t mix = 0; mix < 3; mix++) {
            sources[i].MixInto(intermediate_mixes[mix], mix);
        }
    }

    // Generate final mix
    write.dsp_status = mixers.Tick(read.dsp_configuration, read.intermediate_mix_samples,
                                   write.intermediate_mix_samples, intermediate_mixes);

    StereoFrame16 output_frame = mixers.GetOutput();

    // Write current output frame to the shared memory region
    for (size_t samplei = 0; samplei < output_frame.size(); samplei++) {
        for (size_t channeli = 0; channeli < output_frame[0].size(); channeli++) {
            write.final_samples.pcm16[samplei][channeli] = s16_le(output_frame[samplei][channeli]);
        }
    }

    return output_frame;
}

// Audio output
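
// Host-side output state. When time stretching is enabled, generated frames pass through
// the time stretcher before reaching the sink; otherwise they are enqueued directly and
// dropped if the sink's queue grows too long.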
static bool perform_time_stretching = true;
static std::unique_ptr<AudioCore::Sink> sink;
static AudioCore::TimeStretcher time_stretcher;
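
// Drains any audio still buffered inside the time stretcher into the sink. The sample
// count is divided by two because the stretcher outputs interleaved stereo s16 samples
// while the sink takes a frame count (as with frame.size() in OutputCurrentFrame below).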
static void FlushResidualStretcherAudio() {
    time_stretcher.Flush();
    while (true) {
        std::vector<s16> residual_audio = time_stretcher.Process(sink->SamplesInQueue());
        if (residual_audio.empty())
            break;
        sink->EnqueueSamples(residual_audio.data(), residual_audio.size() / 2);
    }
}
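
// Hands one generated frame to the host audio sink, either through the time stretcher
// or directly; in the direct path the frame is dropped if too many samples are already
// queued.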
static void OutputCurrentFrame(const StereoFrame16& frame) {
    if (perform_time_stretching) {
        time_stretcher.AddSamples(&frame[0][0], frame.size());
        std::vector<s16> stretched_samples = time_stretcher.Process(sink->SamplesInQueue());
        sink->EnqueueSamples(stretched_samples.data(), stretched_samples.size() / 2);
    } else {
        constexpr size_t maximum_sample_latency = 2048; // about 64 milliseconds
        if (sink->SamplesInQueue() > maximum_sample_latency) {
            // This can occur if we're running too fast and samples are starting to back up.
            // Just drop the samples.
            return;
        }

        sink->EnqueueSamples(&frame[0][0], frame.size());
    }
}
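
// Enables or disables time stretching. When disabling, audio still held by the stretcher
// is flushed to the sink first so it is not lost.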
void EnableStretching(bool enable) {
    if (perform_time_stretching == enable)
        return;

    if (!enable) {
        FlushResidualStretcherAudio();
    }
    perform_time_stretching = enable;
}

// Public Interface

void Init() {
    DSP::HLE::ResetPipes();

    for (auto& source : sources) {
        source.Reset();
    }

    mixers.Reset();

    time_stretcher.Reset();
    if (sink) {
        time_stretcher.SetOutputSampleRate(sink->GetNativeSampleRate());
    }
}

void Shutdown() {
    if (perform_time_stretching) {
        FlushResidualStretcherAudio();
    }
}

bool Tick() {
    StereoFrame16 current_frame = {};

    // TODO: Check the dsp::DSP semaphore (it indicates that the emulated application has
    //       finished writing to its shared memory region).
    current_frame = GenerateCurrentFrame();

    OutputCurrentFrame(current_frame);

    return true;
}

void SetSink(std::unique_ptr<AudioCore::Sink> sink_) {
    sink = std::move(sink_);
    time_stretcher.SetOutputSampleRate(sink->GetNativeSampleRate());
}

} // namespace HLE
} // namespace DSP