| 1 | /* |
| 2 | Copyright 2018 Google Inc. All Rights Reserved. |
| 3 | |
| 4 | Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | you may not use this file except in compliance with the License. |
| 6 | You may obtain a copy of the License at |
| 7 | |
| 8 | http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | |
| 10 | Unless required by applicable law or agreed to in writing, software |
| 11 | distributed under the License is distributed on an "AS-IS" BASIS, |
| 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | See the License for the specific language governing permissions and |
| 14 | limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | #include "dsp/reflections_processor.h" |
| 18 | |
| 19 | #include <algorithm> |
| 20 | |
| 21 | #include "base/constants_and_types.h" |
| 22 | #include "base/logging.h" |
| 23 | #include "dsp/filter_coefficient_generators.h" |
| 24 | #include "dsp/gain.h" |
| 25 | #include "dsp/shoe_box_room.h" |
| 26 | |
| 27 | namespace vraudio { |
| 28 | |
| 29 | namespace { |
| 30 | |
| 31 | // Maximum allowed delay time for a reflection. Above 2s, the effective output |
| 32 | // level of a reflection will fall below -60dB and thus perceived dynamic |
| 33 | // changes should be negligible. |
| 34 | const size_t kMaxDelayTimeSeconds = 2; |
| 35 | |
| 36 | // Returns the maximum delay time in the given set of reflections. |
| 37 | float FindMaxReflectionDelayTime(const std::vector<Reflection>& reflections) { |
| 38 | float max_delay_time = 0.0f; |
| 39 | for (const auto& reflection : reflections) { |
    max_delay_time = std::max(max_delay_time, reflection.delay_time_seconds);
| 41 | } |
| 42 | return max_delay_time; |
| 43 | } |
| 44 | |
| 45 | } // namespace |
| 46 | |
| 47 | ReflectionsProcessor::ReflectionsProcessor(int sample_rate, |
| 48 | size_t frames_per_buffer) |
| 49 | : sample_rate_(sample_rate), |
| 50 | frames_per_buffer_(frames_per_buffer), |
| 51 | max_delay_samples_(kMaxDelayTimeSeconds * sample_rate_), |
| 52 | low_pass_filter_(0.0f), |
| 53 | temp_mono_buffer_(kNumMonoChannels, frames_per_buffer_), |
| 54 | current_reflection_buffer_(kNumFirstOrderAmbisonicChannels, |
| 55 | frames_per_buffer), |
| 56 | target_reflection_buffer_(kNumFirstOrderAmbisonicChannels, |
| 57 | frames_per_buffer), |
| 58 | target_reflections_(kNumRoomSurfaces), |
| 59 | crossfade_(false), |
| 60 | crossfader_(frames_per_buffer_), |
| 61 | num_frames_to_process_on_empty_input_(0), |
| 62 | delays_(kNumRoomSurfaces), |
| 63 | delay_filter_(max_delay_samples_, frames_per_buffer), |
| 64 | delay_buffer_(kNumRoomSurfaces, frames_per_buffer), |
| 65 | gains_(kNumRoomSurfaces), |
| 66 | gain_processors_(kNumRoomSurfaces) { |
| 67 | DCHECK_GT(sample_rate_, 0); |
| 68 | DCHECK_GT(frames_per_buffer_, 0U); |
| 69 | } |
| 70 | |
| 71 | void ReflectionsProcessor::Update( |
| 72 | const ReflectionProperties& reflection_properties, |
| 73 | const WorldPosition& listener_position) { |
| 74 | |
| 75 | // Initialize the low-pass filter. |
  const float low_pass_coefficient = ComputeLowPassMonoPoleCoefficient(
      reflection_properties.cutoff_frequency, sample_rate_);
| 78 | low_pass_filter_.SetCoefficient(low_pass_coefficient); |
| 79 | // Update the target reflections. |
| 80 | WorldPosition relative_listener_position; |
  GetRelativeDirection(
      WorldPosition(reflection_properties.room_position),
      WorldRotation(reflection_properties.room_rotation).conjugate(),
      listener_position, &relative_listener_position);
  ComputeReflections(relative_listener_position,
                     WorldPosition(reflection_properties.room_dimensions),
                     reflection_properties.coefficients, &target_reflections_);
  // An additional |frames_per_buffer_| frames are needed to compensate for the
  // crossfade between the current and target reflections.
  num_frames_to_process_on_empty_input_ =
      frames_per_buffer_ +
      static_cast<size_t>(FindMaxReflectionDelayTime(target_reflections_) *
                          static_cast<float>(sample_rate_));
| 94 | // Reflections have been updated so crossfade is required. |
| 95 | crossfade_ = true; |
| 96 | } |
| 97 | |
| 98 | void ReflectionsProcessor::Process(const AudioBuffer& input, |
| 99 | AudioBuffer* output) { |
| 100 | DCHECK_EQ(input.num_channels(), kNumMonoChannels); |
| 101 | DCHECK_EQ(input.num_frames(), frames_per_buffer_); |
| 102 | DCHECK(output); |
| 103 | DCHECK_GE(output->num_channels(), kNumFirstOrderAmbisonicChannels); |
| 104 | DCHECK_EQ(output->num_frames(), frames_per_buffer_); |
| 105 | // Prefilter mono input. |
| 106 | const AudioBuffer::Channel& input_channel = input[0]; |
| 107 | AudioBuffer::Channel* temp_channel = &temp_mono_buffer_[0]; |
| 108 | const bool filter_success = |
      low_pass_filter_.Filter(input_channel, temp_channel);
| 110 | const AudioBuffer::Channel& low_pass_channel = |
| 111 | filter_success ? *temp_channel : input_channel; |
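  // Feed the (possibly low-passed) input into the shared delay line; each room
  // surface reflection reads its own delayed tap from it in ApplyReflections().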
  delay_filter_.InsertData(low_pass_channel);
| 113 | // Process reflections. |
| 114 | if (crossfade_) { |
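    // Render the reflections with both the previous (current) and the newly
    // updated (target) parameters, then linearly crossfade between the two
    // buffers to avoid audible discontinuities when the room changes.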
| 115 | ApplyReflections(output: ¤t_reflection_buffer_); |
| 116 | UpdateGainsAndDelays(); |
| 117 | ApplyReflections(output: &target_reflection_buffer_); |
| 118 | crossfader_.ApplyLinearCrossfade(input_fade_in: target_reflection_buffer_, |
| 119 | input_fade_out: current_reflection_buffer_, output); |
| 120 | crossfade_ = false; |
| 121 | } else { |
| 122 | ApplyReflections(output); |
| 123 | } |
| 124 | } |
| 125 | |
| 126 | void ReflectionsProcessor::UpdateGainsAndDelays() { |
| 127 | for (size_t i = 0; i < kNumRoomSurfaces; ++i) { |
    delays_[i] =
        std::min(max_delay_samples_,
                 static_cast<size_t>(target_reflections_[i].delay_time_seconds *
                                     static_cast<float>(sample_rate_)));
| 132 | gains_[i] = target_reflections_[i].magnitude; |
| 133 | } |
| 134 | } |
| 135 | |
| 136 | void ReflectionsProcessor::ApplyReflections(AudioBuffer* output) { |
| 137 | DCHECK(output); |
| 138 | DCHECK_GE(output->num_channels(), kNumFirstOrderAmbisonicChannels); |
  output->Clear();
| 140 | for (size_t i = 0; i < kNumRoomSurfaces; ++i) { |
| 141 | auto* delay_channel = &delay_buffer_[i]; |
    delay_filter_.GetDelayedData(delays_[i], delay_channel);
    const bool zero_gain = IsGainNearZero(gains_[i]) &&
                           IsGainNearZero(gain_processors_[i].GetGain());
| 145 | if (!zero_gain) { |
      gain_processors_[i].ApplyGain(gains_[i], *delay_channel, delay_channel,
                                    false /* accumulate_output */);
| 148 | // Applies fast Ambisonic reflections encoding. |
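      // The first-order output channels are assumed to follow the ACN ordering
      // (W = 0, Y = 1, Z = 2, X = 3), so each reflection is encoded into the
      // omnidirectional W channel plus the dipole channel aligned with the
      // normal of the reflecting surface.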
| 149 | (*output)[0] += *delay_channel; |
| 150 | switch (i) { |
| 151 | case 0: /* left wall reflection */ |
| 152 | (*output)[1] += *delay_channel; |
| 153 | break; |
| 154 | case 1: /* right wall reflection */ |
| 155 | (*output)[1] -= *delay_channel; |
| 156 | break; |
| 157 | case 2: /* floor reflection */ |
| 158 | (*output)[2] -= *delay_channel; |
| 159 | break; |
| 160 | case 3: /* ceiling reflection */ |
| 161 | (*output)[2] += *delay_channel; |
| 162 | break; |
| 163 | case 4: /* front wall reflection */ |
| 164 | (*output)[3] += *delay_channel; |
| 165 | break; |
| 166 | case 5: /* back wall reflection */ |
| 167 | (*output)[3] -= *delay_channel; |
| 168 | break; |
| 169 | } |
| 170 | } else { |
| 171 | // Make sure the gain processor is initialized. |
      gain_processors_[i].Reset(0.0f);
| 173 | } |
| 174 | } |
| 175 | } |
| 176 | |
| 177 | } // namespace vraudio |
| 178 | |