using NAudio.Flac;
using NAudio.Wave;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;

namespace SongVocalIsolateAutomation.AudioProcessing
{
    /// <summary>
    /// Loads FLAC files into normalized float sample buffers via NAudio.
    /// </summary>
    internal class WaveLoader
    {
        /// <summary>
        /// Decodes a FLAC file into interleaved samples normalized to [-1, 1).
        /// </summary>
        /// <param name="path">Path to the FLAC file.</param>
        /// <param name="trimZeroPrefix">When true, drops leading exactly-zero samples.
        /// NOTE(review): trimming an odd number of zeros from multi-channel audio shifts
        /// the channel interleaving — confirm callers only rely on this where the silent
        /// prefix is zero across all channels.</param>
        /// <returns>Decoded wave data (samples, channel count, sample rate).</returns>
        public static WaveData LoadFlac(string path, bool trimZeroPrefix = true)
        {
            using (var waveStream = new FlacReader(path))
            {
                var waveFormat = waveStream.WaveFormat;
                var allSamples = ReadAllSamples(waveStream, waveFormat);
                if (trimZeroPrefix)
                {
                    allSamples = allSamples.SkipWhile(s => s == 0).ToArray();
                }

                return new WaveData(allSamples, waveFormat.Channels, waveFormat.SampleRate);
            }
        }

        /// <summary>
        /// Reads every sample from the stream and converts it to a float in [-1, 1).
        /// Supports 16-bit and 24-bit little-endian PCM only.
        /// </summary>
        /// <param name="waveStream">Stream to read; rewound to the start before reading.</param>
        /// <param name="waveFormat">Format describing encoding, bit depth and block alignment.</param>
        /// <returns>All samples as normalized floats, interleaved by channel.</returns>
        /// <exception cref="NotSupportedException">
        /// Encoding is not PCM, or bit depth is neither 16 nor 24.
        /// </exception>
        private static float[] ReadAllSamples(WaveStream waveStream, WaveFormat waveFormat)
        {
            if (waveFormat.Encoding != WaveFormatEncoding.Pcm)
                throw new NotSupportedException("PCM only support");
            if (waveFormat.BitsPerSample != 16 && waveFormat.BitsPerSample != 24)
                throw new NotSupportedException("16 or 24 bit per sample only support");

            waveStream.Seek(0, SeekOrigin.Begin);

            // Presize from the stream duration to avoid repeated list growth.
            // BUGFIX: the original wrote "new List(...)" without a type argument,
            // which does not compile with only System.Collections.Generic imported.
            var sampleData = new List<float>(
                (int)((waveStream.TotalTime.TotalSeconds + 1) * waveFormat.SampleRate));

            var bytesPerSample = waveFormat.BitsPerSample / 8;
            var buffer = new byte[waveFormat.BlockAlign];
            while (true)
            {
                var read = waveStream.Read(buffer, 0, buffer.Length);
                if (read != buffer.Length)
                    break; // EOF (a trailing partial block is discarded, as before).

                for (var offset = 0; offset + bytesPerSample <= read; offset += bytesPerSample)
                {
                    if (bytesPerSample == 2)
                    {
                        // Little-endian 16-bit: low byte first; cast to short for sign.
                        var value = (short)(buffer[offset] | (buffer[offset + 1] << 8));
                        sampleData.Add(value / 32768f);
                    }
                    else
                    {
                        // Little-endian 24-bit: place the three bytes in the top 24 bits
                        // of an int so the sign bit lands at bit 31, then scale by 2^31.
                        // BUGFIX: the original put the high byte at bit 24 but the middle
                        // byte at bit 8, skipping bits 16-23 and corrupting every sample.
                        var value = (buffer[offset] << 8)
                                    | (buffer[offset + 1] << 16)
                                    | (buffer[offset + 2] << 24);
                        sampleData.Add(value / 2147483648f);
                    }
                }
            }

            return sampleData.ToArray();
        }
    }
}