From 05fced237bfe991ce4328f1f95ec72f916bcb113 Mon Sep 17 00:00:00 2001
From: Fares Schulz
Date: Mon, 20 May 2024 16:10:56 +0200
Subject: [PATCH] Updated docu for new anira::InferenceConfig format, new anira version 0.1.0

---
 CMakeLists.txt      |  2 +-
 README.md           |  3 ---
 docs/anira-usage.md | 34 +++++++++++++++++++++++-----------
 3 files changed, 24 insertions(+), 15 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4bef544..5a1f4bd 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -22,7 +22,7 @@ option(ANIRA_WITH_TFLITE "Build with TensorFlow Lite backend" ON)
 
 set (PROJECT_NAME anira)
 
-project (${PROJECT_NAME} VERSION 0.0.8)
+project (${PROJECT_NAME} VERSION 0.1.0)
 
 # Sets the minimum macOS version, c++20 is only available from macOS 11.0
 if (APPLE)
diff --git a/README.md b/README.md
index 1471e35..d9c96c9 100644
--- a/README.md
+++ b/README.md
@@ -28,9 +28,6 @@ anira::InferenceConfig myNNConfig(
     "path/to/your/model.onnx (or *.pt, *.tflite)", // Model path
     {2048, 1, 150}, // Input shape
     {2048, 1}, // Output shape
-    2048, // Batch size
-    150, // Model input size
-    1, // Model output size
     42.66f // Maximum inference time in ms
 );
 
diff --git a/docs/anira-usage.md b/docs/anira-usage.md
index 27379ec..389d72d 100644
--- a/docs/anira-usage.md
+++ b/docs/anira-usage.md
@@ -35,9 +35,6 @@ anira::InferenceConfig hybridNNConfig(
     {2048, 150, 1}, // Input shape for TensorFlow Lite (required, when -DANIRA_WITH_TFLITE=ON)
     {2048, 1}, // Output shape for TensorFlow Lite (required, when -DANIRA_WITH_TFLITE=ON)
 #endif
-    2048, // Batch size (required)
-    150, // Model input size (required)
-    1, // Model output size (required)
 
     42.66f, // Maximum inference time in ms for processing of all batches (required)
     0, // Internal model latency in samples for processing of all batches (optional: default = 0)
@@ -73,15 +70,30 @@ When your pre- and post-processing requires to access values from the ```anira::
 class MyPrePostProcessor : public anira::PrePostProcessor {
 public:
-    void preProcess(anira::RingBuffer& input, anira::AudioBufferF& output, [[maybe_unused]] anira::InferenceBackend currentInferenceBackend) override {
-        for (size_t batch = 0; batch < config.m_batch_size; ++batch) {
-            size_t baseIdx = batch * config.m_model_input_size;
-            popSamplesFromBuffer(input, output, config.m_model_output_size, config.m_model_input_size-config.m_model_output_size, baseIdx);
+    virtual void preProcess(anira::RingBuffer& input, anira::AudioBufferF& output, [[maybe_unused]] anira::InferenceBackend currentInferenceBackend) override {
+        int64_t num_batches;
+        int64_t num_input_samples;
+        int64_t num_output_samples;
+        if (currentInferenceBackend == anira::LIBTORCH) {
+            num_batches = config.m_model_input_shape_torch[0];
+            num_input_samples = config.m_model_input_shape_torch[2];
+            num_output_samples = config.m_model_output_shape_torch[1];
+        } else if (currentInferenceBackend == anira::ONNX) {
+            num_batches = config.m_model_input_shape_onnx[0];
+            num_input_samples = config.m_model_input_shape_onnx[2];
+            num_output_samples = config.m_model_output_shape_onnx[1];
+        } else if (currentInferenceBackend == anira::TFLITE) {
+            num_batches = config.m_model_input_shape_tflite[0];
+            num_input_samples = config.m_model_input_shape_tflite[1];
+            num_output_samples = config.m_model_output_shape_tflite[1];
+        } else {
+            throw std::runtime_error("Invalid inference backend");
+        }
+
+        for (size_t batch = 0; batch < num_batches; batch++) {
+            size_t baseIdx = batch * num_input_samples;
+            popSamplesFromBuffer(input, output, num_output_samples, num_input_samples-num_output_samples, baseIdx);
         }
-    };
-
-    void postProcess(anira::AudioBufferF& input, anira::RingBuffer& output, [[maybe_unused]] anira::InferenceBackend currentInferenceBackend) {
-        pushSamplesToBuffer(input, output);
     };
 
     anira::InferenceConfig config = myConfig;
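
For reference, a minimal sketch of how downstream code might adopt the new 0.1.0 constructor documented in the README hunk above, and how the values that used to be separate constructor arguments (batch size, model input size, model output size) can be read back from the per-backend shape members used in the docs/anira-usage.md hunk. The include path and the assumption that the single-shape constructor populates the ONNX shape members are not confirmed by this patch and should be treated as hypothetical.

```cpp
// Sketch only: the constructor call is copied from the README hunk above; the
// header path and the ONNX shape-member mapping are assumptions, not part of
// the patch.
#include <anira/anira.h>
#include <cstdint>

anira::InferenceConfig myNNConfig(
    "path/to/your/model.onnx (or *.pt, *.tflite)", // Model path
    {2048, 1, 150},                                // Input shape
    {2048, 1},                                     // Output shape
    42.66f                                         // Maximum inference time in ms
);

// Values previously passed as explicit constructor arguments are now derived
// from the shape members, following the indices used in the docs example:
int64_t num_batches        = myNNConfig.m_model_input_shape_onnx[0];  // 2048
int64_t num_input_samples  = myNNConfig.m_model_input_shape_onnx[2];  // 150
int64_t num_output_samples = myNNConfig.m_model_output_shape_onnx[1]; // 1
```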