Using keyword recognition on the Arduino
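The sketch below is adapted from the Edge Impulse continuous microphone inferencing example for boards with a PDM microphone (such as the Arduino Nano 33 BLE Sense). It samples audio continuously, runs the trained "yes"/"no" keyword model on each slice, and drives digital pins 2 and 3 when a keyword is recognised with at least 50% confidence. In practice each recognised keyword produces a roughly 500 ms pulse on its pin, because the label loop pulls both pins low again as soon as a non-matching label is processed.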
/* Edge Impulse Arduino examples
* Copyright (c) 2021 EdgeImpulse Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
// If your target is limited in memory, remove this macro to save 10K RAM
#define EIDSP_QUANTIZE_FILTERBANK 0
/**
* Define the number of slices per model window. E.g. a model window of 1000 ms
* with slices per model window set to 4. Results in a slice size of 250 ms.
* For more info: https://docs.edgeimpulse.com/docs/continuous-audio-sampling
*/
#define EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW 3
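// Worked example for the value above: assuming the 1000 ms model window from the
// comment, 3 slices per window gives a slice size of 1000 / 3 ≈ 333 ms, so
// run_classifier_continuous() below is fed roughly 333 ms of new audio per call.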
/* Includes ---------------------------------------------------------------- */
#include <your_library_name_here.h> // the Arduino library exported by your Edge Impulse project (typically <project_name>_inferencing.h)
#include <PDM.h> // microphone
/** Audio buffers, pointers and selectors */
typedef struct {
signed short* buffers[2];
unsigned char buf_select;
unsigned char buf_ready;
unsigned int buf_count;
unsigned int n_samples;
} inference_t;
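// The two buffers are used in a ping-pong fashion: the PDM callback fills
// buffers[buf_select] and, once it holds n_samples, flips buf_select and sets
// buf_ready, while the classifier reads the other (just-completed) buffer.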
static inference_t inference;
static bool record_ready = false;
static signed short* sampleBuffer;
static bool debug_nn = false; // Set this to true to see e.g. features generated from the raw signal
static int print_results = -(EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW);
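// print_results starts negative so that the first slices, which do not yet cover
// a full model window, are classified but their results are not printed or acted on.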
/**
* @brief Arduino setup function
*/
void setup()
{
// put your setup code here, to run once:
Serial.begin(115200);
pinMode(2, OUTPUT);
pinMode(3, OUTPUT);
Serial.println("yes no Inferencing");
// summary of inferencing settings (from model_metadata.h)
ei_printf("Inferencing settings:\n");
ei_printf("\tInterval: %.2f ms.\n", (float)EI_CLASSIFIER_INTERVAL_MS);
ei_printf("\tFrame size: %d\n", EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE);
ei_printf("\tSample length: %d ms.\n", EI_CLASSIFIER_RAW_SAMPLE_COUNT / 16);
ei_printf("\tNo. of classes: %d\n",
sizeof(ei_classifier_inferencing_categories) / sizeof(ei_classifier_inferencing_categories[0]));
run_classifier_init();
if (microphone_inference_start(EI_CLASSIFIER_SLICE_SIZE) == false) {
ei_printf("ERR: Failed to setup audio sampling\r\n");
return;
}
}
/**
* @brief Arduino main function. Runs the inferencing loop.
*/
void loop()
{
bool m = microphone_inference_record();
if (!m) {
ei_printf("ERR: Failed to record audio...\n");
return;
}
signal_t signal;
signal.total_length = EI_CLASSIFIER_SLICE_SIZE;
signal.get_data = &microphone_audio_signal_get_data;
ei_impulse_result_t result = { 0 };
EI_IMPULSE_ERROR r = run_classifier_continuous(&signal, &result, debug_nn);
if (r != EI_IMPULSE_OK) {
ei_printf("ERR: Failed to run classifier (%d)\n", r);
return;
}
// this is the code that prints out the label if the model's probability
// is higher than the given threshold.
if (++print_results >= (EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW)) {
// loop over all the labels
for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) {
String label = result.classification[ix].label; // the label
float probability = result.classification[ix].value; // the probability
float threshold = 0.50; // the threshold
if (label == "yes" && probability >= threshold) {
Serial.println("yes");
//inferenceCharacteristic.writeValue(0);
digitalWrite(2, HIGH); //turn on DC motor
digitalWrite(3, LOW); //turn off fan
delay(500); //short pause
}
else if (label == "no" && probability >= threshold) {
Serial.println("no");
//inferenceCharacteristic.writeValue(1);
digitalWrite(3, HIGH); //turn on fan
digitalWrite(2, LOW); //turn off DC motor
delay(500); //short pause
}
else {
digitalWrite(2, LOW);
digitalWrite(3, LOW);
}
}
#if EI_CLASSIFIER_HAS_ANOMALY == 1
ei_printf(" anomaly score: %.3f\n", result.anomaly);
#endif
print_results = 0;
}
}
/**
* @brief Printf-style helper: formats via vsnprintf and writes the result to Arduino Serial
*
* @param[in] format printf-style format string, followed by its arguments
*/
void ei_printf(const char* format, ...)
{
static char print_buf[1024] = { 0 };
va_list args;
va_start(args, format);
int r = vsnprintf(print_buf, sizeof(print_buf), format, args);
va_end(args);
if (r > 0) {
Serial.write(print_buf);
}
}
/**
* @brief PDM buffer full callback
* Copies the freshly captured samples into the active inference buffer
*/
static void pdm_data_ready_inference_callback(void)
{
int bytesAvailable = PDM.available();
// read into the sample buffer
int bytesRead = PDM.read((char*)&sampleBuffer[0], bytesAvailable);
if (record_ready == true) {
for (int i = 0; i < bytesRead >> 1; i++) {
inference.buffers[inference.buf_select][inference.buf_count++] = sampleBuffer[i];
if (inference.buf_count >= inference.n_samples) {
inference.buf_select ^= 1;
inference.buf_count = 0;
inference.buf_ready = 1;
}
}
}
}
/**
* @brief Init inferencing struct and setup/start PDM
*
* @param[in] n_samples The n samples
*
* @return True on success, false if a buffer could not be allocated
*/
static bool microphone_inference_start(uint32_t n_samples)
{
inference.buffers[0] = (signed short*)malloc(n_samples * sizeof(signed short));
if (inference.buffers[0] == NULL) {
return false;
}
inference.buffers[1] = (signed short*)malloc(n_samples * sizeof(signed short));
if (inference.buffers[1] == NULL) {
free(inference.buffers[0]);
return false;
}
sampleBuffer = (signed short*)malloc((n_samples >> 1) * sizeof(signed short));
if (sampleBuffer == NULL) {
free(inference.buffers[0]);
free(inference.buffers[1]);
return false;
}
inference.buf_select = 0;
inference.buf_count = 0;
inference.n_samples = n_samples;
inference.buf_ready = 0;
// configure the data receive callback
PDM.onReceive(&pdm_data_ready_inference_callback);
PDM.setBufferSize((n_samples >> 1) * sizeof(int16_t));
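// The PDM buffer holds (n_samples >> 1) int16_t samples (the size is given in
// bytes), i.e. half a slice, so the receive callback fires about twice per slice.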
// initialize PDM with:
// - one channel (mono mode)
// - a 16 kHz sample rate
if (!PDM.begin(1, EI_CLASSIFIER_FREQUENCY)) {
ei_printf("Failed to start PDM!");
microphone_inference_end();
return false;
}
// set the gain, defaults to 20
PDM.setGain(127);
record_ready = true;
return true;
}
/**
* @brief Wait on new data
*
* @return True on success, false if the sample buffer overran
*/
static bool microphone_inference_record(void)
{
bool ret = true;
if (inference.buf_ready == 1) {
ei_printf("Error sample buffer overrun. Decrease the number of slices per model window "
"(EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW)\n");
ret = false;
}
while (inference.buf_ready == 0) {
delay(1);
}
inference.buf_ready = 0;
return ret;
}
/**
* Get raw audio signal data
*/
static int microphone_audio_signal_get_data(size_t offset, size_t length, float* out_ptr)
{
numpy::int16_to_float(&inference.buffers[inference.buf_select ^ 1][offset], out_ptr, length);
return 0;
}
/**
* @brief Stop PDM and release buffers
*/
static void microphone_inference_end(void)
{
PDM.end();
free(inference.buffers[0]);
free(inference.buffers[1]);
free(sampleBuffer);
}
#if !defined(EI_CLASSIFIER_SENSOR) || EI_CLASSIFIER_SENSOR != EI_CLASSIFIER_SENSOR_MICROPHONE
#error "Invalid model for current sensor."
#endif