/*
 * This file is part of the MicroPython project, http://micropython.org/
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2021 Mike Teachman
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <stdlib.h>
#include <string.h>

#include "py/obj.h"
#include "py/runtime.h"
#include "py/mphal.h"
#include "py/misc.h"
#include "py/stream.h"
#include "py/objstr.h"
#include "modmachine.h"

#include "hardware/pio.h"
#include "hardware/clocks.h"
#include "hardware/gpio.h"
#include "hardware/dma.h"
#include "hardware/irq.h"

// The I2S class has 3 modes of operation:
//
// Mode1: Blocking
// - readinto() and write() methods block until the supplied buffer is filled (read) or emptied (write)
// - this is the default mode of operation
//
// Mode2: Non-Blocking
// - readinto() and write() methods return immediately
// - buffer filling and emptying happens asynchronously to the main MicroPython task
// - a callback function is called when the supplied buffer has been filled (read) or emptied (write)
// - non-blocking mode is enabled when a callback is set with the irq() method
// - the DMA IRQ handler is used to implement the asynchronous background operations
//
// Mode3: Uasyncio
// - implements the stream protocol
// - uasyncio mode is enabled when the ioctl() function is called
// - the state of the internal ring buffer is used to detect that I2S samples can be read or written
//
// The samples contained in the app buffer supplied for the readinto() and write() methods have the following convention:
//   Mono:   little endian format
//   Stereo: little endian format, left channel first
//
// I2S terms:
// "frame":  consists of two audio samples (Left audio sample + Right audio sample)
//
// Misc:
// - for Mono configuration:
//   - readinto method:  samples are gathered from the L channel only
//   - write method:  every sample is output to both the L and R channels
// - for readinto method the I2S hardware is read using 8-byte frames
//   (this is standard for almost all I2S hardware, such as MEMS microphones)
// - the PIO is used to drive the I2S bus signals
// - all sample data transfers use non-blocking DMA
// - the DMA controller is configured with 2 DMA channels in chained mode

#define MAX_I2S_RP2 (2)

// The DMA buffer size was empirically determined.  It is a tradeoff between:
// 1. memory use (smaller buffer size desirable to reduce memory footprint)
// 2.
interrupt frequency (larger buffer size desirable to reduce interrupt frequency) #define SIZEOF_DMA_BUFFER_IN_BYTES (256) #define SIZEOF_HALF_DMA_BUFFER_IN_BYTES (SIZEOF_DMA_BUFFER_IN_BYTES / 2) #define I2S_NUM_DMA_CHANNELS (2) // For non-blocking mode, to avoid underflow/overflow, sample data is written/read to/from the ring buffer at a rate faster // than the DMA transfer rate #define NON_BLOCKING_RATE_MULTIPLIER (4) #define SIZEOF_NON_BLOCKING_COPY_IN_BYTES (SIZEOF_HALF_DMA_BUFFER_IN_BYTES * NON_BLOCKING_RATE_MULTIPLIER) #define NUM_I2S_USER_FORMATS (4) #define I2S_RX_FRAME_SIZE_IN_BYTES (8) #define SAMPLES_PER_FRAME (2) #define PIO_INSTRUCTIONS_PER_BIT (2) typedef enum { RX, TX } i2s_mode_t; typedef enum { MONO, STEREO } format_t; typedef enum { BLOCKING, NON_BLOCKING, UASYNCIO } io_mode_t; typedef enum { GP_INPUT = 0, GP_OUTPUT = 1 } gpio_dir_t; typedef struct _ring_buf_t { uint8_t *buffer; size_t head; size_t tail; size_t size; } ring_buf_t; typedef struct _non_blocking_descriptor_t { mp_buffer_info_t appbuf; uint32_t index; bool copy_in_progress; } non_blocking_descriptor_t; typedef struct _machine_i2s_obj_t { mp_obj_base_t base; uint8_t i2s_id; mp_hal_pin_obj_t sck; mp_hal_pin_obj_t ws; mp_hal_pin_obj_t sd; i2s_mode_t mode; int8_t bits; format_t format; int32_t rate; int32_t ibuf; mp_obj_t callback_for_non_blocking; io_mode_t io_mode; PIO pio; uint8_t sm; const pio_program_t *pio_program; uint prog_offset; int dma_channel[I2S_NUM_DMA_CHANNELS]; uint8_t dma_buffer[SIZEOF_DMA_BUFFER_IN_BYTES]; ring_buf_t ring_buffer; uint8_t *ring_buffer_storage; non_blocking_descriptor_t non_blocking_descriptor; } machine_i2s_obj_t; // The frame map is used with the readinto() method to transform the audio sample data coming // from DMA memory (32-bit stereo) to the format specified // in the I2S constructor. e.g. 
16-bit mono STATIC const int8_t i2s_frame_map[NUM_I2S_USER_FORMATS][I2S_RX_FRAME_SIZE_IN_BYTES] = { {-1, -1, 0, 1, -1, -1, -1, -1 }, // Mono, 16-bits { 0, 1, 2, 3, -1, -1, -1, -1 }, // Mono, 32-bits {-1, -1, 0, 1, -1, -1, 2, 3 }, // Stereo, 16-bits { 0, 1, 2, 3, 4, 5, 6, 7 }, // Stereo, 32-bits }; STATIC const PIO pio_instances[NUM_PIOS] = {pio0, pio1}; // PIO program for 16-bit write // set(x, 14) .side(0b01) // label('left_channel') // out(pins, 1) .side(0b00) // jmp(x_dec, "left_channel") .side(0b01) // out(pins, 1) .side(0b10) // set(x, 14) .side(0b11) // label('right_channel') // out(pins, 1) .side(0b10) // jmp(x_dec, "right_channel") .side(0b11) // out(pins, 1) .side(0b00) STATIC const uint16_t pio_instructions_write_16[] = {59438, 24577, 2113, 28673, 63534, 28673, 6213, 24577}; STATIC const pio_program_t pio_write_16 = { pio_instructions_write_16, sizeof(pio_instructions_write_16) / sizeof(uint16_t), -1 }; // PIO program for 32-bit write // set(x, 30) .side(0b01) // label('left_channel') // out(pins, 1) .side(0b00) // jmp(x_dec, "left_channel") .side(0b01) // out(pins, 1) .side(0b10) // set(x, 30) .side(0b11) // label('right_channel') // out(pins, 1) .side(0b10) // jmp(x_dec, "right_channel") .side(0b11) // out(pins, 1) .side(0b00) STATIC const uint16_t pio_instructions_write_32[] = {59454, 24577, 2113, 28673, 63550, 28673, 6213, 24577}; STATIC const pio_program_t pio_write_32 = { pio_instructions_write_32, sizeof(pio_instructions_write_32) / sizeof(uint16_t), -1 }; // PIO program for 32-bit read // set(x, 30) .side(0b00) // label('left_channel') // in_(pins, 1) .side(0b01) // jmp(x_dec, "left_channel") .side(0b00) // in_(pins, 1) .side(0b11) // set(x, 30) .side(0b10) // label('right_channel') // in_(pins, 1) .side(0b11) // jmp(x_dec, "right_channel") .side(0b10) // in_(pins, 1) .side(0b01) STATIC const uint16_t pio_instructions_read_32[] = {57406, 18433, 65, 22529, 61502, 22529, 4165, 18433}; STATIC const pio_program_t pio_read_32 = { pio_instructions_read_32, sizeof(pio_instructions_read_32) / sizeof(uint16_t), -1 }; STATIC uint8_t dma_get_bits(i2s_mode_t mode, int8_t bits); STATIC void dma_irq0_handler(void); STATIC void dma_irq1_handler(void); STATIC mp_obj_t machine_i2s_deinit(mp_obj_t self_in); void machine_i2s_init0(void) { for (uint8_t i = 0; i < MAX_I2S_RP2; i++) { MP_STATE_PORT(machine_i2s_obj[i]) = NULL; } } // Ring Buffer // Thread safe when used with these constraints: // - Single Producer, Single Consumer // - Sequential atomic operations // One byte of capacity is used to detect buffer empty/full STATIC void ringbuf_init(ring_buf_t *rbuf, uint8_t *buffer, size_t size) { rbuf->buffer = buffer; rbuf->size = size; rbuf->head = 0; rbuf->tail = 0; } STATIC bool ringbuf_push(ring_buf_t *rbuf, uint8_t data) { size_t next_tail = (rbuf->tail + 1) % rbuf->size; if (next_tail != rbuf->head) { rbuf->buffer[rbuf->tail] = data; rbuf->tail = next_tail; return true; } // full return false; } STATIC bool ringbuf_pop(ring_buf_t *rbuf, uint8_t *data) { if (rbuf->head == rbuf->tail) { // empty return false; } *data = rbuf->buffer[rbuf->head]; rbuf->head = (rbuf->head + 1) % rbuf->size; return true; } STATIC bool ringbuf_is_empty(ring_buf_t *rbuf) { return rbuf->head == rbuf->tail; } STATIC bool ringbuf_is_full(ring_buf_t *rbuf) { return ((rbuf->tail + 1) % rbuf->size) == rbuf->head; } STATIC size_t ringbuf_available_data(ring_buf_t *rbuf) { return (rbuf->tail - rbuf->head + rbuf->size) % rbuf->size; } STATIC size_t ringbuf_available_space(ring_buf_t *rbuf) { return rbuf->size - 
ringbuf_available_data(rbuf) - 1; } STATIC int8_t get_frame_mapping_index(int8_t bits, format_t format) { if (format == MONO) { if (bits == 16) { return 0; } else { // 32 bits return 1; } } else { // STEREO if (bits == 16) { return 2; } else { // 32 bits return 3; } } } STATIC uint32_t fill_appbuf_from_ringbuf(machine_i2s_obj_t *self, mp_buffer_info_t *appbuf) { // copy audio samples from the ring buffer to the app buffer // loop, copying samples until the app buffer is filled // For uasyncio mode, the loop will make an early exit if the ring buffer becomes empty // Example: // a MicroPython I2S object is configured for 16-bit mono (2 bytes per audio sample). // For every frame coming from the ring buffer (8 bytes), 2 bytes are "cherry picked" and // copied to the supplied app buffer. // Thus, for every 1 byte copied to the app buffer, 4 bytes are read from the ring buffer. // If a 8kB app buffer is supplied, 32kB of audio samples is read from the ring buffer. uint32_t num_bytes_copied_to_appbuf = 0; uint8_t *app_p = (uint8_t *)appbuf->buf; uint8_t appbuf_sample_size_in_bytes = (self->bits == 16? 2 : 4) * (self->format == STEREO ? 2: 1); uint32_t num_bytes_needed_from_ringbuf = appbuf->len * (I2S_RX_FRAME_SIZE_IN_BYTES / appbuf_sample_size_in_bytes); uint8_t discard_byte; while (num_bytes_needed_from_ringbuf) { uint8_t f_index = get_frame_mapping_index(self->bits, self->format); for (uint8_t i = 0; i < I2S_RX_FRAME_SIZE_IN_BYTES; i++) { int8_t r_to_a_mapping = i2s_frame_map[f_index][i]; if (r_to_a_mapping != -1) { if (self->io_mode == BLOCKING) { // poll the ringbuf until a sample becomes available, copy into appbuf using the mapping transform while (ringbuf_pop(&self->ring_buffer, app_p + r_to_a_mapping) == false) { ; } num_bytes_copied_to_appbuf++; } else if (self->io_mode == UASYNCIO) { if (ringbuf_pop(&self->ring_buffer, app_p + r_to_a_mapping) == false) { // ring buffer is empty, exit goto exit; } else { num_bytes_copied_to_appbuf++; } } else { return 0; // should never get here (non-blocking mode does not use this function) } } else { // r_a_mapping == -1 // discard unused byte from ring buffer if (self->io_mode == BLOCKING) { // poll the ringbuf until a sample becomes available while (ringbuf_pop(&self->ring_buffer, &discard_byte) == false) { ; } } else if (self->io_mode == UASYNCIO) { if (ringbuf_pop(&self->ring_buffer, &discard_byte) == false) { // ring buffer is empty, exit goto exit; } } else { return 0; // should never get here (non-blocking mode does not use this function) } } num_bytes_needed_from_ringbuf--; } app_p += appbuf_sample_size_in_bytes; } exit: return num_bytes_copied_to_appbuf; } // function is used in IRQ context STATIC void fill_appbuf_from_ringbuf_non_blocking(machine_i2s_obj_t *self) { // attempt to copy a block of audio samples from the ring buffer to the supplied app buffer. // audio samples will be formatted as part of the copy operation uint32_t num_bytes_copied_to_appbuf = 0; uint8_t *app_p = &(((uint8_t *)self->non_blocking_descriptor.appbuf.buf)[self->non_blocking_descriptor.index]); uint8_t appbuf_sample_size_in_bytes = (self->bits == 16? 2 : 4) * (self->format == STEREO ? 
2: 1); uint32_t num_bytes_remaining_to_copy_to_appbuf = self->non_blocking_descriptor.appbuf.len - self->non_blocking_descriptor.index; uint32_t num_bytes_remaining_to_copy_from_ring_buffer = num_bytes_remaining_to_copy_to_appbuf * (I2S_RX_FRAME_SIZE_IN_BYTES / appbuf_sample_size_in_bytes); uint32_t num_bytes_needed_from_ringbuf = MIN(SIZEOF_NON_BLOCKING_COPY_IN_BYTES, num_bytes_remaining_to_copy_from_ring_buffer); uint8_t discard_byte; if (ringbuf_available_data(&self->ring_buffer) >= num_bytes_needed_from_ringbuf) { while (num_bytes_needed_from_ringbuf) { uint8_t f_index = get_frame_mapping_index(self->bits, self->format); for (uint8_t i = 0; i < I2S_RX_FRAME_SIZE_IN_BYTES; i++) { int8_t r_to_a_mapping = i2s_frame_map[f_index][i]; if (r_to_a_mapping != -1) { ringbuf_pop(&self->ring_buffer, app_p + r_to_a_mapping); num_bytes_copied_to_appbuf++; } else { // r_a_mapping == -1 // discard unused byte from ring buffer ringbuf_pop(&self->ring_buffer, &discard_byte); } num_bytes_needed_from_ringbuf--; } app_p += appbuf_sample_size_in_bytes; } self->non_blocking_descriptor.index += num_bytes_copied_to_appbuf; if (self->non_blocking_descriptor.index >= self->non_blocking_descriptor.appbuf.len) { self->non_blocking_descriptor.copy_in_progress = false; mp_sched_schedule(self->callback_for_non_blocking, MP_OBJ_FROM_PTR(self)); } } } STATIC uint32_t copy_appbuf_to_ringbuf(machine_i2s_obj_t *self, mp_buffer_info_t *appbuf) { // copy audio samples from the app buffer to the ring buffer // loop, reading samples until the app buffer is emptied // for uasyncio mode, the loop will make an early exit if the ring buffer becomes full uint32_t a_index = 0; while (a_index < appbuf->len) { if (self->io_mode == BLOCKING) { // copy a byte to the ringbuf when space becomes available while (ringbuf_push(&self->ring_buffer, ((uint8_t *)appbuf->buf)[a_index]) == false) { ; } a_index++; } else if (self->io_mode == UASYNCIO) { if (ringbuf_push(&self->ring_buffer, ((uint8_t *)appbuf->buf)[a_index]) == false) { // ring buffer is full, exit break; } else { a_index++; } } else { return 0; // should never get here (non-blocking mode does not use this function) } } return a_index; } // function is used in IRQ context STATIC void copy_appbuf_to_ringbuf_non_blocking(machine_i2s_obj_t *self) { // copy audio samples from app buffer into ring buffer uint32_t num_bytes_remaining_to_copy = self->non_blocking_descriptor.appbuf.len - self->non_blocking_descriptor.index; uint32_t num_bytes_to_copy = MIN(SIZEOF_NON_BLOCKING_COPY_IN_BYTES, num_bytes_remaining_to_copy); if (ringbuf_available_space(&self->ring_buffer) >= num_bytes_to_copy) { for (uint32_t i = 0; i < num_bytes_to_copy; i++) { ringbuf_push(&self->ring_buffer, ((uint8_t *)self->non_blocking_descriptor.appbuf.buf)[self->non_blocking_descriptor.index + i]); } self->non_blocking_descriptor.index += num_bytes_to_copy; if (self->non_blocking_descriptor.index >= self->non_blocking_descriptor.appbuf.len) { self->non_blocking_descriptor.copy_in_progress = false; mp_sched_schedule(self->callback_for_non_blocking, MP_OBJ_FROM_PTR(self)); } } } // function is used in IRQ context STATIC void empty_dma(machine_i2s_obj_t *self, uint8_t *dma_buffer_p) { // when space exists, copy samples into ring buffer if (ringbuf_available_space(&self->ring_buffer) >= SIZEOF_HALF_DMA_BUFFER_IN_BYTES) { for (uint32_t i = 0; i < SIZEOF_HALF_DMA_BUFFER_IN_BYTES; i++) { ringbuf_push(&self->ring_buffer, dma_buffer_p[i]); } } } // function is used in IRQ context STATIC void feed_dma(machine_i2s_obj_t *self, 
uint8_t *dma_buffer_p) { // when data exists, copy samples from ring buffer if (ringbuf_available_data(&self->ring_buffer) >= SIZEOF_HALF_DMA_BUFFER_IN_BYTES) { // copy a block of samples from the ring buffer to the dma buffer. // STM32 HAL API has a stereo I2S implementation, but not mono // mono format is implemented by duplicating each sample into both L and R channels. if ((self->format == MONO) && (self->bits == 16)) { for (uint32_t i = 0; i < SIZEOF_HALF_DMA_BUFFER_IN_BYTES / 4; i++) { for (uint8_t b = 0; b < sizeof(uint16_t); b++) { ringbuf_pop(&self->ring_buffer, &dma_buffer_p[i * 4 + b]); dma_buffer_p[i * 4 + b + 2] = dma_buffer_p[i * 4 + b]; // duplicated mono sample } } } else if ((self->format == MONO) && (self->bits == 32)) { for (uint32_t i = 0; i < SIZEOF_HALF_DMA_BUFFER_IN_BYTES / 8; i++) { for (uint8_t b = 0; b < sizeof(uint32_t); b++) { ringbuf_pop(&self->ring_buffer, &dma_buffer_p[i * 8 + b]); dma_buffer_p[i * 8 + b + 4] = dma_buffer_p[i * 8 + b]; // duplicated mono sample } } } else { // STEREO, both 16-bit and 32-bit for (uint32_t i = 0; i < SIZEOF_HALF_DMA_BUFFER_IN_BYTES; i++) { ringbuf_pop(&self->ring_buffer, &dma_buffer_p[i]); } } } else { // underflow. clear buffer to transmit "silence" on the I2S bus memset(dma_buffer_p, 0, SIZEOF_HALF_DMA_BUFFER_IN_BYTES); } } STATIC void irq_configure(machine_i2s_obj_t *self) { if (self->i2s_id == 0) { irq_set_exclusive_handler(DMA_IRQ_0, dma_irq0_handler); irq_set_enabled(DMA_IRQ_0, true); } else { irq_set_exclusive_handler(DMA_IRQ_1, dma_irq1_handler); irq_set_enabled(DMA_IRQ_1, true); } } STATIC void irq_deinit(machine_i2s_obj_t *self) { if (self->i2s_id == 0) { irq_set_enabled(DMA_IRQ_0, false); irq_remove_handler(DMA_IRQ_0, dma_irq0_handler); } else { irq_set_enabled(DMA_IRQ_1, false); irq_remove_handler(DMA_IRQ_1, dma_irq1_handler); } } STATIC void pio_configure(machine_i2s_obj_t *self) { if (self->mode == TX) { if (self->bits == 16) { self->pio_program = &pio_write_16; } else { self->pio_program = &pio_write_32; } } else { // RX self->pio_program = &pio_read_32; } // find a PIO with a free state machine and adequate program space PIO candidate_pio; bool is_free_sm; bool can_add_program; for (uint8_t p = 0; p < NUM_PIOS; p++) { candidate_pio = pio_instances[p]; is_free_sm = false; can_add_program = false; for (uint8_t sm = 0; sm < NUM_PIO_STATE_MACHINES; sm++) { if (!pio_sm_is_claimed(candidate_pio, sm)) { is_free_sm = true; break; } } if (pio_can_add_program(candidate_pio, self->pio_program)) { can_add_program = true; } if (is_free_sm && can_add_program) { break; } } if (!is_free_sm) { mp_raise_msg(&mp_type_OSError, MP_ERROR_TEXT("no free state machines")); } if (!can_add_program) { mp_raise_msg(&mp_type_OSError, MP_ERROR_TEXT("not enough PIO program space")); } self->pio = candidate_pio; self->sm = pio_claim_unused_sm(self->pio, false); self->prog_offset = pio_add_program(self->pio, self->pio_program); pio_sm_init(self->pio, self->sm, self->prog_offset, NULL); pio_sm_config config = pio_get_default_sm_config(); float pio_freq = self->rate * SAMPLES_PER_FRAME * dma_get_bits(self->mode, self->bits) * PIO_INSTRUCTIONS_PER_BIT; float clkdiv = clock_get_hz(clk_sys) / pio_freq; sm_config_set_clkdiv(&config, clkdiv); if (self->mode == TX) { sm_config_set_out_pins(&config, self->sd, 1); sm_config_set_out_shift(&config, false, true, dma_get_bits(self->mode, self->bits)); sm_config_set_fifo_join(&config, PIO_FIFO_JOIN_TX); // double TX FIFO size } else { // RX sm_config_set_in_pins(&config, self->sd); 
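        // I2S data arrives MSB-first on the bus: shift the ISR to the left and autopush each completed word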
sm_config_set_in_shift(&config, false, true, dma_get_bits(self->mode, self->bits)); sm_config_set_fifo_join(&config, PIO_FIFO_JOIN_RX); // double RX FIFO size } sm_config_set_sideset(&config, 2, false, false); sm_config_set_sideset_pins(&config, self->sck); sm_config_set_wrap(&config, self->prog_offset, self->prog_offset + self->pio_program->length - 1); pio_sm_set_config(self->pio, self->sm, &config); } STATIC void pio_deinit(machine_i2s_obj_t *self) { if (self->pio) { pio_sm_set_enabled(self->pio, self->sm, false); pio_sm_unclaim(self->pio, self->sm); pio_remove_program(self->pio, self->pio_program, self->prog_offset); } } STATIC void gpio_init_i2s(PIO pio, uint8_t sm, mp_hal_pin_obj_t pin_num, uint8_t pin_val, gpio_dir_t pin_dir) { uint32_t pinmask = 1 << pin_num; pio_sm_set_pins_with_mask(pio, sm, pin_val << pin_num, pinmask); pio_sm_set_pindirs_with_mask(pio, sm, pin_dir << pin_num, pinmask); pio_gpio_init(pio, pin_num); } STATIC void gpio_configure(machine_i2s_obj_t *self) { gpio_init_i2s(self->pio, self->sm, self->sck, 0, GP_OUTPUT); gpio_init_i2s(self->pio, self->sm, self->ws, 0, GP_OUTPUT); if (self->mode == TX) { gpio_init_i2s(self->pio, self->sm, self->sd, 0, GP_OUTPUT); } else { // RX gpio_init_i2s(self->pio, self->sm, self->sd, 0, GP_INPUT); } } STATIC uint8_t dma_get_bits(i2s_mode_t mode, int8_t bits) { if (mode == TX) { return bits; } else { // RX // always read 32 bit words for I2S e.g. I2S MEMS microphones return 32; } } // determine which DMA channel is associated to this IRQ STATIC uint dma_map_irq_to_channel(uint irq_index) { for (uint ch = 0; ch < NUM_DMA_CHANNELS; ch++) { if ((dma_irqn_get_channel_status(irq_index, ch))) { return ch; } } // This should never happen return -1; } // note: first DMA channel is mapped to the top half of buffer, second is mapped to the bottom half STATIC uint8_t *dma_get_buffer(machine_i2s_obj_t *i2s_obj, uint channel) { for (uint8_t ch = 0; ch < I2S_NUM_DMA_CHANNELS; ch++) { if (i2s_obj->dma_channel[ch] == channel) { return i2s_obj->dma_buffer + (SIZEOF_HALF_DMA_BUFFER_IN_BYTES * ch); } } // This should never happen return NULL; } STATIC void dma_configure(machine_i2s_obj_t *self) { uint8_t num_free_dma_channels = 0; for (uint8_t ch = 0; ch < NUM_DMA_CHANNELS; ch++) { if (!dma_channel_is_claimed(ch)) { num_free_dma_channels++; } } if (num_free_dma_channels < I2S_NUM_DMA_CHANNELS) { mp_raise_msg(&mp_type_OSError, MP_ERROR_TEXT("cannot claim 2 DMA channels")); } for (uint8_t ch = 0; ch < I2S_NUM_DMA_CHANNELS; ch++) { self->dma_channel[ch] = dma_claim_unused_channel(false); } // The DMA channels are chained together. The first DMA channel is used to access // the top half of the DMA buffer. The second DMA channel accesses the bottom half of the DMA buffer. // With chaining, when one DMA channel has completed a data transfer, the other // DMA channel automatically starts a new data transfer. enum dma_channel_transfer_size dma_size = (dma_get_bits(self->mode, self->bits) == 16) ? 
DMA_SIZE_16 : DMA_SIZE_32; for (uint8_t ch = 0; ch < I2S_NUM_DMA_CHANNELS; ch++) { dma_channel_config dma_config = dma_channel_get_default_config(self->dma_channel[ch]); channel_config_set_transfer_data_size(&dma_config, dma_size); channel_config_set_chain_to(&dma_config, self->dma_channel[(ch + 1) % I2S_NUM_DMA_CHANNELS]); uint8_t *dma_buffer = self->dma_buffer + (SIZEOF_HALF_DMA_BUFFER_IN_BYTES * ch); if (self->mode == TX) { channel_config_set_dreq(&dma_config, pio_get_dreq(self->pio, self->sm, true)); channel_config_set_read_increment(&dma_config, true); channel_config_set_write_increment(&dma_config, false); dma_channel_configure(self->dma_channel[ch], &dma_config, (void *)&self->pio->txf[self->sm], // dest = PIO TX FIFO dma_buffer, // src = DMA buffer SIZEOF_HALF_DMA_BUFFER_IN_BYTES / (dma_get_bits(self->mode, self->bits) / 8), false); } else { // RX channel_config_set_dreq(&dma_config, pio_get_dreq(self->pio, self->sm, false)); channel_config_set_read_increment(&dma_config, false); channel_config_set_write_increment(&dma_config, true); dma_channel_configure(self->dma_channel[ch], &dma_config, dma_buffer, // dest = DMA buffer (void *)&self->pio->rxf[self->sm], // src = PIO RX FIFO SIZEOF_HALF_DMA_BUFFER_IN_BYTES / (dma_get_bits(self->mode, self->bits) / 8), false); } } for (uint8_t ch = 0; ch < I2S_NUM_DMA_CHANNELS; ch++) { dma_irqn_acknowledge_channel(self->i2s_id, self->dma_channel[ch]); // clear pending. e.g. from SPI dma_irqn_set_channel_enabled(self->i2s_id, self->dma_channel[ch], true); } } STATIC void dma_deinit(machine_i2s_obj_t *self) { for (uint8_t ch = 0; ch < I2S_NUM_DMA_CHANNELS; ch++) { int channel = self->dma_channel[ch]; // unchain the channel to prevent triggering a transfer in the chained-to channel dma_channel_config dma_config = dma_get_channel_config(channel); channel_config_set_chain_to(&dma_config, channel); dma_channel_set_config(channel, &dma_config, false); dma_irqn_set_channel_enabled(self->i2s_id, channel, false); dma_channel_abort(channel); // in case a transfer is in flight dma_channel_unclaim(channel); } } STATIC void dma_irq_handler(uint8_t irq_index) { int dma_channel = dma_map_irq_to_channel(irq_index); if (dma_channel == -1) { // This should never happen return; } machine_i2s_obj_t *self = MP_STATE_PORT(machine_i2s_obj[irq_index]); if (self == NULL) { // This should never happen return; } uint8_t *dma_buffer = dma_get_buffer(self, dma_channel); if (dma_buffer == NULL) { // This should never happen return; } if (self->mode == TX) { // for non-blocking operation handle the write() method requests. if ((self->io_mode == NON_BLOCKING) && (self->non_blocking_descriptor.copy_in_progress)) { copy_appbuf_to_ringbuf_non_blocking(self); } feed_dma(self, dma_buffer); dma_irqn_acknowledge_channel(irq_index, dma_channel); dma_channel_set_read_addr(dma_channel, dma_buffer, false); } else { // RX empty_dma(self, dma_buffer); dma_irqn_acknowledge_channel(irq_index, dma_channel); dma_channel_set_write_addr(dma_channel, dma_buffer, false); // for non-blocking operation handle the readinto() method requests. 
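        // each interrupt copies up to SIZEOF_NON_BLOCKING_COPY_IN_BYTES out of the ring buffer into the user appbuf;
        // once the appbuf has been completely filled, the user callback is scheduled (see fill_appbuf_from_ringbuf_non_blocking)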
if ((self->io_mode == NON_BLOCKING) && (self->non_blocking_descriptor.copy_in_progress)) { fill_appbuf_from_ringbuf_non_blocking(self); } } } STATIC void dma_irq0_handler(void) { dma_irq_handler(0); } STATIC void dma_irq1_handler(void) { dma_irq_handler(1); } STATIC void machine_i2s_init_helper(machine_i2s_obj_t *self, size_t n_pos_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { enum { ARG_sck, ARG_ws, ARG_sd, ARG_mode, ARG_bits, ARG_format, ARG_rate, ARG_ibuf, }; static const mp_arg_t allowed_args[] = { { MP_QSTR_sck, MP_ARG_KW_ONLY | MP_ARG_REQUIRED | MP_ARG_OBJ, {.u_obj = MP_OBJ_NULL} }, { MP_QSTR_ws, MP_ARG_KW_ONLY | MP_ARG_REQUIRED | MP_ARG_OBJ, {.u_obj = MP_OBJ_NULL} }, { MP_QSTR_sd, MP_ARG_KW_ONLY | MP_ARG_REQUIRED | MP_ARG_OBJ, {.u_obj = MP_OBJ_NULL} }, { MP_QSTR_mode, MP_ARG_KW_ONLY | MP_ARG_REQUIRED | MP_ARG_INT, {.u_int = -1} }, { MP_QSTR_bits, MP_ARG_KW_ONLY | MP_ARG_REQUIRED | MP_ARG_INT, {.u_int = -1} }, { MP_QSTR_format, MP_ARG_KW_ONLY | MP_ARG_REQUIRED | MP_ARG_INT, {.u_int = -1} }, { MP_QSTR_rate, MP_ARG_KW_ONLY | MP_ARG_REQUIRED | MP_ARG_INT, {.u_int = -1} }, { MP_QSTR_ibuf, MP_ARG_KW_ONLY | MP_ARG_REQUIRED | MP_ARG_INT, {.u_int = -1} }, }; mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; mp_arg_parse_all(n_pos_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); // // ---- Check validity of arguments ---- // // are Pins valid? mp_hal_pin_obj_t sck = args[ARG_sck].u_obj == MP_OBJ_NULL ? -1 : mp_hal_get_pin_obj(args[ARG_sck].u_obj); mp_hal_pin_obj_t ws = args[ARG_ws].u_obj == MP_OBJ_NULL ? -1 : mp_hal_get_pin_obj(args[ARG_ws].u_obj); mp_hal_pin_obj_t sd = args[ARG_sd].u_obj == MP_OBJ_NULL ? -1 : mp_hal_get_pin_obj(args[ARG_sd].u_obj); // does WS pin follow SCK pin? // note: SCK and WS are implemented as PIO sideset pins. Sideset pins must be sequential. if (ws != (sck + 1)) { mp_raise_ValueError(MP_ERROR_TEXT("invalid ws (must be sck+1)")); } // is Mode valid? i2s_mode_t i2s_mode = args[ARG_mode].u_int; if ((i2s_mode != RX) && (i2s_mode != TX)) { mp_raise_ValueError(MP_ERROR_TEXT("invalid mode")); } // is Bits valid? int8_t i2s_bits = args[ARG_bits].u_int; if ((i2s_bits != 16) && (i2s_bits != 32)) { mp_raise_ValueError(MP_ERROR_TEXT("invalid bits")); } // is Format valid? format_t i2s_format = args[ARG_format].u_int; if ((i2s_format != MONO) && (i2s_format != STEREO)) { mp_raise_ValueError(MP_ERROR_TEXT("invalid format")); } // is Rate valid? // Not checked // is Ibuf valid? 
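    // ibuf sets the size of the internal ring buffer, in bytes.  One illustrative sizing example (not a requirement):
    // a 16-bit stereo stream at 44.1 kHz moves 44100 frames/s * 4 bytes/frame = 176400 bytes/s through write(),
    // so ibuf=20000 buffers roughly 113 ms of audio between the application and the DMA transfers.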
int32_t ring_buffer_len = args[ARG_ibuf].u_int; if (ring_buffer_len > 0) { self->ring_buffer_storage = m_new(uint8_t, ring_buffer_len); ; ringbuf_init(&self->ring_buffer, self->ring_buffer_storage, ring_buffer_len); } else { mp_raise_ValueError(MP_ERROR_TEXT("invalid ibuf")); } self->sck = sck; self->ws = ws; self->sd = sd; self->mode = i2s_mode; self->bits = i2s_bits; self->format = i2s_format; self->rate = args[ARG_rate].u_int; self->ibuf = ring_buffer_len; self->callback_for_non_blocking = MP_OBJ_NULL; self->non_blocking_descriptor.copy_in_progress = false; self->io_mode = BLOCKING; irq_configure(self); pio_configure(self); gpio_configure(self); dma_configure(self); pio_sm_set_enabled(self->pio, self->sm, true); dma_channel_start(self->dma_channel[0]); } STATIC void machine_i2s_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) { machine_i2s_obj_t *self = MP_OBJ_TO_PTR(self_in); mp_printf(print, "I2S(id=%u,\n" "sck="MP_HAL_PIN_FMT ",\n" "ws="MP_HAL_PIN_FMT ",\n" "sd="MP_HAL_PIN_FMT ",\n" "mode=%u,\n" "bits=%u, format=%u,\n" "rate=%d, ibuf=%d)", self->i2s_id, mp_hal_pin_name(self->sck), mp_hal_pin_name(self->ws), mp_hal_pin_name(self->sd), self->mode, self->bits, self->format, self->rate, self->ibuf ); } STATIC mp_obj_t machine_i2s_make_new(const mp_obj_type_t *type, size_t n_pos_args, size_t n_kw_args, const mp_obj_t *args) { mp_arg_check_num(n_pos_args, n_kw_args, 1, MP_OBJ_FUN_ARGS_MAX, true); uint8_t i2s_id = mp_obj_get_int(args[0]); if (i2s_id >= MAX_I2S_RP2) { mp_raise_ValueError(MP_ERROR_TEXT("invalid id")); } machine_i2s_obj_t *self; if (MP_STATE_PORT(machine_i2s_obj[i2s_id]) == NULL) { self = mp_obj_malloc(machine_i2s_obj_t, &machine_i2s_type); MP_STATE_PORT(machine_i2s_obj[i2s_id]) = self; self->i2s_id = i2s_id; } else { self = MP_STATE_PORT(machine_i2s_obj[i2s_id]); machine_i2s_deinit(MP_OBJ_FROM_PTR(self)); } mp_map_t kw_args; mp_map_init_fixed_table(&kw_args, n_kw_args, args + n_pos_args); machine_i2s_init_helper(self, n_pos_args - 1, args + 1, &kw_args); return MP_OBJ_FROM_PTR(self); } STATIC mp_obj_t machine_i2s_init(size_t n_pos_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { machine_i2s_obj_t *self = MP_OBJ_TO_PTR(pos_args[0]); machine_i2s_deinit(MP_OBJ_FROM_PTR(self)); machine_i2s_init_helper(self, n_pos_args - 1, pos_args + 1, kw_args); return mp_const_none; } STATIC MP_DEFINE_CONST_FUN_OBJ_KW(machine_i2s_init_obj, 1, machine_i2s_init); STATIC mp_obj_t machine_i2s_deinit(mp_obj_t self_in) { machine_i2s_obj_t *self = MP_OBJ_TO_PTR(self_in); // use self->pio as in indication that I2S object has already been de-initialized if (self->pio != NULL) { pio_deinit(self); dma_deinit(self); irq_deinit(self); m_free(self->ring_buffer_storage); self->pio = NULL; // flag object as de-initialized } return mp_const_none; } STATIC MP_DEFINE_CONST_FUN_OBJ_1(machine_i2s_deinit_obj, machine_i2s_deinit); STATIC mp_obj_t machine_i2s_irq(mp_obj_t self_in, mp_obj_t handler) { machine_i2s_obj_t *self = MP_OBJ_TO_PTR(self_in); if (handler != mp_const_none && !mp_obj_is_callable(handler)) { mp_raise_ValueError(MP_ERROR_TEXT("invalid callback")); } if (handler != mp_const_none) { self->io_mode = NON_BLOCKING; } else { self->io_mode = BLOCKING; } self->callback_for_non_blocking = handler; return mp_const_none; } STATIC MP_DEFINE_CONST_FUN_OBJ_2(machine_i2s_irq_obj, machine_i2s_irq); // Shift() is typically used as a volume control. 
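// Each shift of 1 bit doubles (or halves) the sample amplitude, a change of 20*log10(2), about 6 dB: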
// shift=1 increases volume by 6dB, shift=-1 decreases volume by 6dB STATIC mp_obj_t machine_i2s_shift(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) { enum { ARG_buf, ARG_bits, ARG_shift}; static const mp_arg_t allowed_args[] = { { MP_QSTR_buf, MP_ARG_REQUIRED | MP_ARG_KW_ONLY | MP_ARG_OBJ, {.u_obj = MP_OBJ_NULL} }, { MP_QSTR_bits, MP_ARG_REQUIRED | MP_ARG_KW_ONLY | MP_ARG_INT, {.u_int = -1} }, { MP_QSTR_shift, MP_ARG_REQUIRED | MP_ARG_KW_ONLY | MP_ARG_INT, {.u_int = -1} }, }; // parse args mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); mp_buffer_info_t bufinfo; mp_get_buffer_raise(args[ARG_buf].u_obj, &bufinfo, MP_BUFFER_RW); int16_t *buf_16 = bufinfo.buf; int32_t *buf_32 = bufinfo.buf; uint8_t bits = args[ARG_bits].u_int; int8_t shift = args[ARG_shift].u_int; uint32_t num_audio_samples; switch (bits) { case 16: num_audio_samples = bufinfo.len / sizeof(uint16_t); break; case 32: num_audio_samples = bufinfo.len / sizeof(uint32_t); break; default: mp_raise_ValueError(MP_ERROR_TEXT("invalid bits")); break; } for (uint32_t i = 0; i < num_audio_samples; i++) { switch (bits) { case 16: if (shift >= 0) { buf_16[i] = buf_16[i] << shift; } else { buf_16[i] = buf_16[i] >> abs(shift); } break; case 32: if (shift >= 0) { buf_32[i] = buf_32[i] << shift; } else { buf_32[i] = buf_32[i] >> abs(shift); } break; } } return mp_const_none; } STATIC MP_DEFINE_CONST_FUN_OBJ_KW(machine_i2s_shift_fun_obj, 0, machine_i2s_shift); STATIC MP_DEFINE_CONST_STATICMETHOD_OBJ(machine_i2s_shift_obj, MP_ROM_PTR(&machine_i2s_shift_fun_obj)); STATIC const mp_rom_map_elem_t machine_i2s_locals_dict_table[] = { // Methods { MP_ROM_QSTR(MP_QSTR_init), MP_ROM_PTR(&machine_i2s_init_obj) }, { MP_ROM_QSTR(MP_QSTR_readinto), MP_ROM_PTR(&mp_stream_readinto_obj) }, { MP_ROM_QSTR(MP_QSTR_write), MP_ROM_PTR(&mp_stream_write_obj) }, { MP_ROM_QSTR(MP_QSTR_deinit), MP_ROM_PTR(&machine_i2s_deinit_obj) }, { MP_ROM_QSTR(MP_QSTR_irq), MP_ROM_PTR(&machine_i2s_irq_obj) }, // Static method { MP_ROM_QSTR(MP_QSTR_shift), MP_ROM_PTR(&machine_i2s_shift_obj) }, // Constants { MP_ROM_QSTR(MP_QSTR_RX), MP_ROM_INT(RX) }, { MP_ROM_QSTR(MP_QSTR_TX), MP_ROM_INT(TX) }, { MP_ROM_QSTR(MP_QSTR_STEREO), MP_ROM_INT(STEREO) }, { MP_ROM_QSTR(MP_QSTR_MONO), MP_ROM_INT(MONO) }, }; MP_DEFINE_CONST_DICT(machine_i2s_locals_dict, machine_i2s_locals_dict_table); STATIC mp_uint_t machine_i2s_stream_read(mp_obj_t self_in, void *buf_in, mp_uint_t size, int *errcode) { machine_i2s_obj_t *self = MP_OBJ_TO_PTR(self_in); if (self->mode != RX) { *errcode = MP_EPERM; return MP_STREAM_ERROR; } uint8_t appbuf_sample_size_in_bytes = (self->bits / 8) * (self->format == STEREO ? 
2: 1); if (size % appbuf_sample_size_in_bytes != 0) { *errcode = MP_EINVAL; return MP_STREAM_ERROR; } if (size == 0) { return 0; } if (self->io_mode == NON_BLOCKING) { self->non_blocking_descriptor.appbuf.buf = (void *)buf_in; self->non_blocking_descriptor.appbuf.len = size; self->non_blocking_descriptor.index = 0; self->non_blocking_descriptor.copy_in_progress = true; return size; } else { // blocking or uasyncio mode mp_buffer_info_t appbuf; appbuf.buf = (void *)buf_in; appbuf.len = size; uint32_t num_bytes_read = fill_appbuf_from_ringbuf(self, &appbuf); return num_bytes_read; } } STATIC mp_uint_t machine_i2s_stream_write(mp_obj_t self_in, const void *buf_in, mp_uint_t size, int *errcode) { machine_i2s_obj_t *self = MP_OBJ_TO_PTR(self_in); if (self->mode != TX) { *errcode = MP_EPERM; return MP_STREAM_ERROR; } if (size == 0) { return 0; } if (self->io_mode == NON_BLOCKING) { self->non_blocking_descriptor.appbuf.buf = (void *)buf_in; self->non_blocking_descriptor.appbuf.len = size; self->non_blocking_descriptor.index = 0; self->non_blocking_descriptor.copy_in_progress = true; return size; } else { // blocking or uasyncio mode mp_buffer_info_t appbuf; appbuf.buf = (void *)buf_in; appbuf.len = size; uint32_t num_bytes_written = copy_appbuf_to_ringbuf(self, &appbuf); return num_bytes_written; } } STATIC mp_uint_t machine_i2s_ioctl(mp_obj_t self_in, mp_uint_t request, uintptr_t arg, int *errcode) { machine_i2s_obj_t *self = MP_OBJ_TO_PTR(self_in); mp_uint_t ret; uintptr_t flags = arg; self->io_mode = UASYNCIO; // a call to ioctl() is an indication that uasyncio is being used if (request == MP_STREAM_POLL) { ret = 0; if (flags & MP_STREAM_POLL_RD) { if (self->mode != RX) { *errcode = MP_EPERM; return MP_STREAM_ERROR; } if (!ringbuf_is_empty(&self->ring_buffer)) { ret |= MP_STREAM_POLL_RD; } } if (flags & MP_STREAM_POLL_WR) { if (self->mode != TX) { *errcode = MP_EPERM; return MP_STREAM_ERROR; } if (!ringbuf_is_full(&self->ring_buffer)) { ret |= MP_STREAM_POLL_WR; } } } else { *errcode = MP_EINVAL; ret = MP_STREAM_ERROR; } return ret; } STATIC const mp_stream_p_t i2s_stream_p = { .read = machine_i2s_stream_read, .write = machine_i2s_stream_write, .ioctl = machine_i2s_ioctl, .is_text = false, }; MP_DEFINE_CONST_OBJ_TYPE( machine_i2s_type, MP_QSTR_I2S, MP_TYPE_FLAG_NONE, machine_i2s_make_new, print, machine_i2s_print, getiter, mp_identity_getiter, iternext, mp_stream_unbuffered_iter, protocol, &i2s_stream_p, locals_dict, (mp_obj_dict_t *)&machine_i2s_locals_dict ); MP_REGISTER_ROOT_POINTER(void *machine_i2s_obj[2]);