diff --git a/drivers/spi/Kconfig.max32 b/drivers/spi/Kconfig.max32 index 0f9e4082862f01..f5f6c8d4e8e499 100644 --- a/drivers/spi/Kconfig.max32 +++ b/drivers/spi/Kconfig.max32 @@ -23,4 +23,22 @@ config SPI_MAX32_DMA help Enable DMA support for MAX32 MCU SPI driver. +if SPI_RTIO +config SPI_MAX32_RTIO_SQ_SIZE + int "Number of available submission queue entries" + default 8 # Sensible default that covers most common spi transactions + help + When RTIO is used with SPI, each driver holds a context which blocking + API calls use to perform SPI transactions. This queue needs to be as deep + as the longest set of spi_buf_sets used, where normal SPI operations are + used (equal length buffers). It may need to be slightly deeper where the + spi buffer sets for transmit/receive are not always matched equally in + length as these are transformed into normal transceives. + +config SPI_MAX32_RTIO_CQ_SIZE + int "Number of available completion queue entries" + default 8 # Sensible default that covers most common spi transactions + +endif # SPI_RTIO + endif # SPI_MAX32 diff --git a/drivers/spi/spi_max32.c b/drivers/spi/spi_max32.c index d0238c260ff7a8..28c5c1a18cb75d 100644 --- a/drivers/spi/spi_max32.c +++ b/drivers/spi/spi_max32.c @@ -17,6 +17,10 @@ #include #include #include +#include +#include +#include +#include #include @@ -51,12 +55,19 @@ struct max32_spi_data { const struct device *dev; mxc_spi_req_t req; uint8_t dummy[2]; + struct k_spinlock lock; + #ifdef CONFIG_SPI_MAX32_DMA volatile uint8_t dma_stat; #endif /* CONFIG_SPI_MAX32_DMA */ + #ifdef CONFIG_SPI_ASYNC struct k_work async_work; #endif /* CONFIG_SPI_ASYNC */ + +#ifdef CONFIG_SPI_RTIO + struct spi_rtio *rtio_ctx; +#endif }; #ifdef CONFIG_SPI_MAX32_DMA @@ -70,6 +81,20 @@ struct max32_spi_data { static void spi_max32_callback(mxc_spi_req_t *req, int error); #endif /* CONFIG_SPI_MAX32_INTERRUPT */ +static inline k_spinlock_key_t spi_spin_lock(const struct device *dev) +{ + struct max32_spi_data *data = dev->data; 
+ + return k_spin_lock(&data->lock); +} + +static inline void spi_spin_unlock(const struct device *dev, k_spinlock_key_t key) +{ + struct max32_spi_data *data = dev->data; + + k_spin_unlock(&data->lock, key); +} + static int spi_configure(const struct device *dev, const struct spi_config *config) { int ret = 0; @@ -234,6 +259,11 @@ static int spi_max32_transceive(const struct device *dev) const struct max32_spi_config *cfg = dev->config; struct max32_spi_data *data = dev->data; struct spi_context *ctx = &data->ctx; + k_spinlock_key_t key; +#ifdef CONFIG_SPI_RTIO + struct spi_rtio *rtio_ctx = data->rtio_ctx; + struct rtio_sqe *sqe = &rtio_ctx->txn_curr->sqe; +#endif uint32_t len; uint8_t dfs_shift; @@ -242,19 +272,45 @@ static int spi_max32_transceive(const struct device *dev) dfs_shift = spi_max32_get_dfs_shift(ctx); len = spi_context_max_continuous_chunk(ctx); +#ifdef CONFIG_SPI_RTIO + if (sqe->op == RTIO_OP_TINY_TX) { + len = sqe->tiny_tx.buf_len; + data->req.txData = (uint8_t *)sqe->tiny_tx.buf; + data->req.rxData = 0; + } else { + len = sqe->txrx.buf_len; + data->req.txData = (uint8_t *)sqe->txrx.tx_buf; + data->req.rxData = sqe->txrx.rx_buf; + } + data->req.txLen = len >> dfs_shift; + data->req.rxLen = len >> dfs_shift; +#else data->req.txLen = len >> dfs_shift; data->req.txData = (uint8_t *)ctx->tx_buf; data->req.rxLen = len >> dfs_shift; data->req.rxData = ctx->rx_buf; data->req.rxData = ctx->rx_buf; +#endif + data->req.rxLen = len >> dfs_shift; if (!data->req.rxData) { /* Pass a dummy buffer to HAL if receive buffer is NULL, otherwise * corrupt data is read during subsequent transactions. 
*/ +#ifndef CONFIG_SPI_RTIO data->req.rxData = data->dummy; data->req.rxLen = 0; +#else + if (sqe->op == RTIO_OP_RX) { + data->req.rxData = sqe->rx.buf; + data->req.rxLen = sqe->rx.buf_len; + data->req.txData = NULL; + } else { + data->req.rxData = NULL; + data->req.rxLen = 0; + } +#endif } data->req.spi = cfg->regs; data->req.ssIdx = ctx->config->slave; @@ -279,6 +335,7 @@ static int spi_max32_transceive(const struct device *dev) MXC_SPI_StartTransmission(cfg->regs); #else + key = spi_spin_lock(dev); ret = spi_max32_transceive_sync(cfg->regs, data, dfs_shift); if (ret) { ret = -EIO; @@ -287,7 +344,7 @@ static int spi_max32_transceive(const struct device *dev) spi_context_update_rx(ctx, 1, len); } #endif - + spi_spin_unlock(dev, key); return ret; } @@ -296,10 +353,12 @@ static int transceive(const struct device *dev, const struct spi_config *config, bool async, spi_callback_t cb, void *userdata) { int ret = 0; - const struct max32_spi_config *cfg = dev->config; struct max32_spi_data *data = dev->data; struct spi_context *ctx = &data->ctx; +#ifndef CONFIG_SPI_RTIO + const struct max32_spi_config *cfg = dev->config; bool hw_cs_ctrl = true; +#endif #ifndef CONFIG_SPI_MAX32_INTERRUPT if (async) { @@ -309,6 +368,12 @@ static int transceive(const struct device *dev, const struct spi_config *config, spi_context_lock(ctx, async, cb, userdata, config); +#if CONFIG_SPI_RTIO + struct spi_rtio *rtio_ctx = data->rtio_ctx; + + ret = spi_rtio_transceive(rtio_ctx, config, tx_bufs, rx_bufs); +#else + ret = spi_configure(dev, config); if (ret != 0) { spi_context_release(ctx, ret); @@ -363,9 +428,8 @@ static int transceive(const struct device *dev, const struct spi_config *config, cfg->regs->ctrl0 |= MXC_F_SPI_CTRL0_EN; } } - +#endif spi_context_release(ctx, ret); - return ret; } @@ -565,6 +629,99 @@ static int transceive_dma(const struct device *dev, const struct spi_config *con } #endif /* CONFIG_SPI_MAX32_DMA */ +#ifdef CONFIG_SPI_RTIO +static void spi_max32_iodev_complete(const 
struct device *dev, int status); + +static void spi_max32_iodev_start(const struct device *dev) +{ + struct max32_spi_data *data = dev->data; + struct spi_rtio *rtio_ctx = data->rtio_ctx; + struct rtio_sqe *sqe = &rtio_ctx->txn_curr->sqe; + int ret = 0; + + switch (sqe->op) { + case RTIO_OP_RX: + case RTIO_OP_TX: + case RTIO_OP_TINY_TX: + case RTIO_OP_TXRX: + ret = spi_max32_transceive(dev); + break; + default: + spi_max32_iodev_complete(dev, -EINVAL); + break; + } + if (ret == 0) { + spi_max32_iodev_complete(dev, 0); + } +} + +static inline void spi_max32_iodev_prepare_start(const struct device *dev) +{ + struct max32_spi_data *data = dev->data; + struct spi_rtio *rtio_ctx = data->rtio_ctx; + struct spi_dt_spec *spi_dt_spec = rtio_ctx->txn_curr->sqe.iodev->data; + struct spi_config *spi_config = &spi_dt_spec->config; + struct max32_spi_config *cfg = (struct max32_spi_config *)dev->config; + int ret; + bool hw_cs_ctrl = true; + + ret = spi_configure(dev, spi_config); + __ASSERT(!ret, "%d", ret); + + /* Check if CS GPIO exists */ + if (spi_cs_is_gpio(spi_config)) { + hw_cs_ctrl = false; + } + MXC_SPI_HWSSControl(cfg->regs, hw_cs_ctrl); + + /* Assert the CS line if HW control disabled */ + if (!hw_cs_ctrl) { + spi_context_cs_control(&data->ctx, true); + } else { + cfg->regs->ctrl0 = (cfg->regs->ctrl0 & ~MXC_F_SPI_CTRL0_START) | + MXC_F_SPI_CTRL0_SS_CTRL; + }; +} + +static void spi_max32_iodev_complete(const struct device *dev, int status) +{ + struct max32_spi_data *data = dev->data; + struct spi_rtio *rtio_ctx = data->rtio_ctx; + + if (!status && rtio_ctx->txn_curr->sqe.flags & RTIO_SQE_TRANSACTION) { + rtio_ctx->txn_curr = rtio_txn_next(rtio_ctx->txn_curr); + spi_max32_iodev_start(dev); + } else { + struct max32_spi_config *cfg = (struct max32_spi_config *)dev->config; + bool hw_cs_ctrl = true; + + if (!hw_cs_ctrl) { + spi_context_cs_control(&data->ctx, false); + } else { + cfg->regs->ctrl0 &= ~(MXC_F_SPI_CTRL0_START | MXC_F_SPI_CTRL0_SS_CTRL | + 
MXC_F_SPI_CTRL0_EN); + cfg->regs->ctrl0 |= MXC_F_SPI_CTRL0_EN; + } + + if (spi_rtio_complete(rtio_ctx, status)) { + spi_max32_iodev_prepare_start(dev); + spi_max32_iodev_start(dev); + } + } +} + +static void api_iodev_submit(const struct device *dev, struct rtio_iodev_sqe *iodev_sqe) +{ + struct max32_spi_data *data = dev->data; + struct spi_rtio *rtio_ctx = data->rtio_ctx; + + if (spi_rtio_submit(rtio_ctx, iodev_sqe)) { + spi_max32_iodev_prepare_start(dev); + spi_max32_iodev_start(dev); + } +} +#endif + static int api_transceive(const struct device *dev, const struct spi_config *config, const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { @@ -685,12 +842,12 @@ static int api_release(const struct device *dev, const struct spi_config *config { struct max32_spi_data *data = dev->data; +#ifndef CONFIG_SPI_RTIO if (!spi_context_configured(&data->ctx, config)) { return -EINVAL; } - +#endif spi_context_unlock_unconditionally(&data->ctx); - return 0; } @@ -724,6 +881,10 @@ static int spi_max32_init(const struct device *dev) data->dev = dev; +#ifdef CONFIG_SPI_RTIO + spi_rtio_init(data->rtio_ctx, dev); +#endif + #ifdef CONFIG_SPI_MAX32_INTERRUPT cfg->irq_config_func(dev); #ifdef CONFIG_SPI_ASYNC @@ -743,8 +904,8 @@ static const struct spi_driver_api spi_max32_api = { .transceive_async = api_transceive_async, #endif /* CONFIG_SPI_ASYNC */ #ifdef CONFIG_SPI_RTIO - .iodev_submit = spi_rtio_iodev_default_submit, -#endif + .iodev_submit = api_iodev_submit, +#endif /* CONFIG_SPI_RTIO */ .release = api_release, }; @@ -784,22 +945,28 @@ static const struct spi_driver_api spi_max32_api = { #define MAX32_SPI_DMA_INIT(n) #endif -#define DEFINE_SPI_MAX32(_num) \ - PINCTRL_DT_INST_DEFINE(_num); \ - SPI_MAX32_IRQ_CONFIG_FUNC(_num) \ - static const struct max32_spi_config max32_spi_config_##_num = { \ +#define DEFINE_SPI_MAX32_RTIO(_num) SPI_RTIO_DEFINE(max32_spi_rtio_##_num, \ + CONFIG_SPI_MAX32_RTIO_SQ_SIZE, \ + CONFIG_SPI_MAX32_RTIO_CQ_SIZE) + +#define 
DEFINE_SPI_MAX32(_num) \ + PINCTRL_DT_INST_DEFINE(_num); \ + SPI_MAX32_IRQ_CONFIG_FUNC(_num) \ + COND_CODE_1(CONFIG_SPI_RTIO, (DEFINE_SPI_MAX32_RTIO(_num)), ()); \ + static const struct max32_spi_config max32_spi_config_##_num = { \ .regs = (mxc_spi_regs_t *)DT_INST_REG_ADDR(_num), \ .pctrl = PINCTRL_DT_INST_DEV_CONFIG_GET(_num), \ .clock = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(_num)), \ .perclk.bus = DT_INST_CLOCKS_CELL(_num, offset), \ .perclk.bit = DT_INST_CLOCKS_CELL(_num, bit), \ MAX32_SPI_DMA_INIT(_num) SPI_MAX32_CONFIG_IRQ_FUNC(_num)}; \ - static struct max32_spi_data max32_spi_data_##_num = { \ + static struct max32_spi_data max32_spi_data_##_num = { \ SPI_CONTEXT_INIT_LOCK(max32_spi_data_##_num, ctx), \ SPI_CONTEXT_INIT_SYNC(max32_spi_data_##_num, ctx), \ - SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(_num), ctx)}; \ - DEVICE_DT_INST_DEFINE(_num, spi_max32_init, NULL, &max32_spi_data_##_num, \ - &max32_spi_config_##_num, PRE_KERNEL_2, CONFIG_SPI_INIT_PRIORITY, \ + SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(_num), ctx) \ + IF_ENABLED(CONFIG_SPI_RTIO, (.rtio_ctx = &max32_spi_rtio_##_num))}; \ + DEVICE_DT_INST_DEFINE(_num, spi_max32_init, NULL, &max32_spi_data_##_num, \ + &max32_spi_config_##_num, PRE_KERNEL_2, CONFIG_SPI_INIT_PRIORITY, \ &spi_max32_api); DT_INST_FOREACH_STATUS_OKAY(DEFINE_SPI_MAX32)