From 9db0956873b0e214d79221679019d37f333b71aa Mon Sep 17 00:00:00 2001
From: Declan Snyder
Date: Mon, 17 Jun 2024 16:34:52 -0500
Subject: [PATCH 1/2] drivers: spi_mcux_lpspi: Clean up code

While working on fixing a bug, I found this driver to be a mess and
difficult to read, so clean it up. Most of the changes have no
functional effect; some, however, result in minor optimizations in the
image.

Trivial changes:
- Organize #includes and #defines and put prototypes at the top.
- Remove some very redundant comments.
- Remove code that was commented out.
- Reword a few comments.
- Add comments to #endif directives showing which condition they end.
- Remove as many preprocessor directives as possible where they are not
  necessary.
- Reorder some functions to be located near other relevant functions
  (e.g., put init and ISR near the device init macro).
- Add some delimiting comments for readability.
- Remove many unnecessary blank lines.
- Align the line-continuation backslashes at the end of macros.
- Align debugging statements.

Minor changes:
- Consolidate some duplicated code in the DMA paths into helper
  functions (spi_mcux_dma_equal_block_length and
  spi_mcux_dma_common_load). This should make the image smaller with no
  functional difference besides a struct potentially getting its
  members filled in a different order.
- Reduce redundancy and #ifdef chaos in the transceive wrappers by
  adding another wrapper function.
- Rework some code to reduce indentation levels and nested conditionals
  that were hard to follow; the functional logic is kept the same.
- Remove the redundant wrapper around k_spin_lock.
- Remove parent_dev from the config struct and use irq_config_func in
  all cases, with a different definition for the LP Flexcomm and legacy
  paths.

Fixes:
- Fix code added in 17032a093d31da46a106a2abff97af6177367b8d that
  appears to have a bug: spi_mcux_transceive_async calls transceive_dma
  even when there is no DMA device.
- Fix a code path in transceive_dma that appears to have a control-flow
  bug (probably due to the hard-to-follow nesting and #ifdefs): in one
  error scenario, code that depends on earlier setup would still
  execute even though that setup was skipped because of the error. The
  intent seems to have been to wrap up and exit the function on error,
  like the other code path does, so do that.
- Fix the slave select input check allowing slave 4, caused by using >
  instead of >= in the check.
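
For reference, the DMA block-length consolidation mentioned above
reduces to the small helper sketched here. This is a simplified excerpt
of what the diff below adds; the spi_context fields (tx_len, rx_len)
and the MIN()/MAX() macros are assumed to come from Zephyr's
spi_context.h and zephyr/sys/util.h.

    /*
     * Pick one DMA block length to load into both the TX and RX
     * channels: if one direction has no data left, use the other
     * direction's length; otherwise use the shorter of the two so both
     * blocks stay equal in size.
     */
    static size_t spi_mcux_dma_equal_block_length(struct spi_context *ctx)
    {
            if (ctx->tx_len == 0 || ctx->rx_len == 0) {
                    return MAX(ctx->tx_len, ctx->rx_len);
            }

            return MIN(ctx->tx_len, ctx->rx_len);
    }

The slave-select fix is only the comparison operator: with
CHIP_SELECT_COUNT == 4, valid chip selects are 0 through 3, so the
guard must reject spi_cfg->slave >= CHIP_SELECT_COUNT rather than only
> CHIP_SELECT_COUNT.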
Signed-off-by: Declan Snyder --- drivers/spi/spi_mcux_lpspi.c | 576 +++++++++++++++++------------------ 1 file changed, 275 insertions(+), 301 deletions(-) diff --git a/drivers/spi/spi_mcux_lpspi.c b/drivers/spi/spi_mcux_lpspi.c index 4cbf21d77eb9b7..3a44e46867b09e 100644 --- a/drivers/spi/spi_mcux_lpspi.c +++ b/drivers/spi/spi_mcux_lpspi.c @@ -9,49 +9,41 @@ #include #include #include -#include +#include +#include + +#include +LOG_MODULE_REGISTER(spi_mcux_lpspi, CONFIG_SPI_LOG_LEVEL); + #if CONFIG_NXP_LP_FLEXCOMM #include #endif -#include -#include + #ifdef CONFIG_SPI_MCUX_LPSPI_DMA #include #endif -#include + #ifdef CONFIG_SPI_RTIO #include #include #endif -LOG_MODULE_REGISTER(spi_mcux_lpspi, CONFIG_SPI_LOG_LEVEL); +#include #include "spi_context.h" +/* These two defines should be made into DT properties if they ever are different */ #define CHIP_SELECT_COUNT 4 #define MAX_DATA_WIDTH 4096 - -/* Required by DEVICE_MMIO_NAMED_* macros */ +/* Defines required by DEVICE_MMIO_NAMED_* macros */ #define DEV_CFG(_dev) \ ((const struct spi_mcux_config *)(_dev)->config) #define DEV_DATA(_dev) ((struct spi_mcux_data *)(_dev)->data) -struct spi_mcux_config { - DEVICE_MMIO_NAMED_ROM(reg_base); -#ifdef CONFIG_NXP_LP_FLEXCOMM - const struct device *parent_dev; -#endif - const struct device *clock_dev; - clock_control_subsys_t clock_subsys; - void (*irq_config_func)(const struct device *dev); - uint32_t pcs_sck_delay; - uint32_t sck_pcs_delay; - uint32_t transfer_delay; - const struct pinctrl_dev_config *pincfg; - lpspi_pin_config_t data_pin_config; -}; -#ifdef CONFIG_SPI_MCUX_LPSPI_DMA +/* ******** DMA Definitions ******* */ +#if defined(CONFIG_SPI_MCUX_LPSPI_DMA) + #define SPI_MCUX_LPSPI_DMA_ERROR_FLAG 0x01 #define SPI_MCUX_LPSPI_DMA_RX_DONE_FLAG 0x02 #define SPI_MCUX_LPSPI_DMA_TX_DONE_FLAG 0x04 @@ -64,7 +56,32 @@ struct stream { struct dma_config dma_cfg; struct dma_block_config dma_blk_cfg; }; -#endif + +static int spi_mcux_dma_rxtx_load(const struct device *dev, size_t *dma_size); +#endif /* CONFIG_SPI_MCUX_LPSPI_DMA */ +/* ******************************** */ + + +/* ******* RTIO Definitions ******* */ +#if defined(CONFIG_SPI_RTIO) +static void spi_mcux_iodev_complete(const struct device *dev, int status); +static void spi_mcux_iodev_next(const struct device *dev, bool completion); +static void spi_mcux_iodev_start(const struct device *dev); +#endif /* CONFIG_SPI_RTIO */ +/* ******************************** */ + + +struct spi_mcux_config { + DEVICE_MMIO_NAMED_ROM(reg_base); + const struct device *clock_dev; + clock_control_subsys_t clock_subsys; + void (*irq_config_func)(const struct device *dev); + uint32_t pcs_sck_delay; + uint32_t sck_pcs_delay; + uint32_t transfer_delay; + const struct pinctrl_dev_config *pincfg; + lpspi_pin_config_t data_pin_config; +}; struct spi_mcux_data { DEVICE_MMIO_NAMED_RAM(reg_base); @@ -94,9 +111,9 @@ struct spi_mcux_data { #endif }; + static int spi_mcux_transfer_next_packet(const struct device *dev) { - /* const struct spi_mcux_config *config = dev->config; */ struct spi_mcux_data *data = dev->data; LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); struct spi_context *ctx = &data->ctx; @@ -158,23 +175,6 @@ static int spi_mcux_transfer_next_packet(const struct device *dev) return 0; } -static void spi_mcux_isr(const struct device *dev) -{ - /* const struct spi_mcux_config *config = dev->config; */ - struct spi_mcux_data *data = dev->data; - LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); - -#if CONFIG_NXP_LP_FLEXCOMM - 
LPSPI_MasterTransferHandleIRQ(LPSPI_GetInstance(base), &data->handle); -#else - LPSPI_MasterTransferHandleIRQ(base, &data->handle); -#endif -} - -#ifdef CONFIG_SPI_RTIO -static void spi_mcux_iodev_complete(const struct device *dev, int status); -#endif - static void spi_mcux_master_transfer_callback(LPSPI_Type *base, lpspi_master_handle_t *handle, status_t status, void *userData) { @@ -214,10 +214,9 @@ static int spi_mcux_configure(const struct device *dev, LPSPI_MasterGetDefaultConfig(&master_config); - if (spi_cfg->slave > CHIP_SELECT_COUNT) { + if (spi_cfg->slave >= CHIP_SELECT_COUNT) { LOG_ERR("Slave %d is greater than %d", - spi_cfg->slave, - CHIP_SELECT_COUNT); + spi_cfg->slave, CHIP_SELECT_COUNT); return -EINVAL; } @@ -269,12 +268,12 @@ static int spi_mcux_configure(const struct device *dev, * CR register cannot be written. */ LPSPI_Enable(base, false); - while ((base->CR & LPSPI_CR_MEN_MASK) != 0U) { - /* Wait until LPSPI is disabled. Datasheet: - * After writing 0, MEN (Module Enable) remains set until the LPSPI has - * completed the current transfer and is idle. - */ - } + /* Wait until LPSPI is disabled. Datasheet: + * After writing 0, MEN (Module Enable) remains set until the LPSPI has + * completed the current transfer and is idle. + */ + while ((base->CR & LPSPI_CR_MEN_MASK) != 0U) + ; } LPSPI_MasterInit(base, &master_config, clock_freq); @@ -290,74 +289,89 @@ static int spi_mcux_configure(const struct device *dev, return 0; } + #ifdef CONFIG_SPI_MCUX_LPSPI_DMA -static int spi_mcux_dma_rxtx_load(const struct device *dev, - size_t *dma_size); +/* helper function */ +static size_t spi_mcux_dma_equal_block_length(struct spi_context *ctx) +{ + if (ctx->tx_len == 0 || ctx->rx_len == 0) { + return MAX(ctx->tx_len, ctx->rx_len); + } else { + return MIN(ctx->tx_len, ctx->rx_len); + } +} /* This function is executed in the interrupt context */ static void spi_mcux_dma_callback(const struct device *dev, void *arg, uint32_t channel, int status) { - /* arg directly holds the spi device */ const struct device *spi_dev = arg; struct spi_mcux_data *data = (struct spi_mcux_data *)spi_dev->data; if (status < 0) { LOG_ERR("DMA callback error with channel %d.", channel); data->status_flags |= SPI_MCUX_LPSPI_DMA_ERROR_FLAG; + goto out; + } + + /* identify the origin of this callback */ + if (channel == data->dma_tx.channel) { + LOG_DBG("DMA TX Block Complete"); + data->status_flags |= SPI_MCUX_LPSPI_DMA_TX_DONE_FLAG; + } else if (channel == data->dma_rx.channel) { + LOG_DBG("DMA RX Block Complete"); + data->status_flags |= SPI_MCUX_LPSPI_DMA_RX_DONE_FLAG; } else { - /* identify the origin of this callback */ - if (channel == data->dma_tx.channel) { - /* this part of the transfer ends */ - data->status_flags |= SPI_MCUX_LPSPI_DMA_TX_DONE_FLAG; - LOG_DBG("DMA TX Block Complete"); - } else if (channel == data->dma_rx.channel) { - /* this part of the transfer ends */ - data->status_flags |= SPI_MCUX_LPSPI_DMA_RX_DONE_FLAG; - LOG_DBG("DMA RX Block Complete"); - } else { - LOG_ERR("DMA callback channel %d is not valid.", - channel); - data->status_flags |= SPI_MCUX_LPSPI_DMA_ERROR_FLAG; - } + LOG_ERR("DMA callback channel %d is not valid.", channel); + data->status_flags |= SPI_MCUX_LPSPI_DMA_ERROR_FLAG; } -#if CONFIG_SPI_ASYNC - if (data->ctx.asynchronous && - ((data->status_flags & SPI_MCUX_LPSPI_DMA_DONE_FLAG) == - SPI_MCUX_LPSPI_DMA_DONE_FLAG)) { - /* Load dma blocks of equal length */ - size_t dma_size = MIN(data->ctx.tx_len, data->ctx.rx_len); - if (dma_size == 0) { - dma_size = 
MAX(data->ctx.tx_len, data->ctx.rx_len); - } +#ifdef CONFIG_SPI_ASYNC + bool dma_done = data->status_flags & SPI_MCUX_LPSPI_DMA_DONE_FLAG; + size_t dma_size; + + if (data->ctx.asynchronous && dma_done) { + dma_size = spi_mcux_dma_equal_block_length(&data->ctx); spi_context_update_tx(&data->ctx, 1, dma_size); spi_context_update_rx(&data->ctx, 1, dma_size); if (data->ctx.tx_len == 0 && data->ctx.rx_len == 0) { - spi_context_complete(&data->ctx, spi_dev, 0); + goto out; + } else { + return; } - return; } -#endif +#endif /* CONFIG_SPI_ASYNC */ + +out: spi_context_complete(&data->ctx, spi_dev, 0); } +static void spi_mcux_dma_common_load(const struct device *dev, + struct stream *dma_stream, size_t len) +{ + struct dma_block_config *blk_cfg = &stream->dma_blk_cfg; + + memset(blk_cfg, 0, sizeof(struct dma_block_config)); + + blk_cfg->block_size = len; + + dma_stream->dma_cfg.head_block = blk_cfg; + dma_stream->dma_cfg.user_data = (struct device *)dev; + dma_stream->dma_cfg.source_burst_length = 1; +} + static int spi_mcux_dma_tx_load(const struct device *dev, const uint8_t *buf, size_t len) { - /* const struct spi_mcux_config *cfg = dev->config; */ struct spi_mcux_data *data = dev->data; - struct dma_block_config *blk_cfg; LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); /* remember active TX DMA channel (used in callback) */ struct stream *stream = &data->dma_tx; + struct dma_block_config *blk_cfg = &stream->dma_blk_cfg; - blk_cfg = &stream->dma_blk_cfg; - - /* prepare the block for this TX DMA channel */ - memset(blk_cfg, 0, sizeof(struct dma_block_config)); + spi_mcux_dma_common_load(dev, stream, len); if (buf == NULL) { /* Treat the transfer as a peripheral to peripheral one, so that DMA @@ -374,13 +388,7 @@ static int spi_mcux_dma_tx_load(const struct device *dev, const uint8_t *buf, si blk_cfg->source_gather_en = 1; /* Dest is LPSPI tx fifo */ blk_cfg->dest_address = LPSPI_GetTxRegisterAddress(base); - blk_cfg->block_size = len; - /* Transfer 1 byte each DMA loop */ - stream->dma_cfg.source_burst_length = 1; - stream->dma_cfg.head_block = &stream->dma_blk_cfg; - /* give the client dev as arg, as the callback comes from the dma */ - stream->dma_cfg.user_data = (struct device *)dev; /* pass our client origin to the dma: data->dma_tx.dma_channel */ return dma_config(data->dma_tx.dma_dev, data->dma_tx.channel, &stream->dma_cfg); @@ -389,18 +397,14 @@ static int spi_mcux_dma_tx_load(const struct device *dev, const uint8_t *buf, si static int spi_mcux_dma_rx_load(const struct device *dev, uint8_t *buf, size_t len) { - /*const struct spi_mcux_config *cfg = dev->config; */ struct spi_mcux_data *data = dev->data; - struct dma_block_config *blk_cfg; LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); /* retrieve active RX DMA channel (used in callback) */ struct stream *stream = &data->dma_rx; + struct dma_block_config *blk_cfg = &stream->dma_blk_cfg; - blk_cfg = &stream->dma_blk_cfg; - - /* prepare the block for this RX DMA channel */ - memset(blk_cfg, 0, sizeof(struct dma_block_config)); + spi_mcux_dma_common_load(dev, stream, len); if (buf == NULL) { /* Treat the transfer as a peripheral to peripheral one, so that DMA @@ -413,15 +417,10 @@ static int spi_mcux_dma_rx_load(const struct device *dev, uint8_t *buf, blk_cfg->dest_address = (uint32_t)buf; stream->dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY; } - blk_cfg->block_size = len; /* Enable scatter/gather */ blk_cfg->dest_scatter_en = 1; /* Source is LPSPI rx fifo */ blk_cfg->source_address = 
LPSPI_GetRxRegisterAddress(base); - stream->dma_cfg.source_burst_length = 1; - - stream->dma_cfg.head_block = blk_cfg; - stream->dma_cfg.user_data = (struct device *)dev; /* pass our client origin to the dma: data->dma_rx.channel */ return dma_config(data->dma_rx.dma_dev, data->dma_rx.channel, @@ -431,7 +430,7 @@ static int spi_mcux_dma_rx_load(const struct device *dev, uint8_t *buf, static int wait_dma_rx_tx_done(const struct device *dev) { struct spi_mcux_data *data = dev->data; - int ret = -1; + int ret; while (1) { ret = spi_context_wait_for_completion(&data->ctx); @@ -439,6 +438,7 @@ static int wait_dma_rx_tx_done(const struct device *dev) LOG_DBG("Timed out waiting for SPI context to complete"); return ret; } + if (data->status_flags & SPI_MCUX_LPSPI_DMA_ERROR_FLAG) { return -EIO; } @@ -455,39 +455,28 @@ static inline int spi_mcux_dma_rxtx_load(const struct device *dev, size_t *dma_size) { struct spi_mcux_data *lpspi_data = dev->data; - int ret = 0; + int ret; - /* Clear status flags */ - lpspi_data->status_flags = 0U; - /* Load dma blocks of equal length */ - *dma_size = MIN(lpspi_data->ctx.tx_len, lpspi_data->ctx.rx_len); - if (*dma_size == 0) { - *dma_size = MAX(lpspi_data->ctx.tx_len, lpspi_data->ctx.rx_len); - } + lpspi_data->status_flags = 0; + + *dma_size = spi_mcux_dma_equal_block_length(&lpspi_data->ctx); - ret = spi_mcux_dma_tx_load(dev, lpspi_data->ctx.tx_buf, - *dma_size); + ret = spi_mcux_dma_tx_load(dev, lpspi_data->ctx.tx_buf, *dma_size); if (ret != 0) { return ret; } - ret = spi_mcux_dma_rx_load(dev, lpspi_data->ctx.rx_buf, - *dma_size); + ret = spi_mcux_dma_rx_load(dev, lpspi_data->ctx.rx_buf, *dma_size); if (ret != 0) { return ret; } - /* Start DMA */ - ret = dma_start(lpspi_data->dma_tx.dma_dev, - lpspi_data->dma_tx.channel); + ret = dma_start(lpspi_data->dma_tx.dma_dev, lpspi_data->dma_tx.channel); if (ret != 0) { return ret; } - ret = dma_start(lpspi_data->dma_rx.dma_dev, - lpspi_data->dma_rx.channel); - return ret; - + return dma_start(lpspi_data->dma_rx.dma_dev, lpspi_data->dma_rx.channel); } static int transceive_dma(const struct device *dev, @@ -498,64 +487,29 @@ static int transceive_dma(const struct device *dev, spi_callback_t cb, void *userdata) { - /* const struct spi_mcux_config *config = dev->config; */ struct spi_mcux_data *data = dev->data; LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); - int ret; size_t dma_size; + int ret; - if (!asynchronous) { + if (asynchronous) { + spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); + } else { spi_context_lock(&data->ctx, asynchronous, cb, userdata, spi_cfg); } ret = spi_mcux_configure(dev, spi_cfg); - if (ret) { - if (!asynchronous) { - spi_context_release(&data->ctx, ret); - } + if (ret && asynchronous) { return ret; + } else if (ret && !asynchronous) { + goto out; } /* DMA is fast enough watermarks are not required */ LPSPI_SetFifoWatermarks(base, 0U, 0U); - if (!asynchronous) { - spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); - spi_context_cs_control(&data->ctx, true); - - /* Send each spi buf via DMA, updating context as DMA completes */ - while (data->ctx.rx_len > 0 || data->ctx.tx_len > 0) { - /* Load dma block */ - ret = spi_mcux_dma_rxtx_load(dev, &dma_size); - if (ret != 0) { - goto out; - } - /* Enable DMA Requests */ - LPSPI_EnableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable); - - /* Wait for DMA to finish */ - ret = wait_dma_rx_tx_done(dev); - if (ret != 0) { - goto out; - } - while ((LPSPI_GetStatusFlags(base) & kLPSPI_ModuleBusyFlag)) { - /* wait 
until module is idle */ - } - - /* Disable DMA */ - LPSPI_DisableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable); - - /* Update SPI contexts with amount of data we just sent */ - spi_context_update_tx(&data->ctx, 1, dma_size); - spi_context_update_rx(&data->ctx, 1, dma_size); - } - spi_context_cs_control(&data->ctx, false); - -out: - spi_context_release(&data->ctx, ret); - } -#if CONFIG_SPI_ASYNC - else { +#ifdef CONFIG_SPI_ASYNC + if (asynchronous) { data->ctx.asynchronous = asynchronous; data->ctx.callback = cb; data->ctx.callback_data = userdata; @@ -567,12 +521,47 @@ static int transceive_dma(const struct device *dev, /* Enable DMA Requests */ LPSPI_EnableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable); + + return ret; } #endif + spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); + spi_context_cs_control(&data->ctx, true); + + /* Send each spi buf via DMA, updating context as DMA completes */ + while (data->ctx.rx_len > 0 || data->ctx.tx_len > 0) { + /* Load dma block */ + ret = spi_mcux_dma_rxtx_load(dev, &dma_size); + if (ret != 0) { + goto out; + } + /* Enable DMA Requests */ + LPSPI_EnableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable); + + /* Wait for DMA to finish */ + ret = wait_dma_rx_tx_done(dev); + if (ret != 0) { + goto out; + } + while ((LPSPI_GetStatusFlags(base) & kLPSPI_ModuleBusyFlag)) { + /* wait until module is idle */ + } + + /* Disable DMA */ + LPSPI_DisableDMA(base, kLPSPI_TxDmaEnable | kLPSPI_RxDmaEnable); + + /* Update SPI contexts with amount of data we just sent */ + spi_context_update_tx(&data->ctx, 1, dma_size); + spi_context_update_rx(&data->ctx, 1, dma_size); + } + spi_context_cs_control(&data->ctx, false); + +out: + spi_context_release(&data->ctx, ret); return ret; } -#endif +#endif /* CONFIG_SPI_MCUX_LPSPI_DMA */ static int transceive(const struct device *dev, const struct spi_config *spi_cfg, @@ -602,27 +591,40 @@ static int transceive(const struct device *dev, } ret = spi_context_wait_for_completion(&data->ctx); + out: spi_context_release(&data->ctx, ret); return ret; } - -static int spi_mcux_transceive(const struct device *dev, - const struct spi_config *spi_cfg, - const struct spi_buf_set *tx_bufs, - const struct spi_buf_set *rx_bufs) +static int spi_mcux_transceive_wrapper(const struct device *dev, + const struct spi_config *spi_cfg, + const struct spi_buf_set *tx_bufs, + const struct spi_buf_set *rx_bufs, + bool async, + spi_callback_t cb, + void *userdata) { #ifdef CONFIG_SPI_MCUX_LPSPI_DMA - const struct spi_mcux_data *data = dev->data; + struct spi_mcux_data *data = dev->data; + bool dma = data->dma_rx.dma_dev && data->dma_tx.dma_dev; - if (data->dma_rx.dma_dev && data->dma_tx.dma_dev) { - return transceive_dma(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL); + if (dma) { + return transceive_dma(dev, spi_cfg, tx_bufs, rx_bufs, async, cb, userdata); } #endif /* CONFIG_SPI_MCUX_LPSPI_DMA */ - return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL); + return transceive(dev, spi_cfg, tx_bufs, rx_bufs, async, cb, userdata); +} + + +static int spi_mcux_transceive(const struct device *dev, + const struct spi_config *spi_cfg, + const struct spi_buf_set *tx_bufs, + const struct spi_buf_set *rx_bufs) +{ + return spi_mcux_transceive_wrapper(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL); } #ifdef CONFIG_SPI_ASYNC @@ -633,17 +635,7 @@ static int spi_mcux_transceive_async(const struct device *dev, spi_callback_t cb, void *userdata) { -#ifdef CONFIG_SPI_MCUX_LPSPI_DMA - struct spi_mcux_data *data = dev->data; - - if 
(data->dma_rx.dma_dev && data->dma_tx.dma_dev) { - spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); - } - - return transceive_dma(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata); -#else - return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata); -#endif /* CONFIG_SPI_MCUX_LPSPI_DMA */ + return spi_mcux_transceive_wrapper(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata); } #endif /* CONFIG_SPI_ASYNC */ @@ -657,86 +649,19 @@ static int spi_mcux_release(const struct device *dev, return 0; } -static int spi_mcux_init(const struct device *dev) -{ - int err; - const struct spi_mcux_config *config = dev->config; - struct spi_mcux_data *data = dev->data; - - DEVICE_MMIO_NAMED_MAP(dev, reg_base, K_MEM_CACHE_NONE | K_MEM_DIRECT_MAP); - -#if CONFIG_NXP_LP_FLEXCOMM - /* When using LP Flexcomm driver, register the interrupt handler - * so we receive notification from the LP Flexcomm interrupt handler. - */ - nxp_lp_flexcomm_setirqhandler(config->parent_dev, dev, - LP_FLEXCOMM_PERIPH_LPSPI, spi_mcux_isr); -#else - /* Interrupt is managed by this driver */ - config->irq_config_func(dev); -#endif - - err = spi_context_cs_configure_all(&data->ctx); - if (err < 0) { - return err; - } - - spi_context_unlock_unconditionally(&data->ctx); - - data->dev = dev; - -#ifdef CONFIG_SPI_MCUX_LPSPI_DMA - if (data->dma_tx.dma_dev && data->dma_rx.dma_dev) { - if (!device_is_ready(data->dma_tx.dma_dev)) { - LOG_ERR("%s device is not ready", data->dma_tx.dma_dev->name); - return -ENODEV; - } - - if (!device_is_ready(data->dma_rx.dma_dev)) { - LOG_ERR("%s device is not ready", data->dma_rx.dma_dev->name); - return -ENODEV; - } - } -#endif /* CONFIG_SPI_MCUX_LPSPI_DMA */ - -#ifdef CONFIG_SPI_RTIO - data->dt_spec.bus = dev; - data->iodev.api = &spi_iodev_api; - data->iodev.data = &data->dt_spec; - mpsc_init(&data->io_q); -#endif - - err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); - if (err) { - return err; - } - - spi_context_unlock_unconditionally(&data->ctx); - - return 0; -} #ifdef CONFIG_SPI_RTIO -static inline k_spinlock_key_t spi_spin_lock(const struct device *dev) -{ - struct spi_mcux_data *data = dev->data; - - return k_spin_lock(&data->lock); -} - -static inline void spi_spin_unlock(const struct device *dev, k_spinlock_key_t key) +static void spi_mcux_iodev_submit(const struct device *dev, + struct rtio_iodev_sqe *iodev_sqe) { struct spi_mcux_data *data = dev->data; - k_spin_unlock(&data->lock, key); + mpsc_push(&data->io_q, &iodev_sqe->q); + spi_mcux_iodev_next(dev, false); } - -static void spi_mcux_iodev_next(const struct device *dev, bool completion); - static void spi_mcux_iodev_start(const struct device *dev) { - /* const struct spi_mcux_config *config = dev->config; */ struct spi_mcux_data *data = dev->data; struct rtio_sqe *sqe = &data->txn_curr->sqe; struct spi_dt_spec *spi_dt_spec = sqe->iodev->data; @@ -782,11 +707,11 @@ static void spi_mcux_iodev_start(const struct device *dev) data->transfer_len = transfer.dataSize; - k_spinlock_key_t key = spi_spin_lock(dev); + k_spinlock_key_t key = k_spin_lock(&data->lock); status = LPSPI_MasterTransferNonBlocking(base, &data->handle, &transfer); - spi_spin_unlock(dev, key); + k_spin_unlock(&data->lock, key); if (status != kStatus_Success) { LOG_ERR("Transfer could not start"); rtio_iodev_sqe_err(txn_head, -EIO); @@ -797,10 +722,10 @@ static void spi_mcux_iodev_next(const struct device *dev, bool completion) { struct spi_mcux_data *data = dev->data; - k_spinlock_key_t key = spi_spin_lock(dev); + k_spinlock_key_t key = 
k_spin_lock(&data->lock); if (!completion && data->txn_curr != NULL) { - spi_spin_unlock(dev, key); + k_spin_unlock(&data->lock, key); return; } @@ -816,7 +741,7 @@ static void spi_mcux_iodev_next(const struct device *dev, bool completion) data->txn_curr = NULL; } - spi_spin_unlock(dev, key); + k_spin_unlock(&data->lock, key); if (data->txn_curr != NULL) { struct spi_dt_spec *spi_dt_spec = data->txn_curr->sqe.iodev->data; @@ -828,15 +753,6 @@ static void spi_mcux_iodev_next(const struct device *dev, bool completion) } } -static void spi_mcux_iodev_submit(const struct device *dev, - struct rtio_iodev_sqe *iodev_sqe) -{ - struct spi_mcux_data *data = dev->data; - - mpsc_push(&data->io_q, &iodev_sqe->q); - spi_mcux_iodev_next(dev, false); -} - static void spi_mcux_iodev_complete(const struct device *dev, int status) { struct spi_mcux_data *data = dev->data; @@ -852,28 +768,90 @@ static void spi_mcux_iodev_complete(const struct device *dev, int status) rtio_iodev_sqe_ok(txn_head, status); } } - - -#endif +#endif /* CONFIG_SPI_RTIO */ static const struct spi_driver_api spi_mcux_driver_api = { .transceive = spi_mcux_transceive, #ifdef CONFIG_SPI_ASYNC .transceive_async = spi_mcux_transceive_async, -#endif +#endif /* CONFIG_SPI_ASYNC */ #ifdef CONFIG_SPI_RTIO .iodev_submit = spi_mcux_iodev_submit, -#endif +#endif /* CONFIG_SPI_RTIO */ .release = spi_mcux_release, }; -#define SPI_MCUX_RTIO_DEFINE(n) RTIO_DEFINE(spi_mcux_rtio_##n, CONFIG_SPI_MCUX_RTIO_SQ_SIZE, \ - CONFIG_SPI_MCUX_RTIO_SQ_SIZE) +static void spi_mcux_isr(const struct device *dev) +{ + struct spi_mcux_data *data = dev->data; + LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); + + LPSPI_MasterTransferHandleIRQ( + /* SDK has different first argument for some platform */ + COND_CODE_1(CONFIG_NXP_LP_FLEXCOMM, + (LPSPI_GetInstance(base)), + (base)), + &data->handle); +} + +static int spi_mcux_init(const struct device *dev) +{ + int err; + const struct spi_mcux_config *config = dev->config; + struct spi_mcux_data *data = dev->data; + + DEVICE_MMIO_NAMED_MAP(dev, reg_base, K_MEM_CACHE_NONE | K_MEM_DIRECT_MAP); + + config->irq_config_func(dev); + + err = spi_context_cs_configure_all(&data->ctx); + if (err < 0) { + return err; + } + + spi_context_unlock_unconditionally(&data->ctx); + + data->dev = dev; #ifdef CONFIG_SPI_MCUX_LPSPI_DMA -#define SPI_DMA_CHANNELS(n) \ + if (data->dma_tx.dma_dev && data->dma_rx.dma_dev) { + if (!device_is_ready(data->dma_tx.dma_dev)) { + LOG_ERR("%s device is not ready", data->dma_tx.dma_dev->name); + return -ENODEV; + } + + if (!device_is_ready(data->dma_rx.dma_dev)) { + LOG_ERR("%s device is not ready", data->dma_rx.dma_dev->name); + return -ENODEV; + } + } +#endif /* CONFIG_SPI_MCUX_LPSPI_DMA */ + +#ifdef CONFIG_SPI_RTIO + data->dt_spec.bus = dev; + data->iodev.api = &spi_iodev_api; + data->iodev.data = &data->dt_spec; + mpsc_init(&data->io_q); +#endif /* CONFIG_SPI_RTIO */ + + err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); + if (err) { + return err; + } + + spi_context_unlock_unconditionally(&data->ctx); + + return 0; +} + +#define SPI_MCUX_RTIO_DEFINE(n) IF_ENABLED(CONFIG_SPI_RTIO, \ + (RTIO_DEFINE(spi_mcux_rtio_##n, \ + CONFIG_SPI_MCUX_RTIO_SQ_SIZE, \ + CONFIG_SPI_MCUX_RTIO_SQ_SIZE))) + +#define SPI_DMA_CHANNELS(n) IF_ENABLED(CONFIG_SPI_MCUX_LPSPI_DMA, ( \ IF_ENABLED(DT_INST_DMAS_HAS_NAME(n, tx), \ ( \ .dma_tx = { \ @@ -905,10 +883,7 @@ static const struct spi_driver_api spi_mcux_driver_api = { .dma_slot = DT_INST_DMAS_CELL_BY_NAME(n, rx, source) \ } \ }, \ - )) -#else 
-#define SPI_DMA_CHANNELS(n) -#endif /* CONFIG_SPI_MCUX_LPSPI_DMA */ + )))) #define SPI_MCUX_LPSPI_MODULE_IRQ_CONNECT(n) \ do { \ @@ -919,26 +894,30 @@ static const struct spi_driver_api spi_mcux_driver_api = { irq_enable(DT_INST_IRQN(n)); \ } while (false) -#define SPI_MCUX_LPSPI_MODULE_IRQ(n) \ - IF_ENABLED(DT_INST_IRQ_HAS_IDX(n, 0), \ - (SPI_MCUX_LPSPI_MODULE_IRQ_CONNECT(n))) +#define SPI_MCUX_LP_FLEXCOMM_LPSPI_IRQ(n) \ + nxp_lp_flexcomm_setirqhandler(DEVICE_DT_GET(DT_INST_PARENT(n)), \ + DEVICE_DT_INST_GET(n), \ + LP_FLEXCOMM_PERIPH_LPSPI, \ + spi_mcux_isr) + +#define SPI_MCUX_LPSPI_IRQ_CFG(n) \ + COND_CODE_1(IS_ENABLED(CONFIG_NXP_LP_FLEXCOMM), \ + (SPI_MCUX_LP_FLEXCOMM_LPSPI_IRQ(n)), \ + (SPI_MCUX_LPSPI_MODULE_IRQ_CONNECT(n))) -#ifdef CONFIG_NXP_LP_FLEXCOMM -#define PARENT_DEV(n) \ - .parent_dev = DEVICE_DT_GET(DT_INST_PARENT(n)), -#else -#define PARENT_DEV(n) -#endif /* CONFIG_NXP_LP_FLEXCOMM */ #define SPI_MCUX_LPSPI_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ - COND_CODE_1(CONFIG_SPI_RTIO, (SPI_MCUX_RTIO_DEFINE(n)), ()); \ \ - static void spi_mcux_config_func_##n(const struct device *dev); \ + SPI_MCUX_RTIO_DEFINE(n); \ + \ + static void spi_mcux_config_func_##n(const struct device *dev) \ + { \ + SPI_MCUX_LPSPI_IRQ_CFG(n); \ + } \ \ static const struct spi_mcux_config spi_mcux_config_##n = { \ DEVICE_MMIO_NAMED_ROM_INIT(reg_base, DT_DRV_INST(n)), \ - PARENT_DEV(n) \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .clock_subsys = \ (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name), \ @@ -971,10 +950,5 @@ static const struct spi_driver_api spi_mcux_driver_api = { &spi_mcux_config_##n, POST_KERNEL, \ CONFIG_SPI_INIT_PRIORITY, \ &spi_mcux_driver_api); \ - \ - static void spi_mcux_config_func_##n(const struct device *dev) \ - { \ - SPI_MCUX_LPSPI_MODULE_IRQ(n); \ - } DT_INST_FOREACH_STATUS_OKAY(SPI_MCUX_LPSPI_INIT) From 615114cf3bb8e013b55c24f0c50e5232c58dde39 Mon Sep 17 00:00:00 2001 From: Declan Snyder Date: Mon, 16 Sep 2024 12:16:39 -0500 Subject: [PATCH 2/2] drivers: spi_mcux_lpspi: clang format driver file clang format the spi_mcux_lpspi driver. 
Signed-off-by: Declan Snyder --- drivers/spi/spi_mcux_lpspi.c | 340 ++++++++++++++--------------------- 1 file changed, 136 insertions(+), 204 deletions(-) diff --git a/drivers/spi/spi_mcux_lpspi.c b/drivers/spi/spi_mcux_lpspi.c index 3a44e46867b09e..bda320fe88a04f 100644 --- a/drivers/spi/spi_mcux_lpspi.c +++ b/drivers/spi/spi_mcux_lpspi.c @@ -33,21 +33,19 @@ LOG_MODULE_REGISTER(spi_mcux_lpspi, CONFIG_SPI_LOG_LEVEL); #include "spi_context.h" /* These two defines should be made into DT properties if they ever are different */ -#define CHIP_SELECT_COUNT 4 -#define MAX_DATA_WIDTH 4096 +#define CHIP_SELECT_COUNT 4 +#define MAX_DATA_WIDTH 4096 /* Defines required by DEVICE_MMIO_NAMED_* macros */ -#define DEV_CFG(_dev) \ - ((const struct spi_mcux_config *)(_dev)->config) -#define DEV_DATA(_dev) ((struct spi_mcux_data *)(_dev)->data) - +#define DEV_CFG(_dev) ((const struct spi_mcux_config *)(_dev)->config) +#define DEV_DATA(_dev) ((struct spi_mcux_data *)(_dev)->data) /* ******** DMA Definitions ******* */ #if defined(CONFIG_SPI_MCUX_LPSPI_DMA) -#define SPI_MCUX_LPSPI_DMA_ERROR_FLAG 0x01 -#define SPI_MCUX_LPSPI_DMA_RX_DONE_FLAG 0x02 -#define SPI_MCUX_LPSPI_DMA_TX_DONE_FLAG 0x04 -#define SPI_MCUX_LPSPI_DMA_DONE_FLAG \ +#define SPI_MCUX_LPSPI_DMA_ERROR_FLAG 0x01 +#define SPI_MCUX_LPSPI_DMA_RX_DONE_FLAG 0x02 +#define SPI_MCUX_LPSPI_DMA_TX_DONE_FLAG 0x04 +#define SPI_MCUX_LPSPI_DMA_DONE_FLAG \ (SPI_MCUX_LPSPI_DMA_RX_DONE_FLAG | SPI_MCUX_LPSPI_DMA_TX_DONE_FLAG) struct stream { @@ -61,7 +59,6 @@ static int spi_mcux_dma_rxtx_load(const struct device *dev, size_t *dma_size); #endif /* CONFIG_SPI_MCUX_LPSPI_DMA */ /* ******************************** */ - /* ******* RTIO Definitions ******* */ #if defined(CONFIG_SPI_RTIO) static void spi_mcux_iodev_complete(const struct device *dev, int status); @@ -70,7 +67,6 @@ static void spi_mcux_iodev_start(const struct device *dev); #endif /* CONFIG_SPI_RTIO */ /* ******************************** */ - struct spi_mcux_config { DEVICE_MMIO_NAMED_ROM(reg_base); const struct device *clock_dev; @@ -111,7 +107,6 @@ struct spi_mcux_data { #endif }; - static int spi_mcux_transfer_next_packet(const struct device *dev) { struct spi_mcux_data *data = dev->data; @@ -127,8 +122,8 @@ static int spi_mcux_transfer_next_packet(const struct device *dev) return 0; } - transfer.configFlags = kLPSPI_MasterPcsContinuous | - (ctx->config->slave << LPSPI_MASTER_PCS_SHIFT); + transfer.configFlags = + kLPSPI_MasterPcsContinuous | (ctx->config->slave << LPSPI_MASTER_PCS_SHIFT); if (ctx->tx_len == 0) { /* rx only, nothing to tx */ @@ -137,12 +132,12 @@ static int spi_mcux_transfer_next_packet(const struct device *dev) transfer.dataSize = ctx->rx_len; } else if (ctx->rx_len == 0) { /* tx only, nothing to rx */ - transfer.txData = (uint8_t *) ctx->tx_buf; + transfer.txData = (uint8_t *)ctx->tx_buf; transfer.rxData = NULL; transfer.dataSize = ctx->tx_len; } else if (ctx->tx_len == ctx->rx_len) { /* rx and tx are the same length */ - transfer.txData = (uint8_t *) ctx->tx_buf; + transfer.txData = (uint8_t *)ctx->tx_buf; transfer.rxData = ctx->rx_buf; transfer.dataSize = ctx->tx_len; } else if (ctx->tx_len > ctx->rx_len) { @@ -150,7 +145,7 @@ static int spi_mcux_transfer_next_packet(const struct device *dev) * rx into a longer intermediate buffer. Leave chip select * active between transfers. 
*/ - transfer.txData = (uint8_t *) ctx->tx_buf; + transfer.txData = (uint8_t *)ctx->tx_buf; transfer.rxData = ctx->rx_buf; transfer.dataSize = ctx->rx_len; } else { @@ -158,15 +153,14 @@ static int spi_mcux_transfer_next_packet(const struct device *dev) * tx from a longer intermediate buffer. Leave chip select * active between transfers. */ - transfer.txData = (uint8_t *) ctx->tx_buf; + transfer.txData = (uint8_t *)ctx->tx_buf; transfer.rxData = ctx->rx_buf; transfer.dataSize = ctx->tx_len; } data->transfer_len = transfer.dataSize; - status = LPSPI_MasterTransferNonBlocking(base, &data->handle, - &transfer); + status = LPSPI_MasterTransferNonBlocking(base, &data->handle, &transfer); if (status != kStatus_Success) { LOG_ERR("Transfer could not start on %s: %d", dev->name, status); return status == kStatus_LPSPI_Busy ? -EBUSY : -EINVAL; @@ -175,8 +169,8 @@ static int spi_mcux_transfer_next_packet(const struct device *dev) return 0; } -static void spi_mcux_master_transfer_callback(LPSPI_Type *base, - lpspi_master_handle_t *handle, status_t status, void *userData) +static void spi_mcux_master_transfer_callback(LPSPI_Type *base, lpspi_master_handle_t *handle, + status_t status, void *userData) { struct spi_mcux_data *data = userData; @@ -192,8 +186,7 @@ static void spi_mcux_master_transfer_callback(LPSPI_Type *base, spi_mcux_transfer_next_packet(data->dev); } -static int spi_mcux_configure(const struct device *dev, - const struct spi_config *spi_cfg) +static int spi_mcux_configure(const struct device *dev, const struct spi_config *spi_cfg) { const struct spi_mcux_config *config = dev->config; struct spi_mcux_data *data = dev->data; @@ -215,34 +208,28 @@ static int spi_mcux_configure(const struct device *dev, LPSPI_MasterGetDefaultConfig(&master_config); if (spi_cfg->slave >= CHIP_SELECT_COUNT) { - LOG_ERR("Slave %d is greater than %d", - spi_cfg->slave, CHIP_SELECT_COUNT); + LOG_ERR("Slave %d is greater than %d", spi_cfg->slave, CHIP_SELECT_COUNT); return -EINVAL; } word_size = SPI_WORD_SIZE_GET(spi_cfg->operation); if (word_size > MAX_DATA_WIDTH) { - LOG_ERR("Word size %d is greater than %d", - word_size, MAX_DATA_WIDTH); + LOG_ERR("Word size %d is greater than %d", word_size, MAX_DATA_WIDTH); return -EINVAL; } master_config.bitsPerFrame = word_size; - master_config.cpol = - (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL) - ? kLPSPI_ClockPolarityActiveLow - : kLPSPI_ClockPolarityActiveHigh; + master_config.cpol = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL) + ? kLPSPI_ClockPolarityActiveLow + : kLPSPI_ClockPolarityActiveHigh; - master_config.cpha = - (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA) - ? kLPSPI_ClockPhaseSecondEdge - : kLPSPI_ClockPhaseFirstEdge; + master_config.cpha = (SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA) + ? kLPSPI_ClockPhaseSecondEdge + : kLPSPI_ClockPhaseFirstEdge; master_config.direction = - (spi_cfg->operation & SPI_TRANSFER_LSB) - ? kLPSPI_LsbFirst - : kLPSPI_MsbFirst; + (spi_cfg->operation & SPI_TRANSFER_LSB) ? 
kLPSPI_LsbFirst : kLPSPI_MsbFirst; master_config.baudRate = spi_cfg->frequency; @@ -257,8 +244,7 @@ static int spi_mcux_configure(const struct device *dev, return -ENODEV; } - if (clock_control_get_rate(config->clock_dev, config->clock_subsys, - &clock_freq)) { + if (clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq)) { return -EINVAL; } @@ -278,8 +264,7 @@ static int spi_mcux_configure(const struct device *dev, LPSPI_MasterInit(base, &master_config, clock_freq); - LPSPI_MasterTransferCreateHandle(base, &data->handle, - spi_mcux_master_transfer_callback, + LPSPI_MasterTransferCreateHandle(base, &data->handle, spi_mcux_master_transfer_callback, data); LPSPI_SetDummyData(base, 0); @@ -289,7 +274,6 @@ static int spi_mcux_configure(const struct device *dev, return 0; } - #ifdef CONFIG_SPI_MCUX_LPSPI_DMA /* helper function */ static size_t spi_mcux_dma_equal_block_length(struct spi_context *ctx) @@ -302,8 +286,7 @@ static size_t spi_mcux_dma_equal_block_length(struct spi_context *ctx) } /* This function is executed in the interrupt context */ -static void spi_mcux_dma_callback(const struct device *dev, void *arg, - uint32_t channel, int status) +static void spi_mcux_dma_callback(const struct device *dev, void *arg, uint32_t channel, int status) { const struct device *spi_dev = arg; struct spi_mcux_data *data = (struct spi_mcux_data *)spi_dev->data; @@ -390,12 +373,10 @@ static int spi_mcux_dma_tx_load(const struct device *dev, const uint8_t *buf, si blk_cfg->dest_address = LPSPI_GetTxRegisterAddress(base); /* pass our client origin to the dma: data->dma_tx.dma_channel */ - return dma_config(data->dma_tx.dma_dev, data->dma_tx.channel, - &stream->dma_cfg); + return dma_config(data->dma_tx.dma_dev, data->dma_tx.channel, &stream->dma_cfg); } -static int spi_mcux_dma_rx_load(const struct device *dev, uint8_t *buf, - size_t len) +static int spi_mcux_dma_rx_load(const struct device *dev, uint8_t *buf, size_t len) { struct spi_mcux_data *data = dev->data; LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); @@ -423,8 +404,7 @@ static int spi_mcux_dma_rx_load(const struct device *dev, uint8_t *buf, blk_cfg->source_address = LPSPI_GetRxRegisterAddress(base); /* pass our client origin to the dma: data->dma_rx.channel */ - return dma_config(data->dma_rx.dma_dev, data->dma_rx.channel, - &stream->dma_cfg); + return dma_config(data->dma_rx.dma_dev, data->dma_rx.channel, &stream->dma_cfg); } static int wait_dma_rx_tx_done(const struct device *dev) @@ -444,15 +424,14 @@ static int wait_dma_rx_tx_done(const struct device *dev) } if ((data->status_flags & SPI_MCUX_LPSPI_DMA_DONE_FLAG) == - SPI_MCUX_LPSPI_DMA_DONE_FLAG) { + SPI_MCUX_LPSPI_DMA_DONE_FLAG) { LOG_DBG("DMA block completed"); return 0; } } } -static inline int spi_mcux_dma_rxtx_load(const struct device *dev, - size_t *dma_size) +static inline int spi_mcux_dma_rxtx_load(const struct device *dev, size_t *dma_size) { struct spi_mcux_data *lpspi_data = dev->data; int ret; @@ -479,13 +458,9 @@ static inline int spi_mcux_dma_rxtx_load(const struct device *dev, return dma_start(lpspi_data->dma_rx.dma_dev, lpspi_data->dma_rx.channel); } -static int transceive_dma(const struct device *dev, - const struct spi_config *spi_cfg, - const struct spi_buf_set *tx_bufs, - const struct spi_buf_set *rx_bufs, - bool asynchronous, - spi_callback_t cb, - void *userdata) +static int transceive_dma(const struct device *dev, const struct spi_config *spi_cfg, + const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, + bool 
asynchronous, spi_callback_t cb, void *userdata) { struct spi_mcux_data *data = dev->data; LPSPI_Type *base = (LPSPI_Type *)DEVICE_MMIO_NAMED_GET(dev, reg_base); @@ -563,13 +538,9 @@ static int transceive_dma(const struct device *dev, } #endif /* CONFIG_SPI_MCUX_LPSPI_DMA */ -static int transceive(const struct device *dev, - const struct spi_config *spi_cfg, - const struct spi_buf_set *tx_bufs, - const struct spi_buf_set *rx_bufs, - bool asynchronous, - spi_callback_t cb, - void *userdata) +static int transceive(const struct device *dev, const struct spi_config *spi_cfg, + const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs, + bool asynchronous, spi_callback_t cb, void *userdata) { struct spi_mcux_data *data = dev->data; int ret; @@ -598,13 +569,10 @@ static int transceive(const struct device *dev, return ret; } -static int spi_mcux_transceive_wrapper(const struct device *dev, - const struct spi_config *spi_cfg, - const struct spi_buf_set *tx_bufs, - const struct spi_buf_set *rx_bufs, - bool async, - spi_callback_t cb, - void *userdata) +static int spi_mcux_transceive_wrapper(const struct device *dev, const struct spi_config *spi_cfg, + const struct spi_buf_set *tx_bufs, + const struct spi_buf_set *rx_bufs, bool async, + spi_callback_t cb, void *userdata) { #ifdef CONFIG_SPI_MCUX_LPSPI_DMA struct spi_mcux_data *data = dev->data; @@ -618,29 +586,23 @@ static int spi_mcux_transceive_wrapper(const struct device *dev, return transceive(dev, spi_cfg, tx_bufs, rx_bufs, async, cb, userdata); } - -static int spi_mcux_transceive(const struct device *dev, - const struct spi_config *spi_cfg, - const struct spi_buf_set *tx_bufs, - const struct spi_buf_set *rx_bufs) +static int spi_mcux_transceive(const struct device *dev, const struct spi_config *spi_cfg, + const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs) { return spi_mcux_transceive_wrapper(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL); } #ifdef CONFIG_SPI_ASYNC -static int spi_mcux_transceive_async(const struct device *dev, - const struct spi_config *spi_cfg, - const struct spi_buf_set *tx_bufs, - const struct spi_buf_set *rx_bufs, - spi_callback_t cb, - void *userdata) +static int spi_mcux_transceive_async(const struct device *dev, const struct spi_config *spi_cfg, + const struct spi_buf_set *tx_bufs, + const struct spi_buf_set *rx_bufs, spi_callback_t cb, + void *userdata) { return spi_mcux_transceive_wrapper(dev, spi_cfg, tx_bufs, rx_bufs, true, cb, userdata); } #endif /* CONFIG_SPI_ASYNC */ -static int spi_mcux_release(const struct device *dev, - const struct spi_config *spi_cfg) +static int spi_mcux_release(const struct device *dev, const struct spi_config *spi_cfg) { struct spi_mcux_data *data = dev->data; @@ -649,10 +611,8 @@ static int spi_mcux_release(const struct device *dev, return 0; } - #ifdef CONFIG_SPI_RTIO -static void spi_mcux_iodev_submit(const struct device *dev, - struct rtio_iodev_sqe *iodev_sqe) +static void spi_mcux_iodev_submit(const struct device *dev, struct rtio_iodev_sqe *iodev_sqe) { struct spi_mcux_data *data = dev->data; @@ -672,8 +632,8 @@ static void spi_mcux_iodev_start(const struct device *dev) lpspi_transfer_t transfer; status_t status; - transfer.configFlags = kLPSPI_MasterPcsContinuous | - (spi_cfg->slave << LPSPI_MASTER_PCS_SHIFT); + transfer.configFlags = + kLPSPI_MasterPcsContinuous | (spi_cfg->slave << LPSPI_MASTER_PCS_SHIFT); switch (sqe->op) { case RTIO_OP_RX: @@ -709,8 +669,7 @@ static void spi_mcux_iodev_start(const struct device *dev) k_spinlock_key_t key = 
k_spin_lock(&data->lock); - status = LPSPI_MasterTransferNonBlocking(base, &data->handle, - &transfer); + status = LPSPI_MasterTransferNonBlocking(base, &data->handle, &transfer); k_spin_unlock(&data->lock, key); if (status != kStatus_Success) { LOG_ERR("Transfer could not start"); @@ -770,7 +729,6 @@ static void spi_mcux_iodev_complete(const struct device *dev, int status) } #endif /* CONFIG_SPI_RTIO */ - static const struct spi_driver_api spi_mcux_driver_api = { .transceive = spi_mcux_transceive, #ifdef CONFIG_SPI_ASYNC @@ -782,7 +740,6 @@ static const struct spi_driver_api spi_mcux_driver_api = { .release = spi_mcux_release, }; - static void spi_mcux_isr(const struct device *dev) { struct spi_mcux_data *data = dev->data; @@ -790,9 +747,7 @@ static void spi_mcux_isr(const struct device *dev) LPSPI_MasterTransferHandleIRQ( /* SDK has different first argument for some platform */ - COND_CODE_1(CONFIG_NXP_LP_FLEXCOMM, - (LPSPI_GetInstance(base)), - (base)), + COND_CODE_1(CONFIG_NXP_LP_FLEXCOMM, (LPSPI_GetInstance(base)), (base)), &data->handle); } @@ -846,109 +801,86 @@ static int spi_mcux_init(const struct device *dev) return 0; } -#define SPI_MCUX_RTIO_DEFINE(n) IF_ENABLED(CONFIG_SPI_RTIO, \ - (RTIO_DEFINE(spi_mcux_rtio_##n, \ - CONFIG_SPI_MCUX_RTIO_SQ_SIZE, \ - CONFIG_SPI_MCUX_RTIO_SQ_SIZE))) - -#define SPI_DMA_CHANNELS(n) IF_ENABLED(CONFIG_SPI_MCUX_LPSPI_DMA, ( \ - IF_ENABLED(DT_INST_DMAS_HAS_NAME(n, tx), \ - ( \ - .dma_tx = { \ - .dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, tx)), \ - .channel = \ - DT_INST_DMAS_CELL_BY_NAME(n, tx, mux), \ - .dma_cfg = { \ - .channel_direction = MEMORY_TO_PERIPHERAL, \ - .dma_callback = spi_mcux_dma_callback, \ - .source_data_size = 1, \ - .dest_data_size = 1, \ - .block_count = 1, \ - .dma_slot = DT_INST_DMAS_CELL_BY_NAME(n, tx, source) \ - } \ - }, \ - )) \ - IF_ENABLED(DT_INST_DMAS_HAS_NAME(n, rx), \ - ( \ - .dma_rx = { \ - .dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, rx)), \ - .channel = \ - DT_INST_DMAS_CELL_BY_NAME(n, rx, mux), \ - .dma_cfg = { \ - .channel_direction = PERIPHERAL_TO_MEMORY, \ - .dma_callback = spi_mcux_dma_callback, \ - .source_data_size = 1, \ - .dest_data_size = 1, \ - .block_count = 1, \ - .dma_slot = DT_INST_DMAS_CELL_BY_NAME(n, rx, source) \ - } \ - }, \ - )))) - -#define SPI_MCUX_LPSPI_MODULE_IRQ_CONNECT(n) \ - do { \ - IRQ_CONNECT(DT_INST_IRQN(n), \ - DT_INST_IRQ(n, priority), \ - spi_mcux_isr, \ - DEVICE_DT_INST_GET(n), 0); \ - irq_enable(DT_INST_IRQN(n)); \ +#define SPI_MCUX_RTIO_DEFINE(n) \ + IF_ENABLED(CONFIG_SPI_RTIO, (RTIO_DEFINE(spi_mcux_rtio_##n, CONFIG_SPI_MCUX_RTIO_SQ_SIZE, \ + CONFIG_SPI_MCUX_RTIO_SQ_SIZE))) + +#define SPI_DMA_CHANNELS(n) \ + IF_ENABLED( \ + CONFIG_SPI_MCUX_LPSPI_DMA, \ + (IF_ENABLED( \ + DT_INST_DMAS_HAS_NAME(n, tx), \ + (.dma_tx = {.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, tx)), \ + .channel = DT_INST_DMAS_CELL_BY_NAME(n, tx, mux), \ + .dma_cfg = {.channel_direction = MEMORY_TO_PERIPHERAL, \ + .dma_callback = spi_mcux_dma_callback, \ + .source_data_size = 1, \ + .dest_data_size = 1, \ + .block_count = 1, \ + .dma_slot = DT_INST_DMAS_CELL_BY_NAME( \ + n, tx, source)}},)) \ + IF_ENABLED(DT_INST_DMAS_HAS_NAME(n, rx), \ + (.dma_rx = {.dma_dev = DEVICE_DT_GET( \ + DT_INST_DMAS_CTLR_BY_NAME(n, rx)), \ + .channel = DT_INST_DMAS_CELL_BY_NAME(n, rx, mux), \ + .dma_cfg = {.channel_direction = \ + PERIPHERAL_TO_MEMORY, \ + .dma_callback = spi_mcux_dma_callback, \ + .source_data_size = 1, \ + .dest_data_size = 1, \ + .block_count = 1, \ + .dma_slot = DT_INST_DMAS_CELL_BY_NAME( 
\ + n, rx, source)}},)))) + +#define SPI_MCUX_LPSPI_MODULE_IRQ_CONNECT(n) \ + do { \ + IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), spi_mcux_isr, \ + DEVICE_DT_INST_GET(n), 0); \ + irq_enable(DT_INST_IRQN(n)); \ } while (false) -#define SPI_MCUX_LP_FLEXCOMM_LPSPI_IRQ(n) \ - nxp_lp_flexcomm_setirqhandler(DEVICE_DT_GET(DT_INST_PARENT(n)), \ - DEVICE_DT_INST_GET(n), \ - LP_FLEXCOMM_PERIPH_LPSPI, \ - spi_mcux_isr) - -#define SPI_MCUX_LPSPI_IRQ_CFG(n) \ - COND_CODE_1(IS_ENABLED(CONFIG_NXP_LP_FLEXCOMM), \ - (SPI_MCUX_LP_FLEXCOMM_LPSPI_IRQ(n)), \ - (SPI_MCUX_LPSPI_MODULE_IRQ_CONNECT(n))) - - -#define SPI_MCUX_LPSPI_INIT(n) \ - PINCTRL_DT_INST_DEFINE(n); \ - \ - SPI_MCUX_RTIO_DEFINE(n); \ - \ - static void spi_mcux_config_func_##n(const struct device *dev) \ - { \ - SPI_MCUX_LPSPI_IRQ_CFG(n); \ - } \ - \ - static const struct spi_mcux_config spi_mcux_config_##n = { \ - DEVICE_MMIO_NAMED_ROM_INIT(reg_base, DT_DRV_INST(n)), \ - .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ - .clock_subsys = \ - (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name), \ - .irq_config_func = spi_mcux_config_func_##n, \ - .pcs_sck_delay = UTIL_AND( \ - DT_INST_NODE_HAS_PROP(n, pcs_sck_delay), \ - DT_INST_PROP(n, pcs_sck_delay)), \ - .sck_pcs_delay = UTIL_AND( \ - DT_INST_NODE_HAS_PROP(n, sck_pcs_delay), \ - DT_INST_PROP(n, sck_pcs_delay)), \ - .transfer_delay = UTIL_AND( \ - DT_INST_NODE_HAS_PROP(n, transfer_delay), \ - DT_INST_PROP(n, transfer_delay)), \ - .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ - .data_pin_config = DT_INST_ENUM_IDX(n, data_pin_config),\ - }; \ - \ - static struct spi_mcux_data spi_mcux_data_##n = { \ - SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##n, ctx), \ - SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##n, ctx), \ - SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \ - SPI_DMA_CHANNELS(n) \ - IF_ENABLED(CONFIG_SPI_RTIO, \ - (.r = &spi_mcux_rtio_##n,)) \ - \ - }; \ - \ - DEVICE_DT_INST_DEFINE(n, spi_mcux_init, NULL, \ - &spi_mcux_data_##n, \ - &spi_mcux_config_##n, POST_KERNEL, \ - CONFIG_SPI_INIT_PRIORITY, \ - &spi_mcux_driver_api); \ +#define SPI_MCUX_LP_FLEXCOMM_LPSPI_IRQ(n) \ + nxp_lp_flexcomm_setirqhandler(DEVICE_DT_GET(DT_INST_PARENT(n)), DEVICE_DT_INST_GET(n), \ + LP_FLEXCOMM_PERIPH_LPSPI, spi_mcux_isr) + +#define SPI_MCUX_LPSPI_IRQ_CFG(n) \ + COND_CODE_1(IS_ENABLED(CONFIG_NXP_LP_FLEXCOMM), (SPI_MCUX_LP_FLEXCOMM_LPSPI_IRQ(n)), \ + (SPI_MCUX_LPSPI_MODULE_IRQ_CONNECT(n))) + +#define SPI_MCUX_LPSPI_INIT(n) \ + PINCTRL_DT_INST_DEFINE(n); \ + \ + SPI_MCUX_RTIO_DEFINE(n); \ + \ + static void spi_mcux_config_func_##n(const struct device *dev) \ + { \ + SPI_MCUX_LPSPI_IRQ_CFG(n); \ + } \ + \ + static const struct spi_mcux_config spi_mcux_config_##n = { \ + DEVICE_MMIO_NAMED_ROM_INIT(reg_base, DT_DRV_INST(n)), \ + .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ + .clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name), \ + .irq_config_func = spi_mcux_config_func_##n, \ + .pcs_sck_delay = UTIL_AND(DT_INST_NODE_HAS_PROP(n, pcs_sck_delay), \ + DT_INST_PROP(n, pcs_sck_delay)), \ + .sck_pcs_delay = UTIL_AND(DT_INST_NODE_HAS_PROP(n, sck_pcs_delay), \ + DT_INST_PROP(n, sck_pcs_delay)), \ + .transfer_delay = UTIL_AND(DT_INST_NODE_HAS_PROP(n, transfer_delay), \ + DT_INST_PROP(n, transfer_delay)), \ + .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ + .data_pin_config = DT_INST_ENUM_IDX(n, data_pin_config), \ + }; \ + \ + static struct spi_mcux_data spi_mcux_data_##n = { \ + SPI_CONTEXT_INIT_LOCK(spi_mcux_data_##n, ctx), \ + SPI_CONTEXT_INIT_SYNC(spi_mcux_data_##n, ctx), \ + 
SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) SPI_DMA_CHANNELS(n) \ + IF_ENABLED(CONFIG_SPI_RTIO, (.r = &spi_mcux_rtio_##n,)) \ + \ + }; \ + \ + DEVICE_DT_INST_DEFINE(n, spi_mcux_init, NULL, &spi_mcux_data_##n, &spi_mcux_config_##n, \ + POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, &spi_mcux_driver_api); DT_INST_FOREACH_STATUS_OKAY(SPI_MCUX_LPSPI_INIT)