From 962b4cf3d569bf761ddac78f8e396337d617cf09 Mon Sep 17 00:00:00 2001
From: Zhang Peng
Date: Wed, 20 Dec 2023 11:55:34 +0900
Subject: [PATCH] schedule: zephyr_ll: Fix schedule bug for multiple instances

When two instances run at the same time the sound is noisy, for
example when running playback and record together on the i.MX8QM or
i.MX8ULP platform. The reason is that one DMA interrupt processes both
tasks, because the scheduler has no mechanism to ensure it only
handles the task that actually needs to run.

Fix this by checking whether a task is in the pending state before
running it.

Signed-off-by: Zhang Peng
---
 src/include/sof/schedule/ll_schedule_domain.h |  3 +-
 src/schedule/zephyr_dma_domain.c              | 30 ++++++++++++++++++-
 src/schedule/zephyr_ll.c                      | 13 ++++++--
 3 files changed, 42 insertions(+), 4 deletions(-)

diff --git a/src/include/sof/schedule/ll_schedule_domain.h b/src/include/sof/schedule/ll_schedule_domain.h
index 9765df5ef0bc..343b9060f80c 100644
--- a/src/include/sof/schedule/ll_schedule_domain.h
+++ b/src/include/sof/schedule/ll_schedule_domain.h
@@ -222,7 +222,8 @@ static inline bool domain_is_pending(struct ll_schedule_domain *domain,
 {
 	bool ret;
 
-	assert(domain->ops->domain_is_pending);
+	if (!domain->ops->domain_is_pending)
+		return true;
 
 	ret = domain->ops->domain_is_pending(domain, task, comp);
 
diff --git a/src/schedule/zephyr_dma_domain.c b/src/schedule/zephyr_dma_domain.c
index 45c26387c4fe..c05aa74943ab 100644
--- a/src/schedule/zephyr_dma_domain.c
+++ b/src/schedule/zephyr_dma_domain.c
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -98,12 +99,15 @@ static int zephyr_dma_domain_register(struct ll_schedule_domain *domain,
 static int zephyr_dma_domain_unregister(struct ll_schedule_domain *domain,
 					struct task *task,
 					uint32_t num_tasks);
+static bool zephyr_dma_domain_is_pending(struct ll_schedule_domain *domain,
+					 struct task *task, struct comp_dev **comp);
 static void zephyr_dma_domain_task_cancel(struct ll_schedule_domain *domain,
 					  struct task *task);
 
 static const struct ll_schedule_domain_ops zephyr_dma_domain_ops = {
 	.domain_register = zephyr_dma_domain_register,
 	.domain_unregister = zephyr_dma_domain_unregister,
+	.domain_is_pending = zephyr_dma_domain_is_pending,
 	.domain_task_cancel = zephyr_dma_domain_task_cancel
 };
 
@@ -150,6 +154,18 @@ static void zephyr_dma_domain_thread_fn(void *p1, void *p2, void *p3)
 	}
 }
 
+void pipe_task_notify(void *arg, enum notify_id type, void *data)
+{
+	struct pipeline_task *pipe_task = (void *)arg;
+	struct task *task;
+
+	if (!pipe_task)
+		return;
+	task = &pipe_task->task;
+
+	task->state = SOF_TASK_STATE_PENDING;
+}
+
 static void dma_irq_handler(void *data)
 {
 	struct zephyr_dma_domain_irq *irq_data;
@@ -169,8 +185,11 @@ static void dma_irq_handler(void *data)
 	list_for_item(i, &irq_data->channels) {
 		chan_data = container_of(i, struct zephyr_dma_domain_channel, list);
 
-		if (dma_interrupt_legacy(chan_data->channel, DMA_IRQ_STATUS_GET))
+		if (dma_interrupt_legacy(chan_data->channel, DMA_IRQ_STATUS_GET)) {
 			dma_interrupt_legacy(chan_data->channel, DMA_IRQ_CLEAR);
+			notifier_event(chan_data, NOTIFIER_ID_DMA_IRQ,
+				       NOTIFIER_TARGET_CORE_LOCAL, NULL, 0);
+		}
 	}
 
 	/* clear IRQ - the mask argument is unused ATM */
@@ -351,6 +370,9 @@ static int register_dma_irq(struct zephyr_dma_domain *domain,
 			irq_local_enable(flags);
 
+			notifier_register(pipe_task, chan_data, NOTIFIER_ID_DMA_IRQ,
+					  pipe_task_notify, 0);
+
 			return 0;
 		}
 	}
 
@@ -597,6 +619,12 @@ static int zephyr_dma_domain_unregister(struct ll_schedule_domain *domain,
 	return 0;
 }
 
+static bool zephyr_dma_domain_is_pending(struct ll_schedule_domain *domain,
+					 struct task *task, struct comp_dev **comp)
+{
+	return (task->state == SOF_TASK_STATE_PENDING);
+}
+
 static void zephyr_dma_domain_task_cancel(struct ll_schedule_domain *domain,
 					  struct task *task)
 {
diff --git a/src/schedule/zephyr_ll.c b/src/schedule/zephyr_ll.c
index 2069bb6585c4..77afb1dd064b 100644
--- a/src/schedule/zephyr_ll.c
+++ b/src/schedule/zephyr_ll.c
@@ -188,6 +188,7 @@ static void zephyr_ll_run(void *data)
 	for (list = sch->tasks.next; !list_is_empty(&sch->tasks); list = sch->tasks.next) {
 		enum task_state state;
 		struct zephyr_ll_pdata *pdata;
+		struct ll_schedule_domain *domain = sch->ll_domain;
 
 		task = container_of(list, struct task, list);
 		pdata = task->priv_data;
@@ -198,19 +199,25 @@ static void zephyr_ll_run(void *data)
 			continue;
 		}
 
-		pdata->run = true;
-		task->state = SOF_TASK_STATE_RUNNING;
+		if (domain_is_pending(domain, task, NULL)) {
+			pdata->run = true;
+			task->state = SOF_TASK_STATE_RUNNING;
+		}
 
 		/* Move the task to a temporary list */
 		list_item_del(list);
 		list_item_append(list, &task_head);
 
+		if (task->state != SOF_TASK_STATE_RUNNING)
+			continue;
+
 		zephyr_ll_unlock(sch, &flags);
 
 		/*
 		 * task's .run() should only return either
 		 * SOF_TASK_STATE_COMPLETED or SOF_TASK_STATE_RESCHEDULE
 		 */
+
 		state = do_task_run(task);
 		if (state != SOF_TASK_STATE_COMPLETED &&
 		    state != SOF_TASK_STATE_RESCHEDULE) {
@@ -237,6 +244,8 @@ static void zephyr_ll_run(void *data)
 				break;
 			}
 		}
+		/* update task state */
+		task->state = state;
 	}
 
 	/* Move tasks back */
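
Note for reviewers: the snippet below is a minimal, standalone sketch of the
mechanism this patch introduces, not SOF or Zephyr code. The names used here
(mark_pending(), task_is_pending(), ll_run(), enum task_state) are simplified
stand-ins for pipe_task_notify(), zephyr_dma_domain_is_pending() and
zephyr_ll_run(). It only models the flow: the DMA IRQ notifier marks the task
that owns the interrupting channel as pending, and the scheduler tick then
runs only the flagged tasks, so a playback interrupt no longer drives the
capture task (and vice versa).

/*
 * Standalone model of the pending-state gating added by this patch.
 * All types and helpers are simplified stand-ins, not the real SOF API.
 */
#include <stdbool.h>
#include <stdio.h>

enum task_state {
	TASK_STATE_QUEUED,
	TASK_STATE_PENDING,
	TASK_STATE_RUNNING,
	TASK_STATE_RESCHEDULE,
};

struct task {
	const char *name;
	enum task_state state;
	void (*run)(struct task *task);
};

/* Stand-in for pipe_task_notify(): the notifier callback only flips state. */
static void mark_pending(struct task *task)
{
	task->state = TASK_STATE_PENDING;
}

/* Stand-in for zephyr_dma_domain_is_pending(). */
static bool task_is_pending(const struct task *task)
{
	return task->state == TASK_STATE_PENDING;
}

static void run_playback(struct task *task) { printf("run %s\n", task->name); }
static void run_capture(struct task *task)  { printf("run %s\n", task->name); }

/* Stand-in for zephyr_ll_run(): only tasks flagged by "their" IRQ execute. */
static void ll_run(struct task *tasks, int num_tasks)
{
	for (int i = 0; i < num_tasks; i++) {
		struct task *task = &tasks[i];

		if (!task_is_pending(task))
			continue;

		task->state = TASK_STATE_RUNNING;
		task->run(task);
		/* in the real scheduler the task's .run() return decides this */
		task->state = TASK_STATE_RESCHEDULE;
	}
}

int main(void)
{
	struct task tasks[] = {
		{ .name = "playback", .state = TASK_STATE_QUEUED, .run = run_playback },
		{ .name = "capture",  .state = TASK_STATE_QUEUED, .run = run_capture },
	};

	/* Playback DMA IRQ fires: only the playback task is marked pending... */
	mark_pending(&tasks[0]);
	/* ...so only the playback task runs on this scheduler tick. */
	ll_run(tasks, 2);

	return 0;
}

Running the sketch prints only "run playback" for the simulated playback IRQ,
mirroring the behaviour the patch enforces in zephyr_ll_run(): a task that was
not flagged pending by its own DMA interrupt is skipped instead of being run.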