From 5d1c7c131b3328197f8c4cffacd819e4817872c9 Mon Sep 17 00:00:00 2001 From: Zhang Peng Date: Wed, 20 Dec 2023 11:55:34 +0900 Subject: [PATCH] schedule: zephyr_ll: Fix schedule bug for multi instances When two instances run at the same time the sound has noise, for example: playback and record on the i.MX8QM or i.MX8ULP platform. The reason is that one DMA interrupt will process two tasks, because the scheduler has no mechanism to ensure that only the task which needs processing is handled. Fix this issue by adding a check for whether the task is in the pending state. Signed-off-by: Zhang Peng --- src/include/sof/schedule/ll_schedule_domain.h | 3 +- src/schedule/zephyr_dma_domain.c | 31 ++++++++- src/schedule/zephyr_ll.c | 64 +++++++++++-------- 3 files changed, 68 insertions(+), 30 deletions(-) diff --git a/src/include/sof/schedule/ll_schedule_domain.h b/src/include/sof/schedule/ll_schedule_domain.h index 9765df5ef0bc..343b9060f80c 100644 --- a/src/include/sof/schedule/ll_schedule_domain.h +++ b/src/include/sof/schedule/ll_schedule_domain.h @@ -222,7 +222,8 @@ static inline bool domain_is_pending(struct ll_schedule_domain *domain, { bool ret; - assert(domain->ops->domain_is_pending); + if (!domain->ops->domain_is_pending) + return true; ret = domain->ops->domain_is_pending(domain, task, comp); diff --git a/src/schedule/zephyr_dma_domain.c b/src/schedule/zephyr_dma_domain.c index 45c26387c4fe..ccafc0942277 100644 --- a/src/schedule/zephyr_dma_domain.c +++ b/src/schedule/zephyr_dma_domain.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -98,12 +99,15 @@ static int zephyr_dma_domain_register(struct ll_schedule_domain *domain, static int zephyr_dma_domain_unregister(struct ll_schedule_domain *domain, struct task *task, uint32_t num_tasks); +static bool zephyr_dma_domain_is_pending(struct ll_schedule_domain *domain, + struct task *task, struct comp_dev **comp); static void zephyr_dma_domain_task_cancel(struct ll_schedule_domain *domain, struct task *task); static const struct 
ll_schedule_domain_ops zephyr_dma_domain_ops = { .domain_register = zephyr_dma_domain_register, .domain_unregister = zephyr_dma_domain_unregister, + .domain_is_pending = zephyr_dma_domain_is_pending, .domain_task_cancel = zephyr_dma_domain_task_cancel }; @@ -150,6 +154,17 @@ static void zephyr_dma_domain_thread_fn(void *p1, void *p2, void *p3) } } +void pipe_task_notify(void *arg, enum notify_id type, void *data) +{ + struct pipeline_task *pipe_task = (void *)arg; + struct task *task; + if (!pipe_task) + return; + task = &pipe_task->task; + + task->state = SOF_TASK_STATE_PENDING; +} + static void dma_irq_handler(void *data) { struct zephyr_dma_domain_irq *irq_data; @@ -169,8 +184,10 @@ static void dma_irq_handler(void *data) list_for_item(i, &irq_data->channels) { chan_data = container_of(i, struct zephyr_dma_domain_channel, list); - if (dma_interrupt_legacy(chan_data->channel, DMA_IRQ_STATUS_GET)) + if (dma_interrupt_legacy(chan_data->channel, DMA_IRQ_STATUS_GET)) { dma_interrupt_legacy(chan_data->channel, DMA_IRQ_CLEAR); + notifier_event(chan_data, NOTIFIER_ID_DMA_IRQ, NOTIFIER_TARGET_CORE_LOCAL, NULL, 0); + } } /* clear IRQ - the mask argument is unused ATM */ @@ -351,6 +368,9 @@ static int register_dma_irq(struct zephyr_dma_domain *domain, irq_local_enable(flags); + notifier_register(pipe_task, chan_data, NOTIFIER_ID_DMA_IRQ, + pipe_task_notify, 0); + return 0; } } @@ -597,6 +617,15 @@ static int zephyr_dma_domain_unregister(struct ll_schedule_domain *domain, return 0; } +static bool zephyr_dma_domain_is_pending(struct ll_schedule_domain *domain, + struct task *task, struct comp_dev **comp) +{ + if (task->state == SOF_TASK_STATE_PENDING) + return true; + else + return false; +} + static void zephyr_dma_domain_task_cancel(struct ll_schedule_domain *domain, struct task *task) { diff --git a/src/schedule/zephyr_ll.c b/src/schedule/zephyr_ll.c index 2069bb6585c4..0eaed908a1ad 100644 --- a/src/schedule/zephyr_ll.c +++ b/src/schedule/zephyr_ll.c @@ -188,6 +188,7 @@ 
static void zephyr_ll_run(void *data) for (list = sch->tasks.next; !list_is_empty(&sch->tasks); list = sch->tasks.next) { enum task_state state; struct zephyr_ll_pdata *pdata; + struct ll_schedule_domain *domain = sch->ll_domain; task = container_of(list, struct task, list); pdata = task->priv_data; @@ -198,44 +199,51 @@ static void zephyr_ll_run(void *data) continue; } - pdata->run = true; - task->state = SOF_TASK_STATE_RUNNING; + if (domain_is_pending(domain, task, NULL)) { + pdata->run = true; + task->state = SOF_TASK_STATE_RUNNING; + } /* Move the task to a temporary list */ list_item_del(list); list_item_append(list, &task_head); - zephyr_ll_unlock(sch, &flags); - - /* - * task's .run() should only return either - * SOF_TASK_STATE_COMPLETED or SOF_TASK_STATE_RESCHEDULE - */ - state = do_task_run(task); - if (state != SOF_TASK_STATE_COMPLETED && - state != SOF_TASK_STATE_RESCHEDULE) { - tr_err(&ll_tr, - "zephyr_ll_run: invalid return state %u", - state); - state = SOF_TASK_STATE_RESCHEDULE; - } - - zephyr_ll_lock(sch, &flags); + if (task->state == SOF_TASK_STATE_RUNNING) { + zephyr_ll_unlock(sch, &flags); - if (pdata->freeing || state == SOF_TASK_STATE_COMPLETED) { - zephyr_ll_task_done(sch, task); - } else { /* - * task->state could've been changed to - * SOF_TASK_STATE_CANCEL + * task's .run() should only return either + * SOF_TASK_STATE_COMPLETED or SOF_TASK_STATE_RESCHEDULE */ - switch (task->state) { - case SOF_TASK_STATE_CANCEL: + + state = do_task_run(task); + if (state != SOF_TASK_STATE_COMPLETED && + state != SOF_TASK_STATE_RESCHEDULE) { + tr_err(&ll_tr, + "zephyr_ll_run: invalid return state %u", + state); + state = SOF_TASK_STATE_RESCHEDULE; + } + + zephyr_ll_lock(sch, &flags); + + if (pdata->freeing || state == SOF_TASK_STATE_COMPLETED) { zephyr_ll_task_done(sch, task); - break; - default: - break; + } else { + /* + * task->state could've been changed to + * SOF_TASK_STATE_CANCEL + */ + switch (task->state) { + case SOF_TASK_STATE_CANCEL: + 
zephyr_ll_task_done(sch, task); + break; + default: + break; + } } + /* update task state */ + task->state = state; } }