Skip to content

Commit

Permalink
schedule: zephyr_ll: Fix schedule bug for multiple instances
Browse files Browse the repository at this point in the history
When running two instances at the same time the sound has noise — for
example, playback and record on the i.MX8QM or i.MX8ULP platforms.

The reason is that one DMA interrupt will cause two tasks to be
processed, because the scheduler has no mechanism to ensure that only
the task that actually needs processing is handled.

Fix this issue by adding a check for whether the task is in the pending state.

Signed-off-by: Zhang Peng <peng.zhang_8@nxp.com>
  • Loading branch information
Zhang Peng committed Dec 20, 2023
1 parent 4f028c5 commit f140a8d
Show file tree
Hide file tree
Showing 3 changed files with 70 additions and 30 deletions.
3 changes: 2 additions & 1 deletion src/include/sof/schedule/ll_schedule_domain.h
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,8 @@ static inline bool domain_is_pending(struct ll_schedule_domain *domain,
{
bool ret;

assert(domain->ops->domain_is_pending);
if (!domain->ops->domain_is_pending)
return true;

ret = domain->ops->domain_is_pending(domain, task, comp);

Expand Down
33 changes: 32 additions & 1 deletion src/schedule/zephyr_dma_domain.c
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
#include <rtos/alloc.h>
#include <sof/lib/cpu.h>
#include <sof/lib/dma.h>
#include <sof/lib/notifier.h>
#include <sof/platform.h>
#include <sof/schedule/ll_schedule.h>
#include <sof/schedule/ll_schedule_domain.h>
Expand Down Expand Up @@ -98,12 +99,15 @@ static int zephyr_dma_domain_register(struct ll_schedule_domain *domain,
static int zephyr_dma_domain_unregister(struct ll_schedule_domain *domain,
struct task *task,
uint32_t num_tasks);
static bool zephyr_dma_domain_is_pending(struct ll_schedule_domain *domain,
struct task *task, struct comp_dev **comp);
static void zephyr_dma_domain_task_cancel(struct ll_schedule_domain *domain,
struct task *task);

/*
 * LL scheduler domain ops for the Zephyr DMA domain.
 *
 * domain_is_pending lets the scheduler run only those tasks whose DMA
 * interrupt actually fired (state set to SOF_TASK_STATE_PENDING), instead
 * of running every registered task on any DMA IRQ.
 */
static const struct ll_schedule_domain_ops zephyr_dma_domain_ops = {
.domain_register = zephyr_dma_domain_register,
.domain_unregister = zephyr_dma_domain_unregister,
.domain_is_pending = zephyr_dma_domain_is_pending,
.domain_task_cancel = zephyr_dma_domain_task_cancel
};

Expand Down Expand Up @@ -150,6 +154,18 @@ static void zephyr_dma_domain_thread_fn(void *p1, void *p2, void *p3)
}
}

/*
 * Notifier callback invoked on a DMA channel IRQ (NOTIFIER_ID_DMA_IRQ).
 *
 * Marks the pipeline task attached to the channel as pending so that
 * the LL scheduler's domain_is_pending() check will select it for
 * execution. The @type and @data arguments are unused here.
 */
void pipe_task_notify(void *arg, enum notify_id type, void *data)
{
	struct pipeline_task *pipe_task = (struct pipeline_task *)arg;

	if (!pipe_task)
		return;

	pipe_task->task.state = SOF_TASK_STATE_PENDING;
}

static void dma_irq_handler(void *data)
{
struct zephyr_dma_domain_irq *irq_data;
Expand All @@ -169,8 +185,11 @@ static void dma_irq_handler(void *data)
list_for_item(i, &irq_data->channels) {
chan_data = container_of(i, struct zephyr_dma_domain_channel, list);

if (dma_interrupt_legacy(chan_data->channel, DMA_IRQ_STATUS_GET))
if (dma_interrupt_legacy(chan_data->channel, DMA_IRQ_STATUS_GET)) {
dma_interrupt_legacy(chan_data->channel, DMA_IRQ_CLEAR);
notifier_event(chan_data, NOTIFIER_ID_DMA_IRQ,
NOTIFIER_TARGET_CORE_LOCAL, NULL, 0);
}
}

/* clear IRQ - the mask argument is unused ATM */
Expand Down Expand Up @@ -351,6 +370,9 @@ static int register_dma_irq(struct zephyr_dma_domain *domain,

irq_local_enable(flags);

notifier_register(pipe_task, chan_data, NOTIFIER_ID_DMA_IRQ,
pipe_task_notify, 0);

return 0;
}
}
Expand Down Expand Up @@ -597,6 +619,15 @@ static int zephyr_dma_domain_unregister(struct ll_schedule_domain *domain,
return 0;
}

/*
 * Report whether @task needs servicing in this DMA domain.
 *
 * A task is moved to SOF_TASK_STATE_PENDING by pipe_task_notify() when
 * the DMA IRQ for its channel fires, so the LL scheduler only runs tasks
 * whose interrupt actually occurred. @domain and @comp are unused here.
 */
static bool zephyr_dma_domain_is_pending(struct ll_schedule_domain *domain,
					 struct task *task, struct comp_dev **comp)
{
	/* Return the comparison directly instead of if/else with bool literals */
	return task->state == SOF_TASK_STATE_PENDING;
}

static void zephyr_dma_domain_task_cancel(struct ll_schedule_domain *domain,
struct task *task)
{
Expand Down
64 changes: 36 additions & 28 deletions src/schedule/zephyr_ll.c
Original file line number Diff line number Diff line change
Expand Up @@ -188,6 +188,7 @@ static void zephyr_ll_run(void *data)
for (list = sch->tasks.next; !list_is_empty(&sch->tasks); list = sch->tasks.next) {
enum task_state state;
struct zephyr_ll_pdata *pdata;
struct ll_schedule_domain *domain = sch->ll_domain;

task = container_of(list, struct task, list);
pdata = task->priv_data;
Expand All @@ -198,44 +199,51 @@ static void zephyr_ll_run(void *data)
continue;
}

pdata->run = true;
task->state = SOF_TASK_STATE_RUNNING;
if (domain_is_pending(domain, task, NULL)) {
pdata->run = true;
task->state = SOF_TASK_STATE_RUNNING;
}

/* Move the task to a temporary list */
list_item_del(list);
list_item_append(list, &task_head);

zephyr_ll_unlock(sch, &flags);

/*
* task's .run() should only return either
* SOF_TASK_STATE_COMPLETED or SOF_TASK_STATE_RESCHEDULE
*/
state = do_task_run(task);
if (state != SOF_TASK_STATE_COMPLETED &&
state != SOF_TASK_STATE_RESCHEDULE) {
tr_err(&ll_tr,
"zephyr_ll_run: invalid return state %u",
state);
state = SOF_TASK_STATE_RESCHEDULE;
}

zephyr_ll_lock(sch, &flags);
if (task->state == SOF_TASK_STATE_RUNNING) {
zephyr_ll_unlock(sch, &flags);

if (pdata->freeing || state == SOF_TASK_STATE_COMPLETED) {
zephyr_ll_task_done(sch, task);
} else {
/*
* task->state could've been changed to
* SOF_TASK_STATE_CANCEL
* task's .run() should only return either
* SOF_TASK_STATE_COMPLETED or SOF_TASK_STATE_RESCHEDULE
*/
switch (task->state) {
case SOF_TASK_STATE_CANCEL:

state = do_task_run(task);
if (state != SOF_TASK_STATE_COMPLETED &&
state != SOF_TASK_STATE_RESCHEDULE) {
tr_err(&ll_tr,
"zephyr_ll_run: invalid return state %u",
state);
state = SOF_TASK_STATE_RESCHEDULE;
}

zephyr_ll_lock(sch, &flags);

if (pdata->freeing || state == SOF_TASK_STATE_COMPLETED) {
zephyr_ll_task_done(sch, task);
break;
default:
break;
} else {
/*
* task->state could've been changed to
* SOF_TASK_STATE_CANCEL
*/
switch (task->state) {
case SOF_TASK_STATE_CANCEL:
zephyr_ll_task_done(sch, task);
break;
default:
break;
}
}
/* update task state */
task->state = state;
}
}

Expand Down

0 comments on commit f140a8d

Please sign in to comment.