From 3d4883a655db5c1185dc31c7ec65184e5be47161 Mon Sep 17 00:00:00 2001
From: Marcin Szkudlinski
Date: Mon, 23 Oct 2023 15:09:27 +0200
Subject: [PATCH] DP: provide data to next LL module no earlier than DP deadline

Let's assume a DP module with a 10ms period (a.k.a. a deadline). It may
start and finish earlier, e.g. in 2ms, providing 10ms of data. LL starts
consuming that data in 1ms chunks and will drain the 10ms buffer in 10ms,
expecting a new portion of data in the 11th ms.
BUT - the DP module's deadline is still 10ms, regardless of whether it
finished earlier, and it is completely fine if processing in the next
cycle takes the full 10ms - as long as it fits into the deadline.

This may lead to underruns:

LL1 (1ms) ---> DP (10ms) ---> LL2 (1ms)

ticks 0..9  - LL1 is producing 1ms data portions, DP is waiting, LL2 is waiting
tick 10     - DP has enough data to run, it starts processing
tick 12     - DP finishes early, LL2 starts consuming, LL1 keeps producing data
ticks 13-19 - LL1 is producing data, LL2 is consuming data (both in 1ms chunks)
tick 20     - DP starts processing a new 10ms portion of data, having 10ms to finish
              !!!! but LL2 has already consumed 8ms !!!!
tick 22     - LL2 is consuming the last 1ms data chunk
tick 23     - DP is still processing, LL2 has no data to process
              !!! UNDERRUN !!!!
tick 29     - DP finishes, still properly within its deadline

Solution: even if DP finishes before its deadline, the data must be held
back until the deadline, so LL2 may start processing no earlier than
tick 20.

Signed-off-by: Marcin Szkudlinski
---
 src/audio/module_adapter/module_adapter.c |  3 +++
 src/include/module/module/base.h          | 29 +++++++++++++++++++++++++++++
 src/schedule/zephyr_dp_schedule.c         | 38 +++++++++++++++++++++++++++-----------
 3 files changed, 59 insertions(+), 11 deletions(-)

diff --git a/src/audio/module_adapter/module_adapter.c b/src/audio/module_adapter/module_adapter.c
index 1c6af1f7732f..2b3ab4faaad7 100644
--- a/src/audio/module_adapter/module_adapter.c
+++ b/src/audio/module_adapter/module_adapter.c
@@ -1049,6 +1049,9 @@ static int module_adapter_copy_dp_queues(struct comp_dev *dev)
 		dp_queue = dp_queue_get_next_item(dp_queue);
 	}
 
+	if (mod->dp_startup_delay)
+		return 0;
+
 	dp_queue = dp_queue_get_first_item(&mod->dp_queue_dp_to_ll_list);
 	list_for_item(blist, &dev->bsink_list) {
 		/* output - we need to copy data from dp_queue (as source)
diff --git a/src/include/module/module/base.h b/src/include/module/module/base.h
index ea951412c653..904b83741961 100644
--- a/src/include/module/module/base.h
+++ b/src/include/module/module/base.h
@@ -124,6 +124,35 @@ struct processing_module {
 	/* module-specific flags for comp_verify_params() */
 	uint32_t verify_params_flags;
 
+	/* indicates that this DP module has not yet reached its first deadline and
+	 * no data should be passed yet to the next LL module
+	 *
+	 * why: let's assume a DP module with a 10ms period (a.k.a. a deadline). It may start and
+	 * finish earlier, e.g. in 2ms, providing 10ms of data. LL starts consuming that data in
+	 * 1ms chunks and will drain the 10ms buffer in 10ms, expecting a new portion in the 11th ms.
+	 * BUT - the DP module's deadline is still 10ms, regardless of whether it finished earlier,
+	 * and it is completely fine if processing in the next cycle takes the full 10ms - as long
+	 * as it fits into the deadline.
+	 * This may lead to underruns:
+	 *
+	 * LL1 (1ms) ---> DP (10ms) ---> LL2 (1ms)
+	 *
+	 * ticks 0..9 -> LL1 is producing 1ms data portions, DP is waiting, LL2 is waiting
+	 * tick 10 - DP has enough data to run, it starts processing
+	 * tick 12 - DP finishes early, LL2 starts consuming, LL1 keeps producing data
+	 * ticks 13-19 - LL1 is producing data, LL2 is consuming data (both in 1ms chunks)
+	 * tick 20 - DP starts processing a new 10ms portion of data, having 10ms to finish
+	 *	     !!!! but LL2 has already consumed 8ms !!!!
+	 * tick 22 - LL2 is consuming the last 1ms data chunk
+	 * tick 23 - DP is still processing, LL2 has no data to process
+	 *	     !!! UNDERRUN !!!!
+	 * tick 29 - DP finishes, still properly within its deadline
+	 *
+	 * Solution: even if DP finishes before its deadline, the data must be held back
+	 * until the deadline, so LL2 may start processing no earlier than tick 20
+	 */
+	bool dp_startup_delay;
+
 	/* flag to indicate module does not pause */
 	bool no_pause;
 
diff --git a/src/schedule/zephyr_dp_schedule.c b/src/schedule/zephyr_dp_schedule.c
index cdc5e6a8ad32..2e3eed146586 100644
--- a/src/schedule/zephyr_dp_schedule.c
+++ b/src/schedule/zephyr_dp_schedule.c
@@ -35,10 +35,12 @@ struct scheduler_dp_data {
 
 struct task_dp_pdata {
 	k_tid_t thread_id;		/* zephyr thread ID */
-	uint32_t period_clock_ticks;	/* period the task should be scheduled in Zephyr ticks */
+	uint32_t deadline_clock_ticks;	/* dp module deadline in Zephyr ticks */
+	uint32_t deadline_ll_cycles;	/* dp module deadline in LL cycles */
 	k_thread_stack_t __sparse_cache *p_stack;	/* pointer to thread stack */
 	struct k_sem sem;		/* semaphore for task scheduling */
 	struct processing_module *mod;	/* the module to be scheduled */
+	uint32_t ll_cycles_to_deadline;	/* current number of LL cycles till deadline */
 };
 
 /* Single CPU-wide lock
@@ -227,11 +229,20 @@ void scheduler_dp_ll_tick(void *receiver_data, enum notify_id event_type, void *
 	lock_key = scheduler_dp_lock();
 	list_for_item(tlist, &dp_sch->tasks) {
 		curr_task = container_of(tlist, struct task, list);
+		pdata = curr_task->priv_data;
+		struct processing_module *mod = pdata->mod;
+
+		/* decrease number of LL ticks/cycles left till the module reaches its deadline */
+		if (pdata->ll_cycles_to_deadline) {
+			pdata->ll_cycles_to_deadline--;
+			if (!pdata->ll_cycles_to_deadline)
+				/* deadline reached, clear startup delay flag,
+				 * see dp_startup_delay comment for details
+				 */
+				mod->dp_startup_delay = false;
+		}
 
-		/* step 1 - check if the module is ready for processing */
 		if (curr_task->state == SOF_TASK_STATE_QUEUED) {
-			pdata = curr_task->priv_data;
-			struct processing_module *mod = pdata->mod;
 			bool mod_ready;
 
 			mod_ready = module_is_ready_to_process(mod, mod->sources,
@@ -240,7 +251,9 @@ void scheduler_dp_ll_tick(void *receiver_data, enum notify_id event_type, void *
 							       mod->num_of_sinks);
 			if (mod_ready) {
 				/* set a deadline for given num of ticks, starting now */
-				k_thread_deadline_set(pdata->thread_id, pdata->period_clock_ticks);
+				k_thread_deadline_set(pdata->thread_id,
+						      pdata->deadline_clock_ticks);
+				pdata->ll_cycles_to_deadline = pdata->deadline_ll_cycles;
 
 				/* trigger the task */
 				curr_task->state = SOF_TASK_STATE_RUNNING;
@@ -352,7 +365,7 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta
 	struct scheduler_dp_data *dp_sch = (struct scheduler_dp_data *)data;
 	struct task_dp_pdata *pdata = task->priv_data;
 	unsigned int lock_key;
-	uint64_t period_clock_ticks;
+	uint64_t deadline_clock_ticks;
 
 	lock_key = scheduler_dp_lock();
 
@@ -371,13 +384,16 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta
 	task->state = SOF_TASK_STATE_QUEUED;
 	list_item_prepend(&task->list, &dp_sch->tasks);
 
-	period_clock_ticks = period * CONFIG_SYS_CLOCK_TICKS_PER_SEC;
-	/* period is in us - convert to seconds in next step
-	 * or it always will be zero because of fixed point calculation
+	deadline_clock_ticks = period * CONFIG_SYS_CLOCK_TICKS_PER_SEC;
+	/* period/deadline is in us - convert to seconds in next step
+	 * or it always will be zero because of integer calculation
 	 */
-	period_clock_ticks /= 1000000;
+	deadline_clock_ticks /= 1000000;
 
-	pdata->period_clock_ticks = period_clock_ticks;
+	pdata->deadline_clock_ticks = deadline_clock_ticks;
+	pdata->deadline_ll_cycles = period / LL_TIMER_PERIOD_US;
+	pdata->ll_cycles_to_deadline = 0;
+	pdata->mod->dp_startup_delay = true;
 	scheduler_dp_unlock(lock_key);
 
 	tr_dbg(&dp_tr, "DP task scheduled with period %u [us]", (uint32_t)period);
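
The timeline in the commit message can be sanity-checked with a small standalone
simulation. The program below is only an illustration and is not part of this patch
or of the SOF sources; all names in it (run_sim, DEADLINE_TICKS and so on) are made
up. It models LL1, DP and LL2 with the same numbers as the timeline above, lets the
second and later DP runs use almost the whole deadline, and counts the ticks on
which LL2 finds an empty buffer - once with DP output released as soon as processing
finishes, and once with it held back until the deadline, as dp_startup_delay does.

#include <stdbool.h>
#include <stdio.h>

#define DEADLINE_TICKS	10	/* DP period == deadline, in 1ms LL ticks */
#define FIRST_RUN_TICKS	2	/* the first DP run happens to finish early */
#define LATER_RUN_TICKS	9	/* later runs use almost the whole deadline */

static int run_sim(bool hold_until_deadline)
{
	int input_ms = 0;		/* data buffered between LL1 and DP */
	int output_ms = 0;		/* data buffered between DP and LL2 */
	int dp_left = 0;		/* ticks until the current DP run completes */
	int to_deadline = 0;		/* LL ticks until the current DP deadline */
	bool startup_delay = true;	/* counterpart of dp_startup_delay */
	bool ll2_started = false;
	bool first_run = true;
	int underruns = 0;

	for (int tick = 1; tick <= 40; tick++) {
		input_ms++;					/* LL1: 1ms per tick */

		if (dp_left && --dp_left == 0)			/* DP run completes */
			output_ms += DEADLINE_TICKS;

		if (to_deadline && --to_deadline == 0)		/* deadline reached */
			startup_delay = false;

		if (!dp_left && input_ms >= DEADLINE_TICKS) {	/* DP (re)starts */
			input_ms -= DEADLINE_TICKS;
			dp_left = first_run ? FIRST_RUN_TICKS : LATER_RUN_TICKS;
			to_deadline = DEADLINE_TICKS;
			first_run = false;
		}

		/* LL2 consumes 1ms per tick, but only data that has been released */
		bool gated = hold_until_deadline && startup_delay;

		if (output_ms > 0 && !gated) {
			output_ms--;
			ll2_started = true;
		} else if (ll2_started) {
			underruns++;	/* LL2 is already streaming but finds no data */
		}
	}

	return underruns;
}

int main(void)
{
	printf("underrun ticks, output released when DP finishes: %d\n", run_sim(false));
	printf("underrun ticks, output held until the DP deadline: %d\n", run_sim(true));
	return 0;
}

Run as-is, the first variant reports several underrun ticks while the second reports
none: once the output is held back until the first deadline, later DP runs may use
their full 10ms budget without starving LL2, which is the property this patch relies on.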