Skip to content

Commit

Permalink
DP: provide data to next LL module no earlier than DP deadline
Browse files Browse the repository at this point in the history
Let's assume a DP module with a 10ms period (a.k.a. a deadline).
It starts and finishes earlier, i.e. in 2ms providing 10ms of data
LL starts consuming data in 1ms chunks and will drain
10ms buffer in 10ms, expecting a new portion of data on 11th ms

BUT - the DP module deadline is still 10ms,
regardless of whether it finished earlier, and it is completely fine
that processing in next cycle takes full 10ms - as long as it
fits into the deadline.

It may lead to underruns:

LL1 (1ms) ---> DP (10ms) -->LL2 (1ms)

ticks 0..9 -> LL1 is producing 1ms data portions,
             DP is waiting, LL2 is waiting
tick 10 - DP has enough data to run, it starts processing
tick 12 - DP finishes earlier, LL2 starts consuming,
          LL1 is producing data
ticks 13-19 LL1 is producing data,
            LL2 is consuming data (both in 1ms chunks)
tick 20  - DP starts processing a new portion of 10ms data,
           having 10ms to finish
	      !!!! but LL2 has already consumed 8ms !!!!
tick 22 - LL2 is consuming the last 1ms data chunk
tick 23 - DP is still processing, LL2 has no data to process
	 			!!! UNDERRUN !!!!
tick 29 - DP finishes properly within the deadline

Solution: even if DP finishes before its deadline,
the data must be held till deadline time, so LL2 may
start processing no earlier than tick 20

Signed-off-by: Marcin Szkudlinski <marcin.szkudlinski@intel.com>
  • Loading branch information
marcinszkudlinski authored and lgirdwood committed Dec 19, 2023
1 parent 0bf0309 commit 3d4883a
Show file tree
Hide file tree
Showing 3 changed files with 59 additions and 11 deletions.
3 changes: 3 additions & 0 deletions src/audio/module_adapter/module_adapter.c
Original file line number Diff line number Diff line change
Expand Up @@ -1049,6 +1049,9 @@ static int module_adapter_copy_dp_queues(struct comp_dev *dev)
dp_queue = dp_queue_get_next_item(dp_queue);
}

if (mod->dp_startup_delay)
return 0;

dp_queue = dp_queue_get_first_item(&mod->dp_queue_dp_to_ll_list);
list_for_item(blist, &dev->bsink_list) {
/* output - we need to copy data from dp_queue (as source)
Expand Down
29 changes: 29 additions & 0 deletions src/include/module/module/base.h
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,35 @@ struct processing_module {
/* module-specific flags for comp_verify_params() */
uint32_t verify_params_flags;

/* indicates that this DP module did not yet reach its first deadline and
* no data should be passed yet to next LL module
*
 * why: let's assume a DP module with a 10ms period (a.k.a. a deadline). It starts and finishes
 * earlier, i.e. in 2ms, providing 10ms of data. LL starts consuming data in 1ms chunks and
* will drain 10ms buffer in 10ms, expecting a new portion of data on 11th ms
 * BUT - the DP module deadline is still 10ms, regardless of whether it finished earlier,
* and it is completely fine that processing in next cycle takes full 10ms - as long as it
* fits into the deadline.
* It may lead to underruns:
*
* LL1 (1ms) ---> DP (10ms) -->LL2 (1ms)
*
* ticks 0..9 -> LL1 is producing 1ms data portions, DP is waiting, LL2 is waiting
* tick 10 - DP has enough data to run, it starts processing
* tick 12 - DP finishes earlier, LL2 starts consuming, LL1 is producing data
* ticks 13-19 LL1 is producing data, LL2 is consuming data (both in 1ms chunks)
* tick 20 - DP starts processing a new portion of 10ms data, having 10ms to finish
* !!!! but LL2 has already consumed 8ms !!!!
* tick 22 - LL2 is consuming the last 1ms data chunk
* tick 23 - DP is still processing, LL2 has no data to process
* !!! UNDERRUN !!!!
 * tick 29 - DP finishes properly within the deadline
*
* Solution: even if DP finishes before its deadline, the data must be held till
* deadline time, so LL2 may start processing no earlier than tick 20
*/
bool dp_startup_delay;

/* flag to indicate module does not pause */
bool no_pause;

Expand Down
38 changes: 27 additions & 11 deletions src/schedule/zephyr_dp_schedule.c
Original file line number Diff line number Diff line change
Expand Up @@ -35,10 +35,12 @@ struct scheduler_dp_data {

struct task_dp_pdata {
k_tid_t thread_id; /* zephyr thread ID */
uint32_t period_clock_ticks; /* period the task should be scheduled in Zephyr ticks */
uint32_t deadline_clock_ticks; /* dp module deadline in Zephyr ticks */
uint32_t deadline_ll_cycles; /* dp module deadline in LL cycles */
k_thread_stack_t __sparse_cache *p_stack; /* pointer to thread stack */
struct k_sem sem; /* semaphore for task scheduling */
struct processing_module *mod; /* the module to be scheduled */
uint32_t ll_cycles_to_deadline; /* current number of LL cycles till deadline */
};

/* Single CPU-wide lock
Expand Down Expand Up @@ -227,11 +229,20 @@ void scheduler_dp_ll_tick(void *receiver_data, enum notify_id event_type, void *
lock_key = scheduler_dp_lock();
list_for_item(tlist, &dp_sch->tasks) {
curr_task = container_of(tlist, struct task, list);
pdata = curr_task->priv_data;
struct processing_module *mod = pdata->mod;

/* decrease number of LL ticks/cycles left till the module reaches its deadline */
if (pdata->ll_cycles_to_deadline) {
pdata->ll_cycles_to_deadline--;
if (!pdata->ll_cycles_to_deadline)
/* deadline reached, clear startup delay flag.
* see dp_startup_delay comment for details
*/
mod->dp_startup_delay = false;
}

/* step 1 - check if the module is ready for processing */
if (curr_task->state == SOF_TASK_STATE_QUEUED) {
pdata = curr_task->priv_data;
struct processing_module *mod = pdata->mod;
bool mod_ready;

mod_ready = module_is_ready_to_process(mod, mod->sources,
Expand All @@ -240,7 +251,9 @@ void scheduler_dp_ll_tick(void *receiver_data, enum notify_id event_type, void *
mod->num_of_sinks);
if (mod_ready) {
/* set a deadline for given num of ticks, starting now */
k_thread_deadline_set(pdata->thread_id, pdata->period_clock_ticks);
k_thread_deadline_set(pdata->thread_id,
pdata->deadline_clock_ticks);
pdata->ll_cycles_to_deadline = pdata->deadline_ll_cycles;

/* trigger the task */
curr_task->state = SOF_TASK_STATE_RUNNING;
Expand Down Expand Up @@ -352,7 +365,7 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta
struct scheduler_dp_data *dp_sch = (struct scheduler_dp_data *)data;
struct task_dp_pdata *pdata = task->priv_data;
unsigned int lock_key;
uint64_t period_clock_ticks;
uint64_t deadline_clock_ticks;

lock_key = scheduler_dp_lock();

Expand All @@ -371,13 +384,16 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta
task->state = SOF_TASK_STATE_QUEUED;
list_item_prepend(&task->list, &dp_sch->tasks);

period_clock_ticks = period * CONFIG_SYS_CLOCK_TICKS_PER_SEC;
/* period is in us - convert to seconds in next step
* or it always will be zero because of fixed point calculation
deadline_clock_ticks = period * CONFIG_SYS_CLOCK_TICKS_PER_SEC;
/* period/deadline is in us - convert to seconds in next step
* or it always will be zero because of integer calculation
*/
period_clock_ticks /= 1000000;
deadline_clock_ticks /= 1000000;

pdata->period_clock_ticks = period_clock_ticks;
pdata->deadline_clock_ticks = deadline_clock_ticks;
pdata->deadline_ll_cycles = period / LL_TIMER_PERIOD_US;
pdata->ll_cycles_to_deadline = 0;
pdata->mod->dp_startup_delay = true;
scheduler_dp_unlock(lock_key);

tr_dbg(&dp_tr, "DP task scheduled with period %u [us]", (uint32_t)period);
Expand Down

0 comments on commit 3d4883a

Please sign in to comment.