diff --git a/src/debug/telemetry/Kconfig b/src/debug/telemetry/Kconfig
index 6e6a55d18f3c..f380a90e1808 100644
--- a/src/debug/telemetry/Kconfig
+++ b/src/debug/telemetry/Kconfig
@@ -3,6 +3,7 @@
 config SOF_TELEMETRY
 	bool "enable telemetry"
 	default n
+	depends on !SOF_USERSPACE_LL
 	help
 	  Enables telemetry. Enables performance measurements and debug utilities
 	  that use memory window 2 (debug window) as interface. Measurements include
diff --git a/src/include/sof/schedule/ll_schedule.h b/src/include/sof/schedule/ll_schedule.h
index 241a20daff40..fa0a52736d25 100644
--- a/src/include/sof/schedule/ll_schedule.h
+++ b/src/include/sof/schedule/ll_schedule.h
@@ -53,6 +53,10 @@ int zephyr_ll_task_init(struct task *task,
 #define scheduler_init_ll zephyr_ll_scheduler_init
 #define schedule_task_init_ll zephyr_ll_task_init
 
+struct task *zephyr_ll_task_alloc(void);
+k_tid_t zephyr_ll_get_thread(int core);
+struct k_mem_domain *zephyr_ll_mem_domain(void);
+
 #endif
 
 /**
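The accessors declared above are what lets code outside the scheduler share memory and kernel objects with an LL thread once it runs in user mode. A minimal caller-side sketch, assuming a hypothetical partition name 'my_part' and helper 'share_state_with_ll' (neither is part of this patch):

    #include <errno.h>
    #include <zephyr/kernel.h>
    #include <zephyr/app_memory/app_memdomain.h>
    #include <sof/schedule/ll_schedule.h>

    K_APPMEM_PARTITION_DEFINE(my_part);
    K_APP_BMEM(my_part) static int my_shared_state;

    static int share_state_with_ll(int core)
    {
    	k_tid_t ll = zephyr_ll_get_thread(core);

    	if (!ll)
    		return -ENODEV;

    	/* map this module's app partition into the LL memory domain */
    	return k_mem_domain_add_partition(zephyr_ll_mem_domain(), &my_part);
    }
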
diff --git a/src/include/sof/schedule/ll_schedule_domain.h b/src/include/sof/schedule/ll_schedule_domain.h
index 5fc7691e3b13..7be6d0d0b247 100644
--- a/src/include/sof/schedule/ll_schedule_domain.h
+++ b/src/include/sof/schedule/ll_schedule_domain.h
@@ -66,7 +66,11 @@ struct ll_schedule_domain_ops {
 struct ll_schedule_domain {
 	uint64_t next_tick;	/**< ticks just set for next run */
 	uint64_t new_target_tick; /**< for the next set, used during the reschedule stage */
-	struct k_spinlock lock; /**< standard lock */
+#if defined(__ZEPHYR__)
+	struct k_mutex *lock; /**< standard lock */
+#else
+	struct k_spinlock lock; /**< standard lock */
+#endif
 	atomic_t total_num_tasks;	/**< total number of registered tasks */
 	atomic_t enabled_cores;		/**< number of enabled cores */
 	uint32_t ticks_per_ms;		/**< number of clock ticks per ms */
@@ -93,13 +97,30 @@ static inline struct ll_schedule_domain *dma_domain_get(void)
 	return sof_get()->platform_dma_domain;
 }
 
+#if defined(__ZEPHYR__)
+struct ll_schedule_domain *zephyr_ll_domain(void);
+struct ll_schedule_domain *zephyr_domain_init(int clk);
+#if CONFIG_SOF_USERSPACE_LL
+struct k_heap *zephyr_ll_heap(void);
+struct task *zephyr_ll_task_alloc(void);
+void zephyr_ll_resources_init(void);
+int zephyr_ll_mem_domain_add_partition(struct k_mem_partition *partition);
+int zephyr_ll_mem_domain_add_thread(k_tid_t thread);
+#endif /* CONFIG_SOF_USERSPACE_LL */
+#endif
+
 static inline struct ll_schedule_domain *domain_init
 				(int type, int clk, bool synchronous,
 				 const struct ll_schedule_domain_ops *ops)
 {
 	struct ll_schedule_domain *domain;
 
+#if defined(__ZEPHYR__) && CONFIG_SOF_USERSPACE_LL
+	domain = sof_heap_alloc(zephyr_ll_heap(), SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT,
+				sizeof(*domain), sizeof(void *));
+#else
 	domain = rzalloc(SOF_MEM_FLAG_KERNEL | SOF_MEM_FLAG_COHERENT, sizeof(*domain));
+#endif
 	if (!domain)
 		return NULL;
 	domain->type = type;
@@ -116,7 +137,28 @@ static inline struct ll_schedule_domain *domain_init
 	domain->next_tick = UINT64_MAX;
 	domain->new_target_tick = UINT64_MAX;
+#if defined(__ZEPHYR__)
+
+#if defined(CONFIG_SOF_USERSPACE_LL)
+	/* Allocate mutex dynamically for userspace access */
+	domain->lock = k_object_alloc(K_OBJ_MUTEX);
+#else /* !CONFIG_SOF_USERSPACE_LL */
+	domain->lock = rzalloc(SOF_MEM_FLAG_KERNEL | SOF_MEM_FLAG_COHERENT, sizeof(*domain->lock));
+#endif
+	if (!domain->lock) {
+		/* free from the heap the domain was actually allocated on */
+#if defined(CONFIG_SOF_USERSPACE_LL)
+		sof_heap_free(zephyr_ll_heap(), domain);
+#else
+		rfree(domain);
+#endif
+		return NULL;
+	}
+	k_mutex_init(domain->lock);
+
+#else /* !__ZEPHYR__ */
 	k_spinlock_init(&domain->lock);
+#endif
 
 	atomic_init(&domain->total_num_tasks, 0);
 	atomic_init(&domain->enabled_cores, 0);
@@ -243,9 +280,8 @@ struct ll_schedule_domain *zephyr_dma_domain_init(struct dma *dma_array,
 						  uint32_t num_dma, int clk);
 #endif /* CONFIG_DMA_DOMAIN */
 
-struct ll_schedule_domain *zephyr_ll_domain(void);
-struct ll_schedule_domain *zephyr_domain_init(int clk);
 #define timer_domain_init(timer, clk) zephyr_domain_init(clk)
+k_tid_t zephyr_domain_thread_tid(struct ll_schedule_domain *domain);
 
 #endif
 
 struct ll_schedule_domain *dma_multi_chan_domain_init(struct dma *dma_array,
diff --git a/src/init/init.c b/src/init/init.c
index ddb4c604b4a6..c630e455870c 100644
--- a/src/init/init.c
+++ b/src/init/init.c
@@ -218,6 +218,10 @@ __cold static int primary_core_init(int argc, char *argv[], struct sof *sof)
 	io_perf_monitor_init();
 #endif
 
+#if CONFIG_SOF_USERSPACE_LL
+	zephyr_ll_resources_init();
+#endif
+
 	/* init the platform */
 	if (platform_init(sof) < 0)
 		sof_panic(SOF_IPC_PANIC_PLATFORM);
diff --git a/src/schedule/Kconfig b/src/schedule/Kconfig
index 99ca2861f650..f1383e70103f 100644
--- a/src/schedule/Kconfig
+++ b/src/schedule/Kconfig
@@ -15,6 +15,7 @@ config SCHEDULE_DMA_MULTI_CHANNEL
 config SCHEDULE_LL_STATS_LOG
 	bool "Log low-latency scheduler statistics"
 	default y
+	depends on !SOF_USERSPACE_LL
 	help
 	  Log statistics from low-latency scheduler. This is a low
 	  overhead mechanism to gather average and worst-case execution times of
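The init.c hook above runs once on the primary core, before the platform and the schedulers come up. The underlying pattern is one shared memory domain plus one heap for all LL threads; a standalone sketch of that pattern, with illustrative names and a static heap backing instead of module_driver_heap_init():

    #include <zephyr/kernel.h>

    static char __aligned(4096) heap_mem[8192];

    static struct {
    	struct k_mem_domain domain;
    	struct k_heap heap;
    	struct k_mutex lock;
    } res;

    static void resources_init(void)
    {
    	k_mem_domain_init(&res.domain, 0, NULL);	/* start with no partitions */
    	k_mutex_init(&res.lock);			/* serializes later domain updates */
    	k_heap_init(&res.heap, heap_mem, sizeof(heap_mem));
    }
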
diff --git a/src/schedule/zephyr_domain.c b/src/schedule/zephyr_domain.c
index 6e03158f2bb9..aafda2078b6c 100644
--- a/src/schedule/zephyr_domain.c
+++ b/src/schedule/zephyr_domain.c
@@ -7,6 +7,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -38,17 +39,40 @@ LOG_MODULE_DECLARE(ll_schedule, CONFIG_SOF_LOG_LEVEL);
 
 #define ZEPHYR_LL_STACK_SIZE 8192
 
+#if CONFIG_SOF_USERSPACE_LL
+
+K_THREAD_STACK_ARRAY_DEFINE(ll_sched_stack, CONFIG_CORE_COUNT, ZEPHYR_LL_STACK_SIZE);
+
+/**
+ * Memory resources for the userspace LL scheduler
+ *
+ * This structure encapsulates the memory management resources required for the
+ * low-latency (LL) scheduler in userspace mode. It provides memory isolation
+ * and heap management for LL scheduler threads.
+ */
+struct zephyr_ll_mem_resources {
+	struct k_mem_domain mem_domain;	/**< Memory domain for LL thread isolation */
+	struct k_heap *heap;		/**< Heap allocator for LL scheduler memory */
+	struct k_mutex lock;		/**< Mutex protecting memory domain operations */
+};
+
+static struct zephyr_ll_mem_resources ll_mem_resources;
+
+#else
+
 K_KERNEL_STACK_ARRAY_DEFINE(ll_sched_stack, CONFIG_CORE_COUNT, ZEPHYR_LL_STACK_SIZE);
 
+#endif /* CONFIG_SOF_USERSPACE_LL */
+
 struct zephyr_domain_thread {
-	struct k_thread ll_thread;
-	struct k_sem sem;
+	struct k_thread *ll_thread;
+	struct k_sem *sem;
 	void (*handler)(void *arg);
 	void *arg;
 };
 
 struct zephyr_domain {
-	struct k_timer timer;
+	struct k_timer *timer;
 	struct zephyr_domain_thread domain_thread[CONFIG_CORE_COUNT];
 	struct ll_schedule_domain *ll_domain;
 #if CONFIG_CROSS_CORE_STREAM
@@ -77,16 +101,19 @@ static inline void stats_report(unsigned int runs, int core, unsigned int cycles
 static void zephyr_domain_thread_fn(void *p1, void *p2, void *p3)
 {
 	struct zephyr_domain *zephyr_domain = p1;
-	int core = cpu_get_id();
+	int core = (int)p2;
 	struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core;
 #ifdef CONFIG_SCHEDULE_LL_STATS_LOG
 	unsigned int runs = 0, overruns = 0, cycles_sum = 0, cycles_max = 0;
 	unsigned int cycles0, cycles1, diff, timer_fired;
 #endif
 
+	/* not working in IMMEDIATE log mode */
+	tr_info(&ll_tr, "ll core %u thread starting", core);
+
 	for (;;) {
 		/* immediately go to sleep, waiting to be woken up by the timer */
-		k_sem_take(&dt->sem, K_FOREVER);
+		k_sem_take(dt->sem, K_FOREVER);
 
 #ifdef CONFIG_SCHEDULE_LL_STATS_LOG
 		cycles0 = k_cycle_get_32();
@@ -120,7 +147,7 @@ static void zephyr_domain_thread_fn(void *p1, void *p2, void *p3)
 		/* This handles wrapping correctly too */
 		diff = cycles1 - cycles0;
 
-		timer_fired = k_timer_status_get(&zephyr_domain->timer);
+		timer_fired = k_timer_status_get(zephyr_domain->timer);
 		if (timer_fired > 1)
 			overruns++;
 
@@ -163,10 +190,24 @@ static void zephyr_domain_timer_fn(struct k_timer *timer)
 		struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core;
 
 		if (dt->handler)
-			k_sem_give(&dt->sem);
+			k_sem_give(dt->sem);
 	}
 }
 
+#if CONFIG_SOF_USERSPACE_LL
+
+k_tid_t zephyr_domain_thread_tid(struct ll_schedule_domain *domain)
+{
+	struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain);
+	int core = cpu_get_id();
+	struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core;
+
+	tr_dbg(&ll_tr, "entry");
+
+	return dt->ll_thread;
+}
+#endif
+
 static int zephyr_domain_register(struct ll_schedule_domain *domain,
 				  struct task *task,
 				  void (*handler)(void *arg), void *arg)
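The thread function above now receives the core ID through p2 instead of calling cpu_get_id() itself: a user-mode entry function should not rely on a privileged CPU query, and the argument is fixed before the thread is pinned and started. A reduced sketch of the same pattern (illustrative names):

    #include <zephyr/kernel.h>
    #include <zephyr/sys/printk.h>

    static void worker(void *p1, void *p2, void *p3)
    {
    	int core = (int)(uintptr_t)p2;	/* recover the integer from the pointer slot */

    	ARG_UNUSED(p1);
    	ARG_UNUSED(p3);
    	printk("worker bound to core %d\n", core);
    }

    /* creation side, pinning before start:
     *	tid = k_thread_create(t, stack, size, worker,
     *			      ctx, (void *)(uintptr_t)core, NULL,
     *			      prio, K_USER, K_FOREVER);
     *	k_thread_cpu_mask_clear(tid);
     *	k_thread_cpu_mask_enable(tid, core);
     *	k_thread_start(tid);
     */
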
@@ -175,8 +216,8 @@ static int zephyr_domain_register(struct ll_schedule_domain *domain,
 	struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain);
 	int core = cpu_get_id();
 	struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core;
 	char thread_name[] = "ll_thread0";
+	int flags = 0;
 	k_tid_t thread;
-	k_spinlock_key_t key;
 
 	tr_dbg(&ll_tr, "entry");
 
@@ -184,41 +225,75 @@ static int zephyr_domain_register(struct ll_schedule_domain *domain,
 	/* domain work only needs registered once on each core */
 	if (dt->handler)
 		return 0;
 
+#ifdef CONFIG_SOF_USERSPACE_LL
+	__ASSERT_NO_MSG(task->core == core);
+#endif
+
 	dt->handler = handler;
 	dt->arg = arg;
 
-	/* 10 is rather random, we better not accumulate 10 missed timer interrupts */
-	k_sem_init(&dt->sem, 0, 10);
-
 	thread_name[sizeof(thread_name) - 2] = '0' + core;
 
-	thread = k_thread_create(&dt->ll_thread,
-				 ll_sched_stack[core],
-				 ZEPHYR_LL_STACK_SIZE,
-				 zephyr_domain_thread_fn, zephyr_domain, NULL, NULL,
-				 CONFIG_LL_THREAD_PRIORITY, 0, K_FOREVER);
+#if CONFIG_SOF_USERSPACE_LL
+	flags |= K_USER;
+#endif
+
+	if (!IS_ENABLED(CONFIG_SOF_USERSPACE_LL) || !dt->ll_thread) {
+		/* Allocate thread structure dynamically */
+#if CONFIG_SOF_USERSPACE_LL
+		dt->ll_thread = k_object_alloc(K_OBJ_THREAD);
+#else
+		dt->ll_thread = rzalloc(SOF_MEM_FLAG_KERNEL | SOF_MEM_FLAG_COHERENT,
+					sizeof(*dt->ll_thread));
+#endif
+		if (!dt->ll_thread) {
+			tr_err(&ll_tr, "Failed to allocate thread object for core %d", core);
+			return -ENOMEM;
+		}
+
+		thread = k_thread_create(dt->ll_thread,
+					 ll_sched_stack[core],
+					 ZEPHYR_LL_STACK_SIZE,
+					 zephyr_domain_thread_fn, zephyr_domain, (void *)core, NULL,
+					 CONFIG_LL_THREAD_PRIORITY, flags, K_FOREVER);
+
+		k_thread_cpu_mask_clear(thread);
+		k_thread_cpu_mask_enable(thread, core);
+		k_thread_name_set(thread, thread_name);
+
+#if CONFIG_SOF_USERSPACE_LL
+		/*
+		 * TODO:
+		 * - grant permissions
+		 */
+		tr_dbg(&ll_tr, "Grant access to %p (core %d, thread %p)", dt->sem, core, thread);
+		k_thread_access_grant(thread, dt->sem);
 
-	k_thread_cpu_mask_clear(thread);
-	k_thread_cpu_mask_enable(thread, core);
-	k_thread_name_set(thread, thread_name);
+		zephyr_ll_mem_domain_add_thread(thread);
+		k_thread_access_grant(thread, domain->lock);
+		k_thread_access_grant(thread, zephyr_domain->timer);
+		tr_dbg(&ll_tr, "Added access to %p", zephyr_domain);
+#endif
 
-	k_thread_start(thread);
+		k_thread_start(thread);
+	}
 
-	key = k_spin_lock(&domain->lock);
+	k_mutex_lock(domain->lock, K_FOREVER);
 
-	if (!k_timer_user_data_get(&zephyr_domain->timer)) {
+	if (!k_timer_user_data_get(zephyr_domain->timer)) {
 		k_timeout_t start = {0};
 
-		k_timer_init(&zephyr_domain->timer, zephyr_domain_timer_fn, NULL);
-		k_timer_user_data_set(&zephyr_domain->timer, zephyr_domain);
+		k_timer_init(zephyr_domain->timer, zephyr_domain_timer_fn, NULL);
+		k_timer_user_data_set(zephyr_domain->timer, zephyr_domain);
 
-		k_timer_start(&zephyr_domain->timer, start, K_USEC(LL_TIMER_PERIOD_US));
+		k_timer_start(zephyr_domain->timer, start, K_USEC(LL_TIMER_PERIOD_US));
 
 		/* Enable the watchdog */
 		watchdog_enable(core);
 	}
 
-	k_spin_unlock(&domain->lock, key);
+	k_mutex_unlock(domain->lock);
 
 	tr_info(&ll_tr, "zephyr_domain_register domain->type %d domain->clk %d domain->ticks_per_ms %d period %d",
 		domain->type, domain->clk, domain->ticks_per_ms, (uint32_t)LL_TIMER_PERIOD_US);
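The register path above encodes the central user-mode rule: every kernel object the K_USER thread will touch (semaphore, mutex, timer) must be granted, and the memory domain membership set up, between k_thread_create() with a K_FOREVER delay and k_thread_start(). A self-contained sketch of that ordering, with illustrative names and a single thread (k_object_alloc() assumes CONFIG_DYNAMIC_OBJECTS):

    #include <zephyr/kernel.h>

    #define MY_STACK_SIZE 4096
    K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE);

    static struct k_sem tick_sem;

    static void my_worker(void *p1, void *p2, void *p3)
    {
    	for (;;)
    		k_sem_take(&tick_sem, K_FOREVER);
    }

    static k_tid_t start_user_worker(void)
    {
    	struct k_thread *t = k_object_alloc(K_OBJ_THREAD);
    	k_tid_t tid;

    	if (!t)
    		return NULL;

    	k_sem_init(&tick_sem, 0, 10);
    	tid = k_thread_create(t, my_stack, MY_STACK_SIZE, my_worker,
    			      NULL, NULL, NULL, 1, K_USER, K_FOREVER);

    	k_thread_access_grant(tid, &tick_sem);	/* grants must precede start */
    	k_thread_start(tid);			/* the thread may now run in user mode */
    	return tid;
    }
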
@@ -230,8 +305,11 @@ static int zephyr_domain_unregister(struct ll_schedule_domain *domain,
 				    struct task *task, uint32_t num_tasks)
 {
 	struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain);
+#ifdef CONFIG_SOF_USERSPACE_LL
+	int core = task->core;
+#else
 	int core = cpu_get_id();
-	k_spinlock_key_t key;
+#endif
 
 	tr_dbg(&ll_tr, "entry");
 
@@ -239,28 +317,39 @@ static int zephyr_domain_unregister(struct ll_schedule_domain *domain,
 	if (num_tasks)
 		return 0;
 
-	key = k_spin_lock(&domain->lock);
+	k_mutex_lock(domain->lock, K_FOREVER);
 
 	if (!atomic_read(&domain->total_num_tasks)) {
 		/* Disable the watchdog */
 		watchdog_disable(core);
 
-		k_timer_stop(&zephyr_domain->timer);
-		k_timer_user_data_set(&zephyr_domain->timer, NULL);
+		k_timer_stop(zephyr_domain->timer);
+		k_timer_user_data_set(zephyr_domain->timer, NULL);
 	}
 
 	zephyr_domain->domain_thread[core].handler = NULL;
 
-	k_spin_unlock(&domain->lock, key);
+	k_mutex_unlock(domain->lock);
 
 	tr_info(&ll_tr, "zephyr_domain_unregister domain->type %d domain->clk %d",
 		domain->type, domain->clk);
 
+#ifndef CONFIG_SOF_USERSPACE_LL
 	/*
+	 * With user-space LL, the thread is left running, only the timer
+	 * is stopped.
+	 *
 	 * If running in the context of the domain thread, k_thread_abort() will
 	 * not return
 	 */
-	k_thread_abort(&zephyr_domain->domain_thread[core].ll_thread);
+	if (zephyr_domain->domain_thread[core].ll_thread) {
+		k_thread_abort(zephyr_domain->domain_thread[core].ll_thread);
+		/* the thread object was rzalloc()ed in zephyr_domain_register() */
+		rfree(zephyr_domain->domain_thread[core].ll_thread);
+		zephyr_domain->domain_thread[core].ll_thread = NULL;
+	}
+#endif
+
+	tr_dbg(&ll_tr, "exit");
 
 	return 0;
 }
@@ -299,6 +388,45 @@ static const struct ll_schedule_domain_ops zephyr_domain_ops = {
 #endif
 };
 
+#if CONFIG_SOF_USERSPACE_LL
+static struct k_heap *zephyr_ll_heap_init(void)
+{
+	struct k_heap *heap = module_driver_heap_init();
+	struct k_mem_partition mem_partition;
+
+	/* Create a memory partition covering the LL heap */
+	mem_partition.start = (uintptr_t)sys_cache_cached_ptr_get(heap->heap.init_mem);
+	mem_partition.size = heap->heap.init_bytes;
+	mem_partition.attr = K_MEM_PARTITION_P_RW_U_RW | XTENSA_MMU_CACHED_WB;
+
+	zephyr_ll_mem_domain_add_partition(&mem_partition);
+	tr_dbg(&ll_tr, "init ll heap %p, size %u (cached)",
+	       (void *)mem_partition.start,
+	       heap->heap.init_bytes);
+
+	mem_partition.start = (uintptr_t)sys_cache_uncached_ptr_get(heap->heap.init_mem);
+	zephyr_ll_mem_domain_add_partition(&mem_partition);
+	tr_dbg(&ll_tr, "init ll heap %p, size %u (uncached)",
+	       (void *)mem_partition.start,
+	       heap->heap.init_bytes);
+
+	return heap;
+}
+
+void zephyr_ll_resources_init(void)
+{
+	k_mem_domain_init(&ll_mem_resources.mem_domain, 0, NULL);
+	k_mutex_init(&ll_mem_resources.lock);
+
+	ll_mem_resources.heap = zephyr_ll_heap_init();
+}
+
+struct k_heap *zephyr_ll_heap(void)
+{
+	return ll_mem_resources.heap;
+}
+#endif
+
 struct ll_schedule_domain *zephyr_domain_init(int clk)
 {
 	struct ll_schedule_domain *domain;
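zephyr_ll_heap_init() above maps the same physical heap twice, once through the cached and once through the uncached alias, so user threads can dereference either pointer flavour. A reduced sketch of that dual mapping, assuming a target where sys_cache_cached_ptr_get()/sys_cache_uncached_ptr_get() return distinct aliases ('domain', 'mem' and 'size' are illustrative):

    #include <zephyr/kernel.h>
    #include <zephyr/cache.h>

    static int map_both_aliases(struct k_mem_domain *domain, void *mem, size_t size)
    {
    	struct k_mem_partition part = {
    		.size = size,
    		.attr = K_MEM_PARTITION_P_RW_U_RW,	/* cache attribute bits omitted */
    	};
    	int ret;

    	part.start = (uintptr_t)sys_cache_cached_ptr_get(mem);
    	ret = k_mem_domain_add_partition(domain, &part);
    	if (ret)
    		return ret;

    	part.start = (uintptr_t)sys_cache_uncached_ptr_get(mem);
    	return k_mem_domain_add_partition(domain, &part);
    }
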
@@ -311,8 +439,13 @@ struct ll_schedule_domain *zephyr_domain_init(int clk)
 		return NULL;
 	}
 
+#if CONFIG_SOF_USERSPACE_LL
+	zephyr_domain = sof_heap_alloc(zephyr_ll_heap(), SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT,
+				       sizeof(*zephyr_domain), sizeof(void *));
+#else
 	zephyr_domain = rzalloc(SOF_MEM_FLAG_KERNEL | SOF_MEM_FLAG_COHERENT,
 				sizeof(*zephyr_domain));
+#endif
 	if (!zephyr_domain) {
 		tr_err(&ll_tr, "domain allocation failed");
 		rfree(domain);
@@ -321,6 +454,27 @@ struct ll_schedule_domain *zephyr_domain_init(int clk)
 
 	zephyr_domain->ll_domain = domain;
 
+#if CONFIG_SOF_USERSPACE_LL
+	/* Allocate timer dynamically for userspace access */
+	zephyr_domain->timer = k_object_alloc(K_OBJ_TIMER);
+	if (!zephyr_domain->timer) {
+		tr_err(&ll_tr, "timer allocation failed");
+		/* both objects live on the LL heap in this configuration */
+		sof_heap_free(zephyr_ll_heap(), zephyr_domain);
+		sof_heap_free(zephyr_ll_heap(), domain);
+		return NULL;
+	}
+#else
+	/* For kernel mode, allocate the timer on the heap */
+	zephyr_domain->timer = rzalloc(SOF_MEM_FLAG_KERNEL | SOF_MEM_FLAG_COHERENT,
+				       sizeof(struct k_timer));
+	if (!zephyr_domain->timer) {
+		tr_err(&ll_tr, "timer allocation failed");
+		rfree(zephyr_domain);
+		rfree(domain);
+		return NULL;
+	}
+#endif
+
 #if CONFIG_CROSS_CORE_STREAM
 	atomic_set(&zephyr_domain->block, 0);
 	k_mutex_init(&zephyr_domain->block_mutex);
@@ -329,9 +483,67 @@ struct ll_schedule_domain *zephyr_domain_init(int clk)
 
 	ll_sch_domain_set_pdata(domain, zephyr_domain);
 
+	struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + cpu_get_id();
+#if CONFIG_SOF_USERSPACE_LL
+	/* Add zephyr_domain_ops to the memory domain for user thread access */
+	struct k_mem_partition ops_partition;
+
+	ops_partition.start = (uintptr_t)&zephyr_domain_ops;
+	ops_partition.size = sizeof(zephyr_domain_ops);
+	ops_partition.attr = K_MEM_PARTITION_P_RO_U_RO;
+
+	k_mutex_lock(&ll_mem_resources.lock, K_FOREVER);
+	k_mem_domain_add_partition(&ll_mem_resources.mem_domain, &ops_partition);
+	k_mutex_unlock(&ll_mem_resources.lock);
+
+	dt->sem = k_object_alloc(K_OBJ_SEM);
+#else
+	dt->sem = rzalloc(SOF_MEM_FLAG_KERNEL | SOF_MEM_FLAG_COHERENT,
+			  sizeof(*dt->sem));
+#endif
+
+	if (!dt->sem) {
+		tr_err(&ll_tr, "Failed to allocate semaphore for core %d", cpu_get_id());
+		k_panic();
+	}
+	k_sem_init(dt->sem, 0, 10);
+
 	return domain;
 }
 
+#if CONFIG_SOF_USERSPACE_LL
+struct k_mem_domain *zephyr_ll_mem_domain(void)
+{
+	return &ll_mem_resources.mem_domain;
+}
+
+/* Wrapper to add a memory partition to the LL domain with mutex protection */
+int zephyr_ll_mem_domain_add_partition(struct k_mem_partition *partition)
+{
+	int ret;
+
+	k_mutex_lock(&ll_mem_resources.lock, K_FOREVER);
+	ret = k_mem_domain_add_partition(&ll_mem_resources.mem_domain, partition);
+	k_mutex_unlock(&ll_mem_resources.lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(zephyr_ll_mem_domain_add_partition);
+
+/* Wrapper to add a thread to the LL domain with mutex protection */
+int zephyr_ll_mem_domain_add_thread(k_tid_t thread)
+{
+	int ret;
+
+	k_mutex_lock(&ll_mem_resources.lock, K_FOREVER);
+	ret = k_mem_domain_add_thread(&ll_mem_resources.mem_domain, thread);
+	k_mutex_unlock(&ll_mem_resources.lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(zephyr_ll_mem_domain_add_thread);
+#endif /* CONFIG_SOF_USERSPACE_LL */
+
 /* Check if currently running in the LL scheduler thread context */
 bool ll_sch_is_current(void)
 {
@@ -342,6 +554,20 @@ bool ll_sch_is_current(void)
 
 	struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + cpu_get_id();
 
-	return k_current_get() == &dt->ll_thread;
+	return k_current_get() == dt->ll_thread;
 }
 EXPORT_SYMBOL(ll_sch_is_current);
+
+/* Get the LL scheduler thread for a specific core */
+k_tid_t zephyr_ll_get_thread(int core)
+{
+	struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(zephyr_ll_domain());
+
+	if (!zephyr_domain || core >= CONFIG_CORE_COUNT)
+		return NULL;
+
+	struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core;
+
+	return dt->ll_thread;
+}
+EXPORT_SYMBOL(zephyr_ll_get_thread);
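A recurring pattern in the file above is replacing embedded kernel objects (struct k_timer timer;) with pointers obtained from k_object_alloc(), so the objects live in kernel-tracked storage that can be granted to user threads. A minimal sketch of the timer variant (illustrative names, requires CONFIG_DYNAMIC_OBJECTS):

    #include <zephyr/kernel.h>

    static void on_expiry(struct k_timer *t)
    {
    	/* expiry handler runs in ISR context */
    }

    static struct k_timer *make_user_visible_timer(k_tid_t user_thread)
    {
    	struct k_timer *timer = k_object_alloc(K_OBJ_TIMER);

    	if (!timer)
    		return NULL;

    	k_timer_init(timer, on_expiry, NULL);
    	k_thread_access_grant(user_thread, timer);	/* user may now query it */
    	k_timer_start(timer, K_NO_WAIT, K_USEC(1000));	/* 1 ms period */
    	return timer;
    }
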
diff --git a/src/schedule/zephyr_ll.c b/src/schedule/zephyr_ll.c
index 851fd9a96a98..558e424818d7 100644
--- a/src/schedule/zephyr_ll.c
+++ b/src/schedule/zephyr_ll.c
@@ -30,6 +30,10 @@ struct zephyr_ll {
 	unsigned int n_tasks;			/* task counter */
 	struct ll_schedule_domain *ll_domain;	/* scheduling domain */
 	unsigned int core;			/* core ID of this instance */
+#if CONFIG_SOF_USERSPACE_LL
+	struct k_mutex *lock;			/* mutex for userspace */
+#endif
+	struct k_heap *heap;			/* heap backing scheduler allocations */
 };
 
 /* per-task scheduler data */
@@ -41,17 +45,26 @@ struct zephyr_ll_pdata {
 
 static void zephyr_ll_lock(struct zephyr_ll *sch, uint32_t *flags)
 {
+#if CONFIG_SOF_USERSPACE_LL
+	k_mutex_lock(sch->lock, K_FOREVER);
+#else
 	irq_local_disable(*flags);
+#endif
 }
 
 static void zephyr_ll_unlock(struct zephyr_ll *sch, uint32_t *flags)
 {
+#if CONFIG_SOF_USERSPACE_LL
+	k_mutex_unlock(sch->lock);
+#else
 	irq_local_enable(*flags);
+#endif
 }
 
 static void zephyr_ll_assert_core(const struct zephyr_ll *sch)
 {
-	assert(CONFIG_CORE_COUNT == 1 || sch->core == cpu_get_id());
+	assert(CONFIG_CORE_COUNT == 1 || IS_ENABLED(CONFIG_SOF_USERSPACE_LL) ||
+	       sch->core == cpu_get_id());
 }
 
 /* Locking: caller should hold the domain lock */
@@ -176,6 +189,8 @@ static void zephyr_ll_run(void *data)
 	struct list_item *list, *tmp, task_head = LIST_INIT(task_head);
 	uint32_t flags;
 
+	tr_dbg(&ll_tr, "entry");
+
 	zephyr_ll_lock(sch, &flags);
 
 	/*
@@ -248,8 +263,11 @@ static void zephyr_ll_run(void *data)
 
 	zephyr_ll_unlock(sch, &flags);
 
+#ifndef CONFIG_SOF_USERSPACE_LL
+	/* TODO: what to do with notifiers? */
 	notifier_event(sch, NOTIFIER_ID_LL_POST_RUN,
 		       NOTIFIER_TARGET_CORE_LOCAL, NULL, 0);
+#endif
 }
 
 static void schedule_ll_callback(void *data)
@@ -345,6 +363,16 @@ static int zephyr_ll_task_schedule_common(struct zephyr_ll *sch, struct task *ta
 
 		tr_err(&ll_tr, "cannot register domain %d", ret);
 
+#if CONFIG_SOF_USERSPACE_LL
+	k_thread_access_grant(zephyr_domain_thread_tid(sch->ll_domain), sch->lock);
+	zephyr_ll_mem_domain_add_thread(zephyr_domain_thread_tid(sch->ll_domain));
+
+	tr_dbg(&ll_tr, "granting access to lock %p for thread %p", sch->lock,
+	       zephyr_domain_thread_tid(sch->ll_domain));
+	tr_dbg(&ll_tr, "granting access to domain lock %p for thread %p", sch->ll_domain->lock,
+	       zephyr_domain_thread_tid(sch->ll_domain));
+#endif
+
 	return 0;
 }
@@ -432,7 +460,7 @@ static int zephyr_ll_task_free(void *data, struct task *task)
 
 	/* Protect against racing with schedule_task() */
 	zephyr_ll_lock(sch, &flags);
 	task->priv_data = NULL;
-	rfree(pdata);
+	sof_heap_free(sch->heap, pdata);
 	zephyr_ll_unlock(sch, &flags);
 
 	return 0;
@@ -493,14 +521,26 @@ static const struct scheduler_ops zephyr_ll_ops = {
 	.scheduler_free = zephyr_ll_scheduler_free,
 };
 
+#if CONFIG_SOF_USERSPACE_LL
+struct task *zephyr_ll_task_alloc(void)
+{
+	return sof_heap_alloc(zephyr_ll_heap(), SOF_MEM_FLAG_USER,
+			      sizeof(struct task), sizeof(void *));
+}
+#endif /* CONFIG_SOF_USERSPACE_LL */
+
 int zephyr_ll_task_init(struct task *task,
 			const struct sof_uuid_entry *uid, uint16_t type,
 			uint16_t priority, enum task_state (*run)(void *data),
 			void *data, uint16_t core, uint32_t flags)
 {
 	struct zephyr_ll_pdata *pdata;
+	struct k_heap *heap = sof_sys_heap_get();
+	int alloc_flags = SOF_MEM_FLAG_KERNEL | SOF_MEM_FLAG_COHERENT;
 	int ret;
 
+	tr_dbg(&ll_tr, "ll-scheduler task %p init", data);
+
 	if (task->priv_data)
 		return -EEXIST;
 
@@ -509,13 +549,19 @@ int zephyr_ll_task_init(struct task *task,
 	if (ret < 0)
 		return ret;
 
-	pdata = rzalloc(SOF_MEM_FLAG_KERNEL | SOF_MEM_FLAG_COHERENT,
-			sizeof(*pdata));
+#if CONFIG_SOF_USERSPACE_LL
+	heap = zephyr_ll_heap();
+	alloc_flags = SOF_MEM_FLAG_USER;
+#endif
+	pdata = sof_heap_alloc(heap, alloc_flags, sizeof(*pdata), 0);
 	if (!pdata) {
 		tr_err(&ll_tr, "alloc failed");
 		return -ENOMEM;
 	}
 
+	/* sof_heap_alloc() does not zero the allocation */
+	memset(pdata, 0, sizeof(*pdata));
+
 	k_sem_init(&pdata->sem, 0, 1);
 
 	task->priv_data = pdata;
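zephyr_ll_task_init() above introduces the allocation pattern this patch applies throughout: default to the kernel system heap, switch heap and flags when the userspace LL build is active. A condensed sketch of just that pattern; sof_heap_alloc()/sof_heap_free() signatures follow their use elsewhere in this patch, and 'struct my_obj' is illustrative:

    #include <string.h>
    /* plus the SOF allocator and LL scheduler headers this patch touches */

    struct my_obj { int value; };

    static struct my_obj *alloc_my_obj(void)
    {
    	struct k_heap *heap = sof_sys_heap_get();
    	int flags = SOF_MEM_FLAG_KERNEL | SOF_MEM_FLAG_COHERENT;
    	struct my_obj *obj;

    #if CONFIG_SOF_USERSPACE_LL
    	heap = zephyr_ll_heap();
    	flags = SOF_MEM_FLAG_USER;
    #endif

    	obj = sof_heap_alloc(heap, flags, sizeof(*obj), 0);
    	if (obj)
    		memset(obj, 0, sizeof(*obj));	/* sof_heap_alloc() does not zero */

    	return obj;	/* release later with sof_heap_free(heap, obj) */
    }
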
"ll-scheduler init done, sch %p sch->lock %p", sch, sch->lock); +#endif + scheduler_init(domain->type, &zephyr_ll_ops, sch); return 0; diff --git a/zephyr/Kconfig b/zephyr/Kconfig index 665d404ef35e..9fe32a360696 100644 --- a/zephyr/Kconfig +++ b/zephyr/Kconfig @@ -29,6 +29,16 @@ config SOF_USERSPACE_INTERFACE_DMA help Allow user-space threads to use the SOF DMA interface. +config SOF_USERSPACE_LL + bool "Run Low-Latency pipelines in userspace threads" + depends on USERSPACE + select SOF_USERSPACE_INTERFACE_DMA + help + Run Low-Latency (LL) pipelines in userspace threads. This adds + memory protection between operating system resources and + audio application threads. + If unsure, select "N". + config SOF_ZEPHYR_HEAP_CACHED bool "Cached Zephyr heap for SOF memory non-shared zones" default y if CAVS || ACE diff --git a/zephyr/test/CMakeLists.txt b/zephyr/test/CMakeLists.txt index c0fabc414109..c5b66c83bbaa 100644 --- a/zephyr/test/CMakeLists.txt +++ b/zephyr/test/CMakeLists.txt @@ -1,21 +1,25 @@ -if (CONFIG_SOF_BOOT_TEST) - zephyr_library_sources_ifdef(CONFIG_VIRTUAL_HEAP - vmh.c - ) - zephyr_library_sources_ifdef(CONFIG_USERSPACE - userspace/ksem.c - ) +if(CONFIG_SOF_BOOT_TEST) + zephyr_library_sources_ifdef(CONFIG_VIRTUAL_HEAP + vmh.c + ) + zephyr_library_sources_ifdef(CONFIG_USERSPACE + userspace/ksem.c + ) endif() -if (CONFIG_SOF_BOOT_TEST_STANDALONE AND CONFIG_SOF_USERSPACE_INTERFACE_DMA) - if (CONFIG_DT_HAS_INTEL_ADSP_HDA_HOST_IN_ENABLED) - zephyr_library_sources(userspace/test_intel_hda_dma.c) - endif() - if (CONFIG_DT_HAS_INTEL_ADSP_HDA_SSP_CAP_ENABLED) - zephyr_library_sources(userspace/test_intel_ssp_dai.c) - endif() +if(CONFIG_SOF_BOOT_TEST_STANDALONE AND CONFIG_SOF_USERSPACE_INTERFACE_DMA) + if(CONFIG_DT_HAS_INTEL_ADSP_HDA_HOST_IN_ENABLED) + zephyr_library_sources(userspace/test_intel_hda_dma.c) + endif() + if(CONFIG_DT_HAS_INTEL_ADSP_HDA_SSP_CAP_ENABLED) + zephyr_library_sources(userspace/test_intel_ssp_dai.c) + endif() endif() -if (CONFIG_SOF_BOOT_TEST_STANDALONE AND CONFIG_USERSPACE) +if(CONFIG_SOF_BOOT_TEST_STANDALONE AND CONFIG_USERSPACE) zephyr_library_sources(userspace/test_mailbox.c) endif() + +if(CONFIG_SOF_BOOT_TEST_STANDALONE AND CONFIG_SOF_USERSPACE_LL) + zephyr_library_sources(userspace/test_ll_task.c) +endif() diff --git a/zephyr/test/userspace/README.md b/zephyr/test/userspace/README.md index 703a264ef430..aec0b60f02ed 100644 --- a/zephyr/test/userspace/README.md +++ b/zephyr/test/userspace/README.md @@ -12,12 +12,18 @@ Available tests: - Test Zephyr DAI interface, together with SOF DMA wrapper from a user thread. Mimics the call flows done in sof/src/audio/dai-zephyr.c. Use cavstool.py as host runner. +- test_ll_test.c + - Test Low-Latency (LL) scheduler in user-space mode. Creates + a user-space LL scheduler, and uses it to create and run tasks. + - Tests functionality used by SOF audio pipeline framework to + create tasks for audio pipeline logic. - test_mailbox.c - Test use of sof/mailbox.h interface from a Zephyr user thread. 
diff --git a/zephyr/test/CMakeLists.txt b/zephyr/test/CMakeLists.txt
index c0fabc414109..c5b66c83bbaa 100644
--- a/zephyr/test/CMakeLists.txt
+++ b/zephyr/test/CMakeLists.txt
@@ -1,21 +1,25 @@
-if (CONFIG_SOF_BOOT_TEST)
-	zephyr_library_sources_ifdef(CONFIG_VIRTUAL_HEAP
-		vmh.c
-	)
-	zephyr_library_sources_ifdef(CONFIG_USERSPACE
-		userspace/ksem.c
-	)
+if(CONFIG_SOF_BOOT_TEST)
+	zephyr_library_sources_ifdef(CONFIG_VIRTUAL_HEAP
+		vmh.c
+	)
+	zephyr_library_sources_ifdef(CONFIG_USERSPACE
+		userspace/ksem.c
+	)
 endif()
 
-if (CONFIG_SOF_BOOT_TEST_STANDALONE AND CONFIG_SOF_USERSPACE_INTERFACE_DMA)
-	if (CONFIG_DT_HAS_INTEL_ADSP_HDA_HOST_IN_ENABLED)
-		zephyr_library_sources(userspace/test_intel_hda_dma.c)
-	endif()
-	if (CONFIG_DT_HAS_INTEL_ADSP_HDA_SSP_CAP_ENABLED)
-		zephyr_library_sources(userspace/test_intel_ssp_dai.c)
-	endif()
+if(CONFIG_SOF_BOOT_TEST_STANDALONE AND CONFIG_SOF_USERSPACE_INTERFACE_DMA)
+	if(CONFIG_DT_HAS_INTEL_ADSP_HDA_HOST_IN_ENABLED)
+		zephyr_library_sources(userspace/test_intel_hda_dma.c)
+	endif()
+	if(CONFIG_DT_HAS_INTEL_ADSP_HDA_SSP_CAP_ENABLED)
+		zephyr_library_sources(userspace/test_intel_ssp_dai.c)
+	endif()
 endif()
 
-if (CONFIG_SOF_BOOT_TEST_STANDALONE AND CONFIG_USERSPACE)
+if(CONFIG_SOF_BOOT_TEST_STANDALONE AND CONFIG_USERSPACE)
 	zephyr_library_sources(userspace/test_mailbox.c)
 endif()
+
+if(CONFIG_SOF_BOOT_TEST_STANDALONE AND CONFIG_SOF_USERSPACE_LL)
+	zephyr_library_sources(userspace/test_ll_task.c)
+endif()
diff --git a/zephyr/test/userspace/README.md b/zephyr/test/userspace/README.md
index 703a264ef430..aec0b60f02ed 100644
--- a/zephyr/test/userspace/README.md
+++ b/zephyr/test/userspace/README.md
@@ -12,12 +12,18 @@ Available tests:
 	- Test Zephyr DAI interface, together with SOF DMA wrapper from
 	  a user thread. Mimics the call flows done in sof/src/audio/dai-zephyr.c.
 	  Use cavstool.py as host runner.
+- test_ll_task.c
+	- Test Low-Latency (LL) scheduler in user-space mode. Creates
+	  a user-space LL scheduler and uses it to create and run tasks.
+	- Tests functionality used by the SOF audio pipeline framework to
+	  create tasks for audio pipeline logic.
 - test_mailbox.c
 	- Test use of sof/mailbox.h interface from a Zephyr user thread.
 
 Building for Intel Panther Lake:
 
 ./scripts/xtensa-build-zephyr.py --cmake-args=-DCONFIG_SOF_BOOT_TEST_STANDALONE=y \
   --cmake-args=-DCONFIG_SOF_USERSPACE_INTERFACE_DMA=y \
+  --cmake-args=-DCONFIG_SOF_USERSPACE_LL=y \
   -o app/overlays/ptl/userspace_overlay.conf -o app/winconsole_overlay.conf ptl
 
 Running test:
diff --git a/zephyr/test/userspace/test_ll_task.c b/zephyr/test/userspace/test_ll_task.c
new file mode 100644
index 000000000000..1bad95a5a3fc
--- /dev/null
+++ b/zephyr/test/userspace/test_ll_task.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright(c) 2026 Intel Corporation.
+ */
+
+/*
+ * Test case for creation of low-latency threads in user-space.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include <stddef.h> /* offsetof() */
+
+LOG_MODULE_DECLARE(sof_boot_test, LOG_LEVEL_DBG);
+
+/* f11818eb-e92e-4082-82a3-dc54c604ebf3 */
+SOF_DEFINE_UUID("test_task", test_task_uuid, 0xf11818eb, 0xe92e, 0x4082,
+		0x82, 0xa3, 0xdc, 0x54, 0xc6, 0x04, 0xeb, 0xf3);
+
+K_APPMEM_PARTITION_DEFINE(userspace_ll_part);
+
+/* Global variable for test runs counter, accessible from user-space */
+K_APP_BMEM(userspace_ll_part) static int test_runs;
+
+static enum task_state task_callback(void *arg)
+{
+	LOG_INF("entry");
+
+	if (++test_runs > 3)
+		return SOF_TASK_STATE_COMPLETED;
+
+	return SOF_TASK_STATE_RESCHEDULE;
+}
+
+static void ll_task_test(void)
+{
+	struct task *task;
+	int priority = 0;
+	int core = 0;
+	int ret;
+
+	/* Initialize global test runs counter */
+	test_runs = 0;
+
+	task = zephyr_ll_task_alloc();
+	zassert_not_null(task);
+
+	/* allow user space to report status via 'test_runs' */
+	k_mem_domain_add_partition(zephyr_ll_mem_domain(), &userspace_ll_part);
+
+	/* work in progress, see pipeline-schedule.c */
+	ret = schedule_task_init_ll(task, SOF_UUID(test_task_uuid), SOF_SCHEDULE_LL_TIMER,
+				    priority, task_callback,
+				    (void *)&test_runs, core, 0);
+	zassert_equal(ret, 0);
+
+	LOG_INF("task init done");
+
+	/* Schedule the task to run immediately with 1 ms period */
+	ret = schedule_task(task, 0, 1000); /* 0 = start now, 1000 us = 1 ms period */
+	zassert_equal(ret, 0);
+
+	LOG_INF("task scheduled and running");
+
+	/* Let the task run for a bit */
+	k_sleep(K_MSEC(10));
+
+	/* Cancel the task to stop any scheduled execution */
+	ret = schedule_task_cancel(task);
+	zassert_equal(ret, 0);
+
+	/* Free task resources */
+	ret = schedule_task_free(task);
+	zassert_equal(ret, 0);
+
+	LOG_INF("test complete");
+}
+
+ZTEST(userspace_ll, ll_task_test)
+{
+	ll_task_test();
+	ztest_test_pass();
+}
+
+ZTEST_SUITE(userspace_ll, NULL, NULL, NULL, NULL, NULL);
+
+/**
+ * SOF main has booted up and IPC handling is stopped.
+ * Run the suite with ztest_run_test_suite().
+ */
+static int run_tests(void)
+{
+	ztest_run_test_suite(userspace_ll, false, 1, 1, NULL);
+	return 0;
+}
+
+SYS_INIT(run_tests, APPLICATION, 99);
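For reference, the K_APPMEM_PARTITION_DEFINE/K_APP_BMEM pair the test relies on is the standard Zephyr idiom for data that both privileged and user code may touch; the data becomes writable for a thread once its partition is added to that thread's memory domain. A minimal sketch with illustrative names:

    #include <zephyr/kernel.h>
    #include <zephyr/app_memory/app_memdomain.h>

    K_APPMEM_PARTITION_DEFINE(demo_part);
    K_APP_BMEM(demo_part) static int demo_counter;

    static void expose_counter(struct k_mem_domain *dom)
    {
    	/* after this, user threads in 'dom' may increment demo_counter */
    	k_mem_domain_add_partition(dom, &demo_part);
    }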