[CI ONLY] Test #8878

Draft
wants to merge 8 commits into base: main
2 changes: 1 addition & 1 deletion src/include/sof/audio/kpb.h
@@ -46,7 +46,7 @@ struct comp_buffer;
#define KPB_MAX_SINK_CNT (1 + KPB_MAX_NO_OF_CLIENTS)
#define KPB_NO_OF_HISTORY_BUFFERS 2 /**< no of internal buffers */
#define KPB_ALLOCATION_STEP 0x100
#define KPB_NO_OF_MEM_POOLS 3
#define KPB_NO_OF_MEM_POOLS 5
#define KPB_BYTES_TO_FRAMES(bytes, sample_width, channels_number) \
((bytes) / ((KPB_SAMPLE_CONTAINER_SIZE(sample_width) / 8) * \
(channels_number)))
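For reference, KPB_BYTES_TO_FRAMES() above is a plain bytes-to-frames conversion: assuming KPB_SAMPLE_CONTAINER_SIZE(32) evaluates to 32, KPB_BYTES_TO_FRAMES(512, 32, 2) = 512 / ((32 / 8) * 2) = 64 frames.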
8 changes: 8 additions & 0 deletions zephyr/Kconfig
@@ -86,4 +86,12 @@ config SOF_BOOT_TEST
initialized. After that SOF will continue running and be usable as
usual.

config VIRTUAL_HEAP
bool "Use virtual memory heap to allocate a buffers"
default y if ACE
default n
depends on ACE
help
Enable this option to allocate buffers from the virtual memory heap allocator,
which is built on a set of buffers of predetermined sizes.
endif
2 changes: 1 addition & 1 deletion zephyr/include/sof/lib/regions_mm.h
@@ -36,7 +36,7 @@
* either be spanned on specifically configured heap or have
* individual configs with bigger block sizes.
*/
#define MAX_MEMORY_ALLOCATORS_COUNT 8
#define MAX_MEMORY_ALLOCATORS_COUNT 10

/* vmh_get_default_heap_config() function will try to split the region
* down the given count. Only applicable when API client did not
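The higher allocator limit appears to leave room for the nine block-size buckets defined by the new static_hp_buffers table in zephyr/lib/alloc.c below.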
152 changes: 146 additions & 6 deletions zephyr/lib/alloc.c
@@ -18,6 +18,18 @@
#include <sof/trace/trace.h>
#include <rtos/symbol.h>
#include <rtos/wait.h>
#if CONFIG_VIRTUAL_HEAP
#include <sof/lib/regions_mm.h>

struct vmh_heap *virtual_buffers_heap[CONFIG_MP_MAX_NUM_CPUS];
struct k_spinlock vmh_lock;

#undef HEAPMEM_SIZE
/* Buffers are allocated from virtual space, so the system heap can safely be
* reduced to 256 KiB.
*/
#define HEAPMEM_SIZE 0x40000
#endif /* CONFIG_VIRTUAL_HEAP */


/* Zephyr includes */
#include <zephyr/init.h>
@@ -193,6 +205,98 @@ static void l3_heap_free(struct k_heap *h, void *mem)

#endif

#if CONFIG_VIRTUAL_HEAP
static void *virtual_heap_alloc(struct vmh_heap *heap, uint32_t flags, uint32_t caps, size_t bytes,
uint32_t align)
{
void *mem;

//K_SPINLOCK(&vmh_lock) {
// heap->core_id = cpu_get_id();
mem = vmh_alloc(heap, bytes);
//}

if (!mem)
return NULL;

assert(IS_ALIGNED(mem, align));

if (flags & SOF_MEM_FLAG_COHERENT)
return sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)mem);

return mem;
}

/**
* Checks whether pointer is from virtual memory range.
* @param ptr Pointer to memory being checked.
* @return True if pointer falls into virtual memory region, false otherwise.
*/
static bool is_virtual_heap_pointer(void *ptr)
{
uintptr_t virtual_heap_start = POINTER_TO_UINT(sys_cache_cached_ptr_get(&heapmem)) +
HEAPMEM_SIZE;
uintptr_t virtual_heap_end = CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE;

if (!is_cached(ptr))
ptr = (__sparse_force void *)sys_cache_cached_ptr_get(ptr);

return ((POINTER_TO_UINT(ptr) >= virtual_heap_start) &&
(POINTER_TO_UINT(ptr) < virtual_heap_end));
}

static void virtual_heap_free(void *ptr)
{
struct vmh_heap *const heap = virtual_buffers_heap[cpu_get_id()];
int ret;

ptr = (__sparse_force void *)sys_cache_cached_ptr_get(ptr);

//K_SPINLOCK(&vmh_lock) {
//virtual_buffers_heap->core_id = cpu_get_id();
ret = vmh_free(heap, ptr);
//}

if (ret)
tr_err(&zephyr_tr, "Unable to free %p! %d", ptr, ret);
}

static const struct vmh_heap_config static_hp_buffers = {
{
{ 128, 32},
{ 512, 8},
{ 1024, 44},
{ 2048, 8},
{ 4096, 11},
{ 8192, 10},
{ 65536, 3},
{ 131072, 1},
{ 524288, 1} /* buffer for kpb */
},
};
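/* Note: reading each entry as { block_size, number_of_blocks } (assumed layout), the
* buckets above pre-define 128*32 + 512*8 + 1024*44 + 2048*8 + 4096*11 + 8192*10 +
* 65536*3 + 131072 + 524288 = 1048576 bytes, i.e. exactly 1 MiB of buffer space for
* each per-core heap.
*/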

static int virtual_heap_init(void)
{
int core;

k_spinlock_init(&vmh_lock);

for (core = 0; core < CONFIG_MP_MAX_NUM_CPUS; core++) {
struct vmh_heap *heap = vmh_init_heap(&static_hp_buffers, MEM_REG_ATTR_CORE_HEAP,
core, false);
if (!heap)
tr_err(&zephyr_tr, "Unable to init virtual heap for core %d!", core);

virtual_buffers_heap[core] = heap;
}

return 0;
}

SYS_INIT(virtual_heap_init, POST_KERNEL, 1);

#endif /* CONFIG_VIRTUAL_HEAP */

static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
{
k_spinlock_key_t key;
@@ -284,11 +388,12 @@ void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
{
void *ptr;
struct k_heap *heap;

const char *hn = "none"; /* heap name, used in allocation failure traces */
/* choose a heap */
if (caps & SOF_MEM_CAPS_L3) {
#if CONFIG_L3_HEAP
heap = &l3_heap;
hn = "l3_heap";
/* Uncached L3_HEAP should be not used */
if (!zone_is_cached(zone)) {
tr_err(&zephyr_tr, "L3_HEAP available for cached zones only!");
@@ -305,6 +410,7 @@ void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
#endif
} else {
heap = &sof_heap;
hn = "sof_heap";
}

if (zone_is_cached(zone) && !(flags & SOF_MEM_FLAG_COHERENT)) {
@@ -316,6 +422,8 @@ void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
*/
ptr = heap_alloc_aligned(heap, PLATFORM_DCACHE_ALIGN, bytes);
}
if (!ptr)
tr_err(&zephyr_tr, "Failed to allocate %zu from %s!", bytes, hn);

if (!ptr && zone == SOF_MEM_ZONE_SYS)
k_panic();
@@ -384,13 +492,21 @@ EXPORT_SYMBOL(rzalloc);
void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
uint32_t align)
{
#if CONFIG_VIRTUAL_HEAP
struct vmh_heap *virtual_heap;
#endif
struct k_heap *heap;
void *ret;

/* choose a heap */
if (caps & SOF_MEM_CAPS_L3) {
#if CONFIG_L3_HEAP
heap = &l3_heap;
return (__sparse_force void *)l3_heap_alloc_aligned(heap, align, bytes);
ret = (__sparse_force void *)l3_heap_alloc_aligned(heap, align, bytes);
if (!ret)
tr_err(&zephyr_tr, "!l3_heap_alloc_aligned");

return ret;
#else
tr_err(&zephyr_tr, "L3_HEAP not available.");
return NULL;
@@ -399,10 +515,28 @@ void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
heap = &sof_heap;
}

if (flags & SOF_MEM_FLAG_COHERENT)
return heap_alloc_aligned(heap, align, bytes);
#if CONFIG_VIRTUAL_HEAP
/* Use virtual heap if it is available */
virtual_heap = virtual_buffers_heap[cpu_get_id()];
if (virtual_heap) {
ret = virtual_heap_alloc(virtual_heap, flags, caps, bytes, align);
if (!ret)
tr_err(&zephyr_tr, "!virtual_heap_alloc");
return ret;
}
#endif /* CONFIG_VIRTUAL_HEAP */

if (flags & SOF_MEM_FLAG_COHERENT) {
ret = heap_alloc_aligned(heap, align, bytes);
if (!ret)
tr_err(&zephyr_tr, "!heap_alloc_aligned");
return ret;
}

return (__sparse_force void *)heap_alloc_aligned_cached(heap, align, bytes);
ret = (__sparse_force void *)heap_alloc_aligned_cached(heap, align, bytes);
if (!ret)
tr_err(&zephyr_tr, "!heap_alloc_aligned_cached");
return ret;
}
EXPORT_SYMBOL(rballoc_align);

@@ -421,6 +555,13 @@ void rfree(void *ptr)
}
#endif

#if CONFIG_VIRTUAL_HEAP
if (is_virtual_heap_pointer(ptr)) {
virtual_heap_free(ptr);
return;
}
#endif

heap_free(&sof_heap, ptr);
}
EXPORT_SYMBOL(rfree);
@@ -432,7 +573,6 @@ static int heap_init(void)
#if CONFIG_L3_HEAP
sys_heap_init(&l3_heap.heap, UINT_TO_POINTER(get_l3_heap_start()), get_l3_heap_size());
#endif

return 0;
}

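With these alloc.c changes, existing rballoc_align()/rfree() callers need no modification: when CONFIG_VIRTUAL_HEAP is enabled, plain buffer allocations are served from the per-core virtual heap and released through the same rfree() path. A minimal illustrative sketch (not part of this patch), assuming the usual SOF constants SOF_MEM_CAPS_RAM and PLATFORM_DCACHE_ALIGN and the rtos/alloc.h header:

#include <rtos/alloc.h>

/* Allocate a 4 KiB runtime buffer; with CONFIG_VIRTUAL_HEAP=y this is served by the
 * per-core virtual heap, otherwise by sof_heap as before.
 */
static void *example_buffer_get(void)
{
	return rballoc_align(0, SOF_MEM_CAPS_RAM, 4096, PLATFORM_DCACHE_ALIGN);
}

static void example_buffer_put(void *buf)
{
	/* rfree() detects virtual-heap pointers and routes them to virtual_heap_free() */
	rfree(buf);
}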
61 changes: 58 additions & 3 deletions zephyr/lib/regions_mm.c
@@ -279,6 +279,44 @@ static bool vmh_get_map_region_boundaries(struct sys_mem_blocks *blocks, const v
return true;
}

/**
* @brief Determine the size of the mapped memory region.
*
* This function calculates the size of a mapped memory region starting from the given address.
* It uses a binary search to find the boundary of the mapped region by probing whether
* individual pages are mapped.
*
* @param addr Starting address of the memory region.
* @param size Pointer to the size of the memory region. This value will be updated to reflect
* the size of the mapped region.
*
* @retval None
*/
static void vmh_get_mapped_size(void *addr, size_t *size)
{
int ret;
uintptr_t check, unused;
uintptr_t left, right;

if (*size <= CONFIG_MM_DRV_PAGE_SIZE)
return;

left = POINTER_TO_UINT(addr);
right = left + *size;
check = right - CONFIG_MM_DRV_PAGE_SIZE;
while (right - left > CONFIG_MM_DRV_PAGE_SIZE) {
ret = sys_mm_drv_page_phys_get(UINT_TO_POINTER(check), &unused);
if (!ret) {
left = check; /* Page is mapped */
} else {
right = check; /* Page is unmapped */
}
check = ALIGN_DOWN(left / 2 + right / 2, CONFIG_MM_DRV_PAGE_SIZE);
}

*size = right - POINTER_TO_UINT(addr);
}
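/* Worked example (assuming 4 KiB pages): for a 16 KiB region at a page-aligned addr with
* only the first two pages mapped, the probes hit addr + 12K (unmapped), addr + 4K
* (mapped) and addr + 8K (unmapped); the loop then stops with right - left == 4K and
* *size is updated to 8192.
*/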

/**
* @brief Maps memory pages for a memory region if they have not been previously mapped for other
* allocations.
@@ -326,8 +364,11 @@ static int vmh_unmap_region(struct sys_mem_blocks *region, void *ptr, size_t siz
const size_t block_size = 1 << region->info.blk_sz_shift;
uintptr_t begin;

if (block_size >= CONFIG_MM_DRV_PAGE_SIZE)
return sys_mm_drv_unmap_region(ptr, ALIGN_UP(size, CONFIG_MM_DRV_PAGE_SIZE));
if (block_size >= CONFIG_MM_DRV_PAGE_SIZE) {
size = ALIGN_UP(size, CONFIG_MM_DRV_PAGE_SIZE);
vmh_get_mapped_size(ptr, &size);
return sys_mm_drv_unmap_region(ptr, size);
}

if (vmh_get_map_region_boundaries(region, ptr, size, &begin, &size))
return sys_mm_drv_unmap_region((void *)begin, size);
@@ -515,6 +556,7 @@ int vmh_free_heap(struct vmh_heap *heap)
* @retval 0 on success;
* @retval -ENOTEMPTY on heap having active allocations.
*/
/* Debug aid: records the most recent error returned by vmh_unmap_region() in vmh_free(). */
int vmh_error;
int vmh_free(struct vmh_heap *heap, void *ptr)
{
int retval;
@@ -617,8 +659,21 @@ int vmh_free(struct vmh_heap *heap, void *ptr)
if (retval)
return retval;

return vmh_unmap_region(heap->physical_blocks_allocators[mem_block_iter], ptr,
/* Xtensa-based platforms have caches that are not coherent between cores. Before a
* memory block is released, the cache must be invalidated: the block may later be
* allocated by another core, and a cache writeback performed by the previous owner
* would overwrite the current contents of main memory. sys_mm_drv_unmap_region()
* already invalidates the cache when it unmaps a memory page, so no explicit
* invalidation is needed when releasing buffers of at least a page in size.
*/
if (size_to_free < CONFIG_MM_DRV_PAGE_SIZE)
sys_cache_data_invd_range(ptr, size_to_free);

int ret = vmh_unmap_region(heap->physical_blocks_allocators[mem_block_iter], ptr,
size_to_free);
if (ret)
vmh_error = ret;
return ret;
}

/**