diff --git a/src/include/sof/trace/trace.h b/src/include/sof/trace/trace.h
index b8d68b0d5ce3..155964698c56 100644
--- a/src/include/sof/trace/trace.h
+++ b/src/include/sof/trace/trace.h
@@ -97,6 +97,9 @@ struct trace_filter {
 
 #if CONFIG_TRACE
 
+#include <stdarg.h>
+#include <user/trace.h> /* LOG_LEVEL_... */
+
 /*
  * trace_event macro definition
  *
@@ -116,6 +119,7 @@ struct trace_filter {
  * image size. This way more elaborate log messages are possible and encouraged,
  * for better debugging experience, without worrying about runtime performance.
  */
+
 /* Map the different trace_xxxx_with_ids(... ) levels to the
  * _trace_event_with_ids(level_xxxx, ...) macro shared across log
  * levels.
  */
@@ -143,10 +147,13 @@ void trace_off(void);
 void trace_init(struct sof *sof);
 
 /* All tracing macros in this file end up calling these functions in the end. */
+typedef void (*log_func_t)(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx,
+			   uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list args);
+
 void trace_log_filtered(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx,
-			uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, ...);
+			uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list args);
 void trace_log_unfiltered(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx,
-			  uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, ...);
+			  uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list args);
 struct sof_ipc_trace_filter_elem *trace_filter_fill(struct sof_ipc_trace_filter_elem *elem,
 						    struct sof_ipc_trace_filter_elem *end,
 						    struct trace_filter *filter);
@@ -158,6 +165,20 @@ int trace_filter_update(const struct trace_filter *elem);
 #define _trace_event_atomic_with_ids(lvl, class, ctx, id_1, id_2, format, ...)	\
 	_log_message(trace_log_filtered, true, lvl, class, ctx, id_1, id_2, format, ##__VA_ARGS__)
 
+/**
+ * Appends one SOF dictionary entry and log statement to the ring buffer
+ * implementing the 'etrace' in shared memory.
+ *
+ * @param atomic_context Take the trace->lock if false.
+ * @param log_entry_pointer dictionary index produced by the
+ *        _DECLARE_LOG_ENTRY macro.
+ * @param n_args number of va_args
+ */
+void mtrace_dict_entry(bool atomic_context, uint32_t log_entry_pointer, int n_args, ...);
+
+/** Posts a fully prepared log header + log entry */
+void mtrace_event(const char *complete_packet, uint32_t length);
+
 /** The start of this linker output MUST match the 'ldc_entry_header'
  *  struct defined in the logger program running in user space.
  */
@@ -183,6 +204,24 @@ int trace_filter_update(const struct trace_filter *elem);
 		format						\
 	}
 
+#ifdef CONFIG_TRACEM /* Send everything to shared memory too */
+# ifdef __ZEPHYR__
+/* We don't use Zephyr's dictionary yet so there's not enough space for
+ * DEBUG messages
+ */
+# define MTRACE_DUPLICATION_LEVEL LOG_LEVEL_INFO
+# else
+# define MTRACE_DUPLICATION_LEVEL LOG_LEVEL_DEBUG
+# endif
+#else /* copy only ERRORS */
+# define MTRACE_DUPLICATION_LEVEL LOG_LEVEL_ERROR
+#endif /* CONFIG_TRACEM */
+
+/* This function is _not_ passed the format string to save space */
+void _log_sofdict(log_func_t sofdict_logf, bool atomic, const void *log_entry,
+		  const struct tr_ctx *ctx, const uint32_t lvl,
+		  uint32_t id_1, uint32_t id_2, int arg_count, ...);
+
 /* _log_message() */
 
 #ifdef CONFIG_LIBRARY
@@ -222,7 +261,8 @@ _thrown_from_macro_BASE_LOG_in_trace_h
 
 /** _log_message is where the memory-saving dictionary magic described
  * above happens: the "format" string argument is moved to a special
- * linker section and replaced by a &log_entry pointer to it.
+ * linker section and replaced by a &log_entry pointer to it. This must
+ * be a macro for the source location to be meaningful.
  */
 #define _log_message(log_func, atomic, lvl, comp_class, ctx, id_1, id_2, format, ...)	\
 do {									\
@@ -233,9 +273,25 @@ do { \
 		META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__),		\
 		BASE_LOG_ASSERT_FAIL_MSG				\
 	);								\
-	log_func(atomic, &log_entry, ctx, lvl, id_1, id_2,		\
-		 META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__), ##__VA_ARGS__);	\
+	_log_sofdict(log_func, atomic, &log_entry, ctx, lvl, id_1, id_2,	\
+		     META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__), ##__VA_ARGS__); \
+	_log_nodict(atomic, META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__),	\
+		    lvl, format, ##__VA_ARGS__);			\
+} while (0)
+
+#ifdef __ZEPHYR__
+/* Just like XTOS, only the most urgent messages go to limited
+ * shared memory.
+ */
+#define _log_nodict(atomic, arg_count, lvl, format, ...)		\
+do {									\
+	if ((lvl) <= MTRACE_DUPLICATION_LEVEL)				\
+		printk("%llu " format "\n", platform_timer_get(NULL),	\
+		       ##__VA_ARGS__);					\
 } while (0)
+#else
+#define _log_nodict(atomic, n_args, lvl, format, ...)
+#endif
 
 #endif /* CONFIG_LIBRARY */
 
@@ -431,16 +487,11 @@ struct tr_ctx {
 			  too_many_mtrace_printf_arguments);		\
 	_DECLARE_LOG_ENTRY(log_level, format_str, _TRACE_INV_CLASS,	\
 			   META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__)); \
-	mtrace_dict_entry((uint32_t)&log_entry,				\
+	mtrace_dict_entry(true, (uint32_t)&log_entry,			\
 			  META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__), \
 			  ##__VA_ARGS__);				\
 } while (0)
 
-/** Adds log_header prefix and appends arguments before sending */
-void mtrace_dict_entry(uint32_t log_entry_pointer, int n_args, ...);
-
-/** Posts a fully prepared log header + log entry */
-void mtrace_event(const char *complete_packet, uint32_t length);
 
 #else
 
diff --git a/src/ipc/ipc3/handler.c b/src/ipc/ipc3/handler.c
index c5d1c034e7cd..30e72e91a1bf 100644
--- a/src/ipc/ipc3/handler.c
+++ b/src/ipc/ipc3/handler.c
@@ -729,7 +729,7 @@ static int ipc_glb_pm_message(uint32_t header)
 /*
  * Debug IPC Operations.
  */
-#if CONFIG_SUECREEK || defined __ZEPHYR__
+#if CONFIG_SUECREEK
 static int ipc_dma_trace_config(uint32_t header)
 {
 	return 0;
@@ -799,7 +799,7 @@ static int ipc_dma_trace_config(uint32_t header)
 error:
 	return err;
 }
-#endif /* CONFIG_SUECREEK || defined __ZEPHYR__ */
+#endif /* CONFIG_SUECREEK */
 
 static int ipc_trace_filter_update(uint32_t header)
 {
diff --git a/src/platform/intel/cavs/platform.c b/src/platform/intel/cavs/platform.c
index 9f529efdd822..4356a880e150 100644
--- a/src/platform/intel/cavs/platform.c
+++ b/src/platform/intel/cavs/platform.c
@@ -505,7 +505,6 @@ int platform_init(struct sof *sof)
 	if (ret < 0)
 		return ret;
 
-#ifndef __ZEPHYR__
 #if CONFIG_DW_SPI
 	/* initialize the SPI slave */
 	trace_point(TRACE_BOOT_PLATFORM_SPI);
@@ -530,7 +529,7 @@ int platform_init(struct sof *sof)
 
 	/* show heap status */
 	heap_trace_all(1);
-#endif /* __ZEPHYR__ */
+
 	return 0;
 }
 
diff --git a/src/schedule/zephyr_ll.c b/src/schedule/zephyr_ll.c
index 0c18eb8a1597..8d073602e3de 100644
--- a/src/schedule/zephyr_ll.c
+++ b/src/schedule/zephyr_ll.c
@@ -15,6 +15,12 @@
 
 #include
 
+/* 1547fe68-de0c-11eb-8461-3158a1294853 */
+DECLARE_SOF_UUID("zll-schedule", zll_sched_uuid, 0x1547fe68, 0xde0c, 0x11eb,
+		 0x84, 0x61, 0x31, 0x58, 0xa1, 0x29, 0x48, 0x53);
+
+DECLARE_TR_CTX(ll_tr, SOF_UUID(zll_sched_uuid), LOG_LEVEL_INFO);
+
 /* per-scheduler data */
 struct zephyr_ll {
 	struct list_item tasks;		/* list of ll tasks */
diff --git a/src/trace/trace.c b/src/trace/trace.c
index 04d2393171de..f311d6021950 100644
--- a/src/trace/trace.c
+++ b/src/trace/trace.c
@@ -92,6 +92,7 @@ static void put_header(void *dst, const struct sof_uuid_entry *uid,
 
 }
 
+#ifndef __ZEPHYR__
 /** Ring buffer for the mailbox trace */
 void mtrace_event(const char *data, uint32_t length)
 {
@@ -110,6 +111,7 @@ void mtrace_event(const char *data, uint32_t length)
 	dcache_writeback_region(t + trace->pos, length);
 	trace->pos += length;
 }
+#endif /* __ZEPHYR__ */
 
 #if CONFIG_TRACE_FILTERING_VERBOSITY
 /**
@@ -222,21 +224,15 @@ static bool trace_filter_flood(uint32_t log_level, uint32_t entry, uint64_t mess
 
 /** Implementation shared and invoked by both adaptive filtering and
  * not. Serializes events into trace messages and passes them to
- * dtrace_event() or to mtrace_event() or to both depending on the log
- * lvl and the Kconfiguration.
+ * dtrace_event()
  */
-static void vatrace_log(bool send_atomic, uint32_t log_entry, const struct tr_ctx *ctx,
-			uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list vargs)
+static void dma_trace_log(bool send_atomic, uint32_t log_entry, const struct tr_ctx *ctx,
+			  uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list vargs)
 {
 	uint32_t data[MESSAGE_SIZE_DWORDS(_TRACE_EVENT_MAX_ARGUMENT_COUNT)];
 	const int message_size = MESSAGE_SIZE(arg_count);
 	int i;
 
-#if CONFIG_TRACEM
-	unsigned long flags;
-	struct trace *trace = trace_get();
-#endif /* CONFIG TRACEM */
-
 	/* fill log content. arg_count is in the dictionary. */
 	put_header(data, ctx->uuid_p, id_1, id_2, log_entry,
 		   platform_safe_get_time(timer_get()));
@@ -250,42 +246,25 @@ static void vatrace_log(bool send_atomic, uint32_t log_entry, const struct tr_ct
 	else
 		dtrace_event((const char *)data, message_size);
 
-#if CONFIG_TRACEM
-	/* send event by mail box too. */
-	if (send_atomic) {
-		mtrace_event((const char *)data, MESSAGE_SIZE(arg_count));
-	} else {
-		spin_lock_irq(&trace->lock, flags);
-		mtrace_event((const char *)data, MESSAGE_SIZE(arg_count));
-		spin_unlock_irq(&trace->lock, flags);
-	}
-#else
-	/* send event by mail box if level is LOG_LEVEL_CRITICAL. */
-	if (lvl == LOG_LEVEL_CRITICAL)
-		mtrace_event((const char *)data, MESSAGE_SIZE(arg_count));
-#endif /* CONFIG_TRACEM */
 }
 
 void trace_log_unfiltered(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx,
-			  uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, ...)
+			  uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list vl)
 {
 	struct trace *trace = trace_get();
-	va_list vl;
 
 	if (!trace->enable) {
 		return;
 	}
 
-	va_start(vl, arg_count);
-	vatrace_log(send_atomic, (uint32_t)log_entry, ctx, lvl, id_1, id_2, arg_count, vl);
-	va_end(vl);
+	dma_trace_log(send_atomic, (uint32_t)log_entry, ctx, lvl, id_1, id_2, arg_count, vl);
 }
 
 void trace_log_filtered(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx,
-			uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, ...)
+			uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list vl)
 {
 	struct trace *trace = trace_get();
-	va_list vl;
+
 #if CONFIG_TRACE_FILTERING_ADAPTIVE
 	uint64_t current_ts;
 #endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */
@@ -310,9 +289,7 @@ void trace_log_filtered(bool send_atomic, const void *log_entry, const struct tr
 	}
 #endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */
 
-	va_start(vl, arg_count);
-	vatrace_log(send_atomic, (uint32_t)log_entry, ctx, lvl, id_1, id_2, arg_count, vl);
-	va_end(vl);
+	dma_trace_log(send_atomic, (uint32_t)log_entry, ctx, lvl, id_1, id_2, arg_count, vl);
 }
 
 struct sof_ipc_trace_filter_elem *trace_filter_fill(struct sof_ipc_trace_filter_elem *elem,
@@ -532,9 +509,9 @@ void trace_init(struct sof *sof)
 	dma_trace_init_early(sof);
 }
 
-void mtrace_dict_entry(uint32_t dict_entry_address, int n_args, ...)
+static void mtrace_dict_entry_vl(bool atomic_context, uint32_t dict_entry_address,
+				 int n_args, va_list ap)
 {
-	va_list ap;
 	int i;
 	char packet[MESSAGE_SIZE(_TRACE_EVENT_MAX_ARGUMENT_COUNT)];
 	uint32_t *args = (uint32_t *)&packet[MESSAGE_SIZE(0)];
@@ -543,10 +520,45 @@ void trace_init(struct sof *sof)
 	put_header(packet, dt_tr.uuid_p, _TRACE_INV_ID, _TRACE_INV_ID,
 		   dict_entry_address, tstamp);
 
-	va_start(ap, n_args);
-	for (i = 0; i < n_args; i++)
+	for (i = 0; i < MIN(n_args, _TRACE_EVENT_MAX_ARGUMENT_COUNT); i++)
 		args[i] = va_arg(ap, uint32_t);
+
+	if (atomic_context) {
+		mtrace_event(packet, MESSAGE_SIZE(n_args));
+	} else {
+		struct trace * const trace = trace_get();
+		uint32_t saved_flags;
+
+		spin_lock_irq(&trace->lock, saved_flags);
+		mtrace_event(packet, MESSAGE_SIZE(n_args));
+		spin_unlock_irq(&trace->lock, saved_flags);
+	}
+}
+
+void mtrace_dict_entry(bool atomic_context, uint32_t dict_entry_address, int n_args, ...)
+{
+	va_list ap;
+
+	va_start(ap, n_args);
+	mtrace_dict_entry_vl(atomic_context, dict_entry_address, n_args, ap);
 	va_end(ap);
+}
 
-	mtrace_event(packet, MESSAGE_SIZE(n_args));
+void _log_sofdict(log_func_t sofdict_logf, bool atomic, const void *log_entry,
+		  const struct tr_ctx *ctx, const uint32_t lvl,
+		  uint32_t id_1, uint32_t id_2, int arg_count, ...)
+{
+	va_list ap;
+
+#ifndef __ZEPHYR__ /* for Zephyr see _log_nodict() in trace.h */
+	if (lvl <= MTRACE_DUPLICATION_LEVEL) {
+		va_start(ap, arg_count);
+		mtrace_dict_entry_vl(atomic, (uint32_t)log_entry, arg_count, ap);
+		va_end(ap);
+	}
+#endif
+
+	va_start(ap, arg_count);
+	sofdict_logf(atomic, log_entry, ctx, lvl, id_1, id_2, arg_count, ap);
+	va_end(ap);
 }
diff --git a/test/cmocka/src/common_mocks.c b/test/cmocka/src/common_mocks.c
index 59ed9fce496c..a5a6626f0796 100644
--- a/test/cmocka/src/common_mocks.c
+++ b/test/cmocka/src/common_mocks.c
@@ -101,7 +101,8 @@ void WEAK __panic(uint32_t p, char *filename, uint32_t linenum)
 }
 
 void WEAK trace_log_filtered(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx,
-			     uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, ...)
+			     uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count,
+			     va_list args)
 {
 	(void) send_atomic;
 	(void) log_entry;
@@ -110,6 +111,13 @@ void WEAK trace_log_filtered(bool send_atomic, const void *log_entry, const stru
 	(void) id_1;
 	(void) id_2;
 	(void) arg_count;
+	(void) args;
+}
+
+void WEAK _log_sofdict(log_func_t sofdict_logf, bool atomic, const void *log_entry,
+		       const struct tr_ctx *ctx, const uint32_t lvl,
+		       uint32_t id_1, uint32_t id_2, int arg_count, ...)
+{
 }
 
 void WEAK trace_flush_dma_to_mbox(void)
diff --git a/zephyr/include/sof/trace/trace.h b/zephyr/include/sof/trace/trace.h
index 7d24708c8845..64888cff22d6 100644
--- a/zephyr/include/sof/trace/trace.h
+++ b/zephyr/include/sof/trace/trace.h
@@ -25,10 +25,10 @@ struct timer;
 uint64_t platform_timer_get(struct timer *timer);
 
 /*
- * Override SOF dictionary macros for now and let Zephyr take care of
- * the physical log IO.
+ * Override SOF mtrace_printf() macro for now to support Zephyr's shared
+ * memory logger. Note the DMA trace can be copied to the shared memory
+ * too thanks to CONFIG_TRACEM.
  */
-#undef _log_message
 #undef mtrace_printf
 
 #if USE_PRINTK
@@ -38,18 +38,8 @@ uint64_t platform_timer_get(struct timer *timer);
 		printk("%llu: " format "\n", platform_timer_get(NULL),	\
 		       ##__VA_ARGS__);					\
 	} while (0)
-#define _log_message(log_func, atomic, level, comp_class, ctx, id1, id2, format, ...) \
-	do { \
-		if ((level) <= SOF_ZEPHYR_TRACE_LEVEL) \
-			printk("%llu: " format "\n", platform_timer_get(NULL), \
-			       ##__VA_ARGS__); \
-	} while (0)
-#else /* not tested */
-#define _log_message(log_func, atomic, level, comp_class, ctx, id1, id2, format, ...) \
-	do { \
-		Z_LOG(level, "%u: " format, (uint32_t)platform_timer_get(NULL), \
-		      ##__VA_ARGS__); \
-	} while (0)
+#else
+#error "TODO: Z_LOG()"
 #endif
 
 #endif /* __INCLUDE_TRACE__ */
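
For reference, the pattern introduced by _log_sofdict() above is a variadic front-end that fans one log call out to two sinks, restarting its va_list once per consumer. The standalone C sketch below illustrates only that pattern; every name in it (sink_t, log_fanout, DUPLICATION_LEVEL, dict_sink, mbox_sink) is made up for illustration and is not a SOF identifier.

#include <stdarg.h>
#include <stdio.h>

/* Illustrative stand-in for a level threshold like MTRACE_DUPLICATION_LEVEL. */
#define DUPLICATION_LEVEL 1

typedef void (*sink_t)(int lvl, int arg_count, va_list ap);

/* Stands in for the dictionary/DMA-trace path (the sofdict_logf callback). */
static void dict_sink(int lvl, int arg_count, va_list ap)
{
	printf("dict lvl=%d:", lvl);
	for (int i = 0; i < arg_count; i++)
		printf(" %u", va_arg(ap, unsigned int));
	printf("\n");
}

/* Stands in for the mailbox/shared-memory copy. */
static void mbox_sink(int lvl, int arg_count, va_list ap)
{
	printf("mbox lvl=%d:", lvl);
	for (int i = 0; i < arg_count; i++)
		printf(" %u", va_arg(ap, unsigned int));
	printf("\n");
}

/* Variadic front-end: each sink gets its own freshly started va_list. */
static void log_fanout(sink_t primary, int lvl, int arg_count, ...)
{
	va_list ap;

	if (lvl <= DUPLICATION_LEVEL) {		/* duplicate urgent messages */
		va_start(ap, arg_count);
		mbox_sink(lvl, arg_count, ap);
		va_end(ap);
	}

	va_start(ap, arg_count);		/* restart for the primary sink */
	primary(lvl, arg_count, ap);
	va_end(ap);
}

int main(void)
{
	log_fanout(dict_sink, 1, 3, 10u, 20u, 30u);	/* copied to both sinks */
	log_fanout(dict_sink, 3, 2, 1u, 2u);		/* dictionary sink only */
	return 0;
}

Restarting with a va_start()/va_end() pair per consumer avoids handing a partially consumed va_list to the second sink, which would be undefined behaviour; the two va_start()/va_end() pairs in _log_sofdict() follow the same approach.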