feat: agent - eBPF Create maps based on functional configuration
yinjiping committed Sep 24, 2024
1 parent c4b7a90 commit 2151b9f
Showing 11 changed files with 97 additions and 73 deletions.
1 change: 1 addition & 0 deletions agent/src/ebpf/kernel/go_tls.bpf.c
@@ -30,6 +30,7 @@ struct bpf_map_def SEC("maps") tls_conn_map = {
.key_size = sizeof(struct tls_conn_key),
.value_size = sizeof(struct tls_conn),
.max_entries = MAX_SYSTEM_THREADS,
+ .feat = FEATURE_UPROBE_GOLANG,
};
/* *INDENT-ON* */

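
What the new field is for: each map is now tagged with the functional module that needs it (FEATURE_UPROBE_GOLANG here), so the user-space side can create only the maps whose feature is enabled in the agent's functional configuration, which is what the commit title describes. A rough sketch of that selection step follows; the struct layout and the is_feature_enabled()/create_one_map() helpers are illustrative assumptions, not code from this commit.

/*
 * Illustrative user-space selection step (not from this commit): walk the
 * map definitions parsed from the ELF "maps" section and create only the
 * maps whose feature tag is enabled by the functional configuration.
 */
#include <stdbool.h>
#include <stdint.h>

struct map_def_s {
	const char *name;
	uint32_t type;
	uint32_t key_size;
	uint32_t value_size;
	uint32_t max_entries;
	uint32_t feat;		/* the field this commit adds */
};

/* Stub: would query the agent's functional configuration. */
static bool is_feature_enabled(uint32_t feat)
{
	(void)feat;
	return true;
}

/* Stub: would perform the actual bpf map creation. */
static int create_one_map(const struct map_def_s *def)
{
	(void)def;
	return 0;
}

static int create_maps(const struct map_def_s *defs, int n)
{
	for (int i = 0; i < n; i++) {
		/* feat == 0 means "no feature tag": always create. */
		if (defs[i].feat != 0 && !is_feature_enabled(defs[i].feat))
			continue;
		if (create_one_map(&defs[i]) < 0)
			return -1;
	}
	return 0;
}

Maps declared without an explicit tag keep feat == 0, which the sketch treats as "always create".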
41 changes: 24 additions & 17 deletions agent/src/ebpf/kernel/include/bpf_base.h
@@ -261,6 +261,7 @@ _Pragma("GCC error \"PT_GO_REGS_PARM\"");
#define __stringify(x) __stringify_1(x)

#define NAME(N) __##N
+ #define MAP_MAX_ENTRIES_DEF 40960

/*
* DeepFlow eBPF program naming convention:
@@ -375,18 +376,20 @@ struct bpf_map_def {
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
+ unsigned int feat;
};

- #define __BPF_MAP_DEF(_kt, _vt, _ents) \
+ #define __BPF_MAP_DEF(_kt, _vt, _ents, _f) \
.key_size = sizeof(_kt), \
.value_size = sizeof(_vt), \
- .max_entries = (_ents)
+ .max_entries = (_ents), \
+ .feat = (_f)

- #define MAP_ARRAY(name, key_type, value_type, max_entries) \
+ #define MAP_ARRAY(name, key_type, value_type, max_entries, feat) \
struct bpf_map_def SEC("maps") __##name = \
{ \
.type = BPF_MAP_TYPE_ARRAY, \
- __BPF_MAP_DEF(key_type, value_type, max_entries), \
+ __BPF_MAP_DEF(key_type, value_type, max_entries, feat), \
}; \
static_always_inline __attribute__((unused)) value_type * name ## __lookup(key_type *key) \
{ \
@@ -402,11 +405,11 @@ static_always_inline __attribute__((unused)) int name ## __delete(key_type *key)
}

// BPF_MAP_TYPE_ARRAY define
- #define MAP_PERARRAY(name, key_type, value_type, max_entries) \
+ #define MAP_PERARRAY(name, key_type, value_type, max_entries, feat) \
struct bpf_map_def SEC("maps") __##name = \
{ \
.type = BPF_MAP_TYPE_PERCPU_ARRAY, \
- __BPF_MAP_DEF(key_type, value_type, max_entries), \
+ __BPF_MAP_DEF(key_type, value_type, max_entries, feat), \
}; \
static_always_inline __attribute__((unused)) value_type * name ## __lookup(key_type *key) \
{ \
@@ -421,33 +424,34 @@ static_always_inline __attribute__((unused)) int name ## __delete(key_type *key)
return bpf_map_delete_elem(& __##name, (const void *)key); \
}

- #define MAP_PERF_EVENT(name, key_type, value_type, max_entries) \
+ #define MAP_PERF_EVENT(name, key_type, value_type, max_entries, feat) \
struct bpf_map_def SEC("maps") __ ## name = \
{ \
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, \
- __BPF_MAP_DEF(key_type, value_type, max_entries), \
+ __BPF_MAP_DEF(key_type, value_type, max_entries, feat), \
};

- #define MAP_PROG_ARRAY(name, key_type, value_type, max_entries) \
+ #define MAP_PROG_ARRAY(name, key_type, value_type, max_entries, feat) \
struct bpf_map_def SEC("maps") __ ## name = \
{ \
.type = BPF_MAP_TYPE_PROG_ARRAY, \
- __BPF_MAP_DEF(key_type, value_type, max_entries), \
+ __BPF_MAP_DEF(key_type, value_type, max_entries, feat), \
};

- #define MAP_STACK_TRACE(name, max) \
+ #define MAP_STACK_TRACE(name, max, f) \
struct bpf_map_def SEC("maps") __ ## name = { \
.type = BPF_MAP_TYPE_STACK_TRACE, \
.key_size = sizeof(__u32), \
.value_size = PERF_MAX_STACK_DEPTH * sizeof(__u64), \
.max_entries = (max), \
+ .feat = (f), \
};

- #define MAP_HASH(name, key_type, value_type, max_entries) \
+ #define MAP_HASH(name, key_type, value_type, max_entries, feat) \
struct bpf_map_def SEC("maps") __##name = \
{ \
.type = BPF_MAP_TYPE_HASH, \
- __BPF_MAP_DEF(key_type, value_type, max_entries), \
+ __BPF_MAP_DEF(key_type, value_type, max_entries, feat), \
}; \
static_always_inline __attribute__((unused)) value_type * name ## __lookup(key_type *key) \
{ \
@@ -463,16 +467,19 @@ static_always_inline __attribute__((unused)) int name ## __delete(key_type *key)
}

#define BPF_HASH3(_name, _key_type, _leaf_type) \
- MAP_HASH(_name, _key_type, _leaf_type, 40960)
+ MAP_HASH(_name, _key_type, _leaf_type, MAP_MAX_ENTRIES_DEF, 0)

#define BPF_HASH4(_name, _key_type, _leaf_type, _size) \
- MAP_HASH(_name, _key_type, _leaf_type, _size)
+ MAP_HASH(_name, _key_type, _leaf_type, _size, 0)

+ #define BPF_HASH5(_name, _key_type, _leaf_type, _size, _feat) \
+ MAP_HASH(_name, _key_type, _leaf_type, _size, _feat)

// helper for default-variable macro function
- #define BPF_HASHX(_1, _2, _3, _4, NAME, ...) NAME
+ #define BPF_HASHX(_1, _2, _3, _4, _5, NAME, ...) NAME

#define BPF_HASH(...) \
- BPF_HASHX(__VA_ARGS__, BPF_HASH4, BPF_HASH3)(__VA_ARGS__)
+ BPF_HASHX(__VA_ARGS__, BPF_HASH5, BPF_HASH4, BPF_HASH3)(__VA_ARGS__)

#define BPF_LEN_CAP(x, cap) (x < cap ? (x & (cap - 1)) : cap)

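
With these changes, BPF_HASH() accepts three, four, or five arguments: BPF_HASHX() selects BPF_HASH3, BPF_HASH4, or BPF_HASH5 by argument count, the size defaults to MAP_MAX_ENTRIES_DEF, and the feature tag defaults to 0. Hand-expanded for illustration (the two map names below are made up):

/* Inside a .bpf.c that includes bpf_base.h -- hand-expanded for illustration. */

/* Three arguments: size and feature tag take the defaults. */
BPF_HASH(demo_args_map, __u64, __u64)
/* -> BPF_HASH3(demo_args_map, __u64, __u64)
 * -> MAP_HASH(demo_args_map, __u64, __u64, MAP_MAX_ENTRIES_DEF, 0) */

/* Five arguments: explicit size and feature tag. */
BPF_HASH(demo_tagged_map, __u64, __u64, 10240, FEATURE_SOCKET_TRACER)
/* -> BPF_HASH5(demo_tagged_map, __u64, __u64, 10240, FEATURE_SOCKET_TRACER)
 * -> MAP_HASH(demo_tagged_map, __u64, __u64, 10240, FEATURE_SOCKET_TRACER) */

The generated name##__lookup()/__delete() accessors are unchanged; only the struct bpf_map_def initializer gains the feature tag.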
2 changes: 1 addition & 1 deletion agent/src/ebpf/kernel/openssl.bpf.c
@@ -35,7 +35,7 @@ struct ssl_ctx_struct {
// Save function arguments and use them when the function returns
// key: pid_tgid
// value: SSL_* arguments
- BPF_HASH(ssl_ctx_map, __u64, struct ssl_ctx_struct)
+ BPF_HASH(ssl_ctx_map, __u64, struct ssl_ctx_struct, MAP_MAX_ENTRIES_DEF, FEATURE_UPROBE_OPENSSL)
/* *INDENT-ON* */

static int get_fd_from_openssl_ssl(void *ssl)
24 changes: 12 additions & 12 deletions agent/src/ebpf/kernel/perf_profiler.bpf.c
@@ -66,12 +66,12 @@
* cache b for writing data and vice versa.
*/

- MAP_PERF_EVENT(profiler_output_a, int, __u32, MAX_CPU)
- MAP_PERF_EVENT(profiler_output_b, int, __u32, MAX_CPU)
- MAP_PROG_ARRAY(cp_progs_jmp_pe_map, __u32, __u32, CP_PROG_PE_NUM)
+ MAP_PERF_EVENT(profiler_output_a, int, __u32, MAX_CPU, FEATURE_PROFILE_ONCPU)
+ MAP_PERF_EVENT(profiler_output_b, int, __u32, MAX_CPU, FEATURE_PROFILE_ONCPU)
+ MAP_PROG_ARRAY(cp_progs_jmp_pe_map, __u32, __u32, CP_PROG_PE_NUM, FEATURE_PROFILE_ONCPU)

- MAP_STACK_TRACE(stack_map_a, STACK_MAP_ENTRIES)
- MAP_STACK_TRACE(stack_map_b, STACK_MAP_ENTRIES)
+ MAP_STACK_TRACE(stack_map_a, STACK_MAP_ENTRIES, FEATURE_PROFILE_ONCPU)
+ MAP_STACK_TRACE(stack_map_b, STACK_MAP_ENTRIES, FEATURE_PROFILE_ONCPU)

typedef struct {
struct bpf_map_def *state;
@@ -93,8 +93,8 @@ typedef __u64 __raw_stack[PERF_MAX_STACK_DEPTH];
*
* Map sizes are configured in user space program
*/
- MAP_HASH(custom_stack_map_a, __u32, __raw_stack, 1)
- MAP_HASH(custom_stack_map_b, __u32, __raw_stack, 1)
+ MAP_HASH(custom_stack_map_a, __u32, __raw_stack, 1, FEATURE_DWARF_UNWINDING)
+ MAP_HASH(custom_stack_map_b, __u32, __raw_stack, 1, FEATURE_DWARF_UNWINDING)

/*
* The following maps are used for DWARF based unwinding
@@ -106,13 +106,13 @@ MAP_HASH(custom_stack_map_b, __u32, __raw_stack, 1)
*
* Map sizes are configured in user space program
*/
- MAP_HASH(process_shard_list_table, __u32, process_shard_list_t, 1)
- MAP_HASH(unwind_entry_shard_table, __u32, unwind_entry_shard_t, 1)
+ MAP_HASH(process_shard_list_table, __u32, process_shard_list_t, 1, FEATURE_DWARF_UNWINDING)
+ MAP_HASH(unwind_entry_shard_table, __u32, unwind_entry_shard_t, 1, FEATURE_DWARF_UNWINDING)

/*
* For sysinfo gathered from BTF
*/
- MAP_ARRAY(unwind_sysinfo, __u32, unwind_sysinfo_t, 1)
+ MAP_ARRAY(unwind_sysinfo, __u32, unwind_sysinfo_t, 1, FEATURE_DWARF_UNWINDING)

typedef struct {
__u64 ip;
@@ -148,7 +148,7 @@ static inline __attribute__((always_inline)) void reset_unwind_state(unwind_stat
__builtin_memset(&state->stack, 0, sizeof(stack_t));
}

- MAP_PERARRAY(heap, __u32, unwind_state_t, 1)
+ MAP_PERARRAY(heap, __u32, unwind_state_t, 1, FEATURE_DWARF_UNWINDING)
#else

typedef void stack_t; // placeholder
@@ -159,7 +159,7 @@ typedef void stack_t; // placeholder
* Used for communication between user space and BPF to control the
* switching between buffer a and buffer b.
*/
- MAP_ARRAY(profiler_state_map, __u32, __u64, PROFILER_CNT)
+ MAP_ARRAY(profiler_state_map, __u32, __u64, PROFILER_CNT, FEATURE_PROFILE_ONCPU)
#ifdef LINUX_VER_5_2_PLUS
static inline __attribute__((always_inline)) void add_frame(stack_t *stack, __u64 frame) {
__u8 len = stack->len;
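
As the comments note, the DWARF-unwinding maps are declared with max_entries = 1 and resized from user space, and the FEATURE_DWARF_UNWINDING tag additionally lets the loader skip them when that feature is off. Purely as an illustration (DeepFlow ships its own loader, and these maps use the legacy SEC("maps") definition style), a libbpf-based loader working with BTF-defined maps could do roughly this; the 4096 entry count is an arbitrary example:

#include <bpf/libbpf.h>
#include <stdbool.h>
#include <stddef.h>

/* Resize the 1-entry placeholder maps before load, or skip creating them
 * when DWARF unwinding is disabled. Illustration only, not DeepFlow code.
 * The MAP_* macros prefix the emitted map names with "__". */
static void prepare_dwarf_maps(struct bpf_object *obj, bool dwarf_enabled)
{
	const char *names[] = {
		"__process_shard_list_table",
		"__unwind_entry_shard_table",
	};

	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		struct bpf_map *m = bpf_object__find_map_by_name(obj, names[i]);
		if (!m)
			continue;
		if (!dwarf_enabled) {
			bpf_map__set_autocreate(m, false);	/* feature off: no map */
			continue;
		}
		bpf_map__set_max_entries(m, 4096);	/* give it its real size */
	}
}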
40 changes: 20 additions & 20 deletions agent/src/ebpf/kernel/socket_trace.bpf.c
@@ -46,7 +46,7 @@
/*
 * Dedicated map for passing data to user space
*/
- MAP_PERF_EVENT(socket_data, int, __u32, MAX_CPU)
+ MAP_PERF_EVENT(socket_data, int, __u32, MAX_CPU, FEATURE_SOCKET_TRACER)

/*
* Why use two Tail Calls jmp tables ?
@@ -66,23 +66,23 @@ MAP_PERF_EVENT(socket_data, int, __u32, MAX_CPU)
* 'progs_jmp_tp_map' for tracepoint (`A -> B`, both A and B are tracepoint program)
*
*/
- MAP_PROG_ARRAY(progs_jmp_kp_map, __u32, __u32, PROG_KP_NUM)
- MAP_PROG_ARRAY(progs_jmp_tp_map, __u32, __u32, PROG_TP_NUM)
+ MAP_PROG_ARRAY(progs_jmp_kp_map, __u32, __u32, PROG_KP_NUM, FEATURE_SOCKET_TRACER)
+ MAP_PROG_ARRAY(progs_jmp_tp_map, __u32, __u32, PROG_TP_NUM, FEATURE_SOCKET_TRACER)

/*
 * The eBPF stack is only 512 bytes and cannot hold HTTP data, so a map is used as the buffer.
*/
- MAP_PERARRAY(data_buf, __u32, struct __socket_data_buffer, 1)
+ MAP_PERARRAY(data_buf, __u32, struct __socket_data_buffer, 1, FEATURE_SOCKET_TRACER)

/*
* For protocol infer buffer
*/
- MAP_PERARRAY(ctx_info, __u32, struct ctx_info_s, 1)
+ MAP_PERARRAY(ctx_info, __u32, struct ctx_info_s, 1, FEATURE_SOCKET_TRACER)

/*
 * Struct member offsets
*/
- MAP_PERARRAY(members_offset, __u32, struct member_fields_offset, 1)
+ MAP_PERARRAY(members_offset, __u32, struct member_fields_offset, 1, FEATURE_SOCKET_TRACER)

@@ -96,58 +96,58 @@ MAP_PERARRAY(members_offset, __u32, struct member_fields_offset, 1)
/*
 * Records the various trace ID values (uniqueness guaranteed; per-CPU, so no locks are used).
 * It can hold 176 years of data (starting from 2022) before a UID repeats:
 * ((2^56 - 1) - sys_boot_time)/10/1000/1000/60/60/24/365 = 176 years
 */
- MAP_PERARRAY(tracer_ctx_map, __u32, struct tracer_ctx_s, 1)
+ MAP_PERARRAY(tracer_ctx_map, __u32, struct tracer_ctx_s, 1, FEATURE_SOCKET_TRACER)

/*
 * Statistics for the various maps
*/
- MAP_ARRAY(trace_stats_map, __u32, struct trace_stats, 1)
+ MAP_ARRAY(trace_stats_map, __u32, struct trace_stats, 1, FEATURE_SOCKET_TRACER)

// key: protocol id, value: is protocol enabled, size: PROTO_NUM
- MAP_ARRAY(protocol_filter, int, int, PROTO_NUM)
+ MAP_ARRAY(protocol_filter, int, int, PROTO_NUM, FEATURE_SOCKET_TRACER)

/**
* @brief Record which protocols allow data segmentation
* reassembly processing.
*
* key: protocol id, value: is protocol allowed?, size: PROTO_NUM
*/
- MAP_ARRAY(allow_reasm_protos_map, int, bool, PROTO_NUM)
+ MAP_ARRAY(allow_reasm_protos_map, int, bool, PROTO_NUM, FEATURE_SOCKET_TRACER)

// 0: allow bitmap; 1: bypass bitmap
- MAP_ARRAY(kprobe_port_bitmap, __u32, struct kprobe_port_bitmap, 2)
+ MAP_ARRAY(kprobe_port_bitmap, __u32, struct kprobe_port_bitmap, 2, FEATURE_SOCKET_TRACER)

/*
* l7-protocol-ports
* Configuring application layer protocol ports, when performing protocol
* inference, inference is only targeted at specified ports of Layer 7
* protocols.
*/
- MAP_ARRAY(proto_ports_bitmap, __u32, ports_bitmap_t, PROTO_NUM)
+ MAP_ARRAY(proto_ports_bitmap, __u32, ports_bitmap_t, PROTO_NUM, FEATURE_SOCKET_TRACER)

// write() syscall's input argument.
// Key is {tgid, pid}.
- BPF_HASH(active_write_args_map, __u64, struct data_args_t)
+ BPF_HASH(active_write_args_map, __u64, struct data_args_t, MAP_MAX_ENTRIES_DEF, FEATURE_SOCKET_TRACER)

// read() syscall's input argument.
// Key is {tgid, pid}.
- BPF_HASH(active_read_args_map, __u64, struct data_args_t)
+ BPF_HASH(active_read_args_map, __u64, struct data_args_t, MAP_MAX_ENTRIES_DEF, FEATURE_SOCKET_TRACER)

// socket_info_map is a hash table used to record socket information.
// Key is {pid + fd}. value is struct socket_info_s
- BPF_HASH(socket_info_map, __u64, struct socket_info_s)
+ BPF_HASH(socket_info_map, __u64, struct socket_info_s, MAP_MAX_ENTRIES_DEF, FEATURE_SOCKET_TRACER)

// socket_info lifecycle is inconsistent with socket. If the role information
// is saved to the socket_info_map, it will affect the generation of syscall
// trace id. Create an independent map to save role information
// Key is {pid + fd}. value is role type
- BPF_HASH(socket_role_map, __u64, __u32);
+ BPF_HASH(socket_role_map, __u64, __u32, MAP_MAX_ENTRIES_DEF, FEATURE_SOCKET_TRACER);

// Key is struct trace_key_t. value is trace_info_t
- BPF_HASH(trace_map, struct trace_key_t, struct trace_info_t)
+ BPF_HASH(trace_map, struct trace_key_t, struct trace_info_t, MAP_MAX_ENTRIES_DEF, FEATURE_SOCKET_TRACER)

// Stores the identity used to adapt to the kernel, key: 0, value: {tgid, pid}
- MAP_ARRAY(adapt_kern_uid_map, __u32, __u64, 1)
+ MAP_ARRAY(adapt_kern_uid_map, __u32, __u64, 1, FEATURE_SOCKET_TRACER)

#if defined(LINUX_VER_KFUNC) || defined(LINUX_VER_5_2_PLUS)
/*
@@ -159,7 +159,7 @@ MAP_ARRAY(adapt_kern_uid_map, __u32, __u64, 1)
* The process-ID/thread-ID range [0, 5242880], if the process value exceeds the
* maximum value range, fast cache matching becomes invalid.
*/
- MAP_ARRAY(proto_infer_cache_map, __u32, struct proto_infer_cache_t, PROTO_INFER_CACHE_SIZE)
+ MAP_ARRAY(proto_infer_cache_map, __u32, struct proto_infer_cache_t, PROTO_INFER_CACHE_SIZE, FEATURE_SOCKET_TRACER)
#endif
/* *INDENT-ON* */

@@ -2701,7 +2701,7 @@ KFUNC_PROG(__sys_connect, int fd, struct sockaddr __user * uservaddr,
}

// Store IO event information
- MAP_PERARRAY(io_event_buffer, __u32, struct __io_event_buffer, 1)
+ MAP_PERARRAY(io_event_buffer, __u32, struct __io_event_buffer, 1, FEATURE_SOCKET_TRACER)

static __inline int finalize_data_output(void *ctx,
struct tracer_ctx_s *tracer_ctx,
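
The comment near the top of this file explains the two jump tables: a kprobe/kretprobe program may only tail-call another kprobe program, and a tracepoint program another tracepoint program. The hand-off itself is a bpf_tail_call() through the matching PROG_ARRAY. A minimal sketch, with an illustrative section name and slot index rather than the file's real programs:

/* Illustrative kprobe program that hands off through the kprobe jump table.
 * PROG_IDX_DEMO is a made-up slot index; __progs_jmp_kp_map is the map
 * emitted by MAP_PROG_ARRAY(progs_jmp_kp_map, ...). */
#define PROG_IDX_DEMO 0

SEC("kprobe/demo_handoff")
int kprobe_demo_handoff(struct pt_regs *ctx)
{
	/* On success execution continues in the target program and
	 * never returns here. */
	bpf_tail_call(ctx, &__progs_jmp_kp_map, PROG_IDX_DEMO);

	/* Reached only if the tail call failed (e.g. empty slot). */
	return 0;
}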
8 changes: 7 additions & 1 deletion agent/src/ebpf/kernel/uprobe_base.bpf.c
@@ -42,6 +42,7 @@ struct bpf_map_def SEC("maps") http2_tcp_seq_map = {
.key_size = sizeof(struct http2_tcp_seq_key),
.value_size = sizeof(__u32),
.max_entries = HASH_ENTRIES_MAX,
+ .feat = FEATURE_UPROBE_GOLANG,
};

/*
@@ -54,6 +55,7 @@ struct bpf_map_def SEC("maps") proc_info_map = {
.key_size = sizeof(int),
.value_size = sizeof(struct ebpf_proc_info),
.max_entries = HASH_ENTRIES_MAX,
+ .feat = FEATURE_UPROBE_GOLANG,
};

// Process ID and coroutine ID, marking the coroutine in the system
@@ -71,6 +73,7 @@ struct bpf_map_def SEC("maps") go_ancerstor_map = {
.key_size = sizeof(struct go_key),
.value_size = sizeof(__u64),
.max_entries = HASH_ENTRIES_MAX,
+ .feat = FEATURE_UPROBE_GOLANG,
};

// Used to determine the timeout, as a termination condition for finding
@@ -82,6 +85,7 @@ struct bpf_map_def SEC("maps") go_rw_ts_map = {
.key_size = sizeof(struct go_key),
.value_size = sizeof(__u64),
.max_entries = HASH_ENTRIES_MAX,
+ .feat = FEATURE_UPROBE_GOLANG,
};

// Pass data between coroutine entry and exit functions
@@ -95,6 +99,7 @@ struct bpf_map_def SEC("maps") pid_tgid_callerid_map = {
.key_size = sizeof(__u64),
.value_size = sizeof(struct go_newproc_caller),
.max_entries = HASH_ENTRIES_MAX,
+ .feat = FEATURE_UPROBE_GOLANG,
};

/*
@@ -107,6 +112,7 @@ struct bpf_map_def SEC("maps") goroutines_map = {
.key_size = sizeof(__u64),
.value_size = sizeof(__u64),
.max_entries = MAX_SYSTEM_THREADS,
+ .feat = FEATURE_UPROBE_GOLANG,
};
/* *INDENT-ON* */

@@ -160,7 +166,7 @@ struct __http2_stack {
bool tls;
} __attribute__ ((packed));

- MAP_PERARRAY(http2_stack, __u32, struct __http2_stack, 1)
+ MAP_PERARRAY(http2_stack, __u32, struct __http2_stack, 1, FEATURE_UPROBE_GOLANG)

static __inline struct __http2_stack *get_http2_stack()
{
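
The Go-related maps above tie kernel thread IDs to goroutine IDs so later uprobes can attribute reads and writes to a coroutine. A sketch of what such a lookup typically looks like; the assumption that goroutines_map is keyed by the current pid_tgid is not visible in this hunk, so treat it as illustrative:

/* Illustrative lookup of the current goroutine ID recorded in goroutines_map.
 * Assumes the key is the pid_tgid of the current thread, which is not shown
 * in this hunk -- a sketch, not the file's actual helper. */
static __inline __u64 demo_get_current_goid(void)
{
	__u64 pid_tgid = bpf_get_current_pid_tgid();
	__u64 *goid = bpf_map_lookup_elem(&goroutines_map, &pid_tgid);
	return goid ? *goid : 0;
}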
Empty file modified: agent/src/ebpf/tools/code.style (mode 100644 → 100755)