diff --git a/Documentation/dev-tools/kcov.rst b/Documentation/dev-tools/kcov.rst
index 6611434e2dd247..6d5ea3e5cd76ff 100644
--- a/Documentation/dev-tools/kcov.rst
+++ b/Documentation/dev-tools/kcov.rst
@@ -137,6 +137,54 @@ mmaps coverage buffer, and then forks child processes in a loop. The child
 processes only need to enable coverage (it gets disabled automatically when
 a thread exits).
 
+Unique coverage collection
+--------------------------
+
+Instead of collecting raw PCs, KCOV can deduplicate them on the fly.
+This mode is enabled by the ``KCOV_UNIQUE_ENABLE`` ioctl (only available if
+``CONFIG_KCOV_ENABLE_GUARDS`` is on).
+
+.. code-block:: c
+
+    /* Same includes and defines as above. */
+    #define KCOV_UNIQUE_ENABLE _IOR('c', 103, unsigned long)
+    #define BITMAP_SIZE (4<<10)
+
+    /* Instead of KCOV_ENABLE, enable unique coverage collection. */
+    if (ioctl(fd, KCOV_UNIQUE_ENABLE, BITMAP_SIZE))
+            perror("ioctl"), exit(1);
+    /* Reset the coverage from the tail of the ioctl() call. */
+    __atomic_store_n(&cover[BITMAP_SIZE], 0, __ATOMIC_RELAXED);
+    memset(cover, 0, BITMAP_SIZE * sizeof(unsigned long));
+
+    /* Call the target syscall. */
+    /* ... */
+
+    /* Read the number of collected PCs. */
+    n = __atomic_load_n(&cover[BITMAP_SIZE], __ATOMIC_RELAXED);
+    /* Disable the coverage collection. */
+    if (ioctl(fd, KCOV_DISABLE, 0))
+            perror("ioctl"), exit(1);
+
+Calling ``ioctl(fd, KCOV_UNIQUE_ENABLE, bitmap_size)`` carves out ``bitmap_size``
+words from those allocated by ``KCOV_INIT_TRACE`` to keep an opaque bitmap that
+prevents the kernel from storing the same PC twice. The remaining part of the
+buffer is used as the trace to collect PCs, like in other modes.
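+
+Schematically, the resulting buffer layout looks as follows (a sketch; the
+bitmap contents are opaque, only the offsets matter to the user)::
+
+    cover[0] .. cover[bitmap_size - 1]    the deduplication bitmap
+    cover[bitmap_size]                    the number of unique PCs collected
+    cover[bitmap_size + 1] ..             the unique PCs themselves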
+
+If ``bitmap_size`` is equal to the total buffer size, kcov only records the
+bits, but not the actual PCs.
+
+If ``bitmap_size`` is zero, kcov treats the whole trace as a sparse array where
+each PC occurs only once, but there can be holes between PCs.
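+
+Reading the coverage back in this sparse mode may look as follows (a sketch
+reusing ``cover``, ``COVER_SIZE`` and ``i`` from the first example; note that
+the array should be scanned before calling ``KCOV_DISABLE``, since the ioctl
+itself may still add new PCs to it):
+
+.. code-block:: c
+
+    /* Passing 0 as the argument requests the sparse mode. */
+    if (ioctl(fd, KCOV_UNIQUE_ENABLE, 0))
+            perror("ioctl"), exit(1);
+    memset(cover, 0, COVER_SIZE * sizeof(unsigned long));
+
+    /* Call the target syscall. */
+    /* ... */
+
+    /* Scan the sparse array, skipping the holes. */
+    for (i = 0; i < COVER_SIZE; i++)
+            if (cover[i])
+                    printf("0x%lx\n", cover[i]);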
+
+The mapping between a PC and its position in the bitmap is persistent for the
+lifetime of the running kernel, so it is possible for the callers to directly
+use the bitmap contents as a coverage signal (like when fuzzing userspace with
+AFL).
+
+In order to reset the coverage between runs, the user needs to rewind the
+trace (by writing 0 into the first word past ``bitmap_size``) and wipe the
+whole bitmap.
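+
+For example, an AFL-style fuzzer could accumulate the bitmaps observed so far
+and treat a run as interesting if it sets previously unseen bits (a sketch;
+``virgin`` is a hypothetical user-maintained buffer of ``BITMAP_SIZE`` words,
+initially all zeroes):
+
+.. code-block:: c
+
+    int is_interesting = 0;
+    for (i = 0; i < BITMAP_SIZE; i++) {
+            unsigned long new_bits = cover[i] & ~virgin[i];
+            if (new_bits) {
+                    virgin[i] |= new_bits;
+                    is_interesting = 1;
+            }
+    }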
+
 Comparison operands collection
 ------------------------------
 
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 0deb4887d6e96a..2acfbbde338208 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -390,6 +390,7 @@ SECTIONS
 		. = ALIGN(PAGE_SIZE);
 		__bss_stop = .;
 	}
+	SANCOV_GUARDS_BSS
 
 	/*
 	 * The memory occupied from _text to here, __end_of_kernel_reserve, is
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 54504013c74915..6dcdce2a823f3d 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -102,7 +102,7 @@
  * sections to be brought in with rodata.
  */
 #if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG) || \
-defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
+defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) || defined(CONFIG_KCOV_ENABLE_GUARDS)
 #define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
 #else
 #define TEXT_MAIN .text
@@ -121,6 +121,17 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
 #define SBSS_MAIN .sbss
 #endif
 
+#if defined(CONFIG_KCOV_ENABLE_GUARDS)
+#define SANCOV_GUARDS_BSS			\
+	__sancov_guards(NOLOAD) : {		\
+		__sancov_guards_start = .;	\
+		*(__sancov_guards);		\
+		__sancov_guards_end = .;	\
+	}
+#else
+#define SANCOV_GUARDS_BSS
+#endif
+
 /*
  * GCC 4.5 and later have a 32 bytes section alignment for structures.
  * Except GCC 4.9, that feels the need to align on 64 bytes.
diff --git a/include/linux/kcov-state.h b/include/linux/kcov-state.h
new file mode 100644
index 00000000000000..26e275fe906849
--- /dev/null
+++ b/include/linux/kcov-state.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KCOV_STATE_H
+#define _LINUX_KCOV_STATE_H
+
+#ifdef CONFIG_KCOV
+struct kcov_state {
+	/* See kernel/kcov.c for more details. */
+	/*
+	 * Coverage collection mode enabled for this task (0 if disabled).
+	 * This field is used for synchronization, so it is kept outside of
+	 * the below struct.
+	 */
+	unsigned int mode;
+
+	struct {
+		/* Size of the area (in long's). */
+		unsigned int size;
+		/*
+		 * Pointer to user-provided memory used by kcov. This memory
+		 * may contain multiple buffers.
+		 */
+		void *area;
+
+		/* Size of the trace (in long's). */
+		unsigned int trace_size;
+		/* Buffer for coverage collection, shared with userspace. */
+		unsigned long *trace;
+
+		/* Size of the bitmap (in bits). */
+		unsigned int bitmap_size;
+		/*
+		 * Bitmap for coverage deduplication, shared with
+		 * userspace.
+		 */
+		unsigned long *bitmap;
+
+		/*
+		 * KCOV sequence number: incremented each time kcov is
+		 * reenabled, used by kcov_remote_stop(), see the comment there.
+		 */
+		int sequence;
+	} s;
+};
+#endif /* CONFIG_KCOV */
+
+#endif /* _LINUX_KCOV_STATE_H */
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index 75a2fb8b16c329..b6f6651f9ce3a5 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -2,7 +2,7 @@
 #ifndef _LINUX_KCOV_H
 #define _LINUX_KCOV_H
 
-#include
+#include <linux/kcov-state.h>
 #include <uapi/linux/kcov.h>
 
 struct task_struct;
@@ -23,22 +23,23 @@ enum kcov_mode {
 	KCOV_MODE_TRACE_CMP = 3,
 	/* The process owns a KCOV remote reference. */
 	KCOV_MODE_REMOTE = 4,
+	KCOV_MODE_TRACE_UNIQUE_PC = 5,
 };
 
-#define KCOV_IN_CTXSW	(1 << 30)
+#define KCOV_IN_CTXSW (1 << 30)
 
 void kcov_task_init(struct task_struct *t);
 void kcov_task_exit(struct task_struct *t);
 
-#define kcov_prepare_switch(t)			\
-do {						\
-	(t)->kcov_mode |= KCOV_IN_CTXSW;	\
-} while (0)
+#define kcov_prepare_switch(t)                     \
+	do {                                       \
+		(t)->kcov_state.mode |= KCOV_IN_CTXSW; \
+	} while (0)
 
-#define kcov_finish_switch(t)			\
-do {						\
-	(t)->kcov_mode &= ~KCOV_IN_CTXSW;	\
-} while (0)
+#define kcov_finish_switch(t)                       \
+	do {                                        \
+		(t)->kcov_state.mode &= ~KCOV_IN_CTXSW; \
+	} while (0)
 
 /* See Documentation/dev-tools/kcov.rst for usage details. */
 void kcov_remote_start(u64 handle);
@@ -107,6 +108,8 @@ typedef unsigned long long kcov_u64;
 #endif
 
 void __sanitizer_cov_trace_pc(void);
+void __sanitizer_cov_trace_pc_guard(u32 *guard);
+void __sanitizer_cov_trace_pc_guard_init(u32 *start, u32 *stop);
 void __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2);
 void __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2);
 void __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2);
@@ -119,23 +122,41 @@ void __sanitizer_cov_trace_switch(kcov_u64 val, void *cases);
 
 #else
 
-static inline void kcov_task_init(struct task_struct *t) {}
-static inline void kcov_task_exit(struct task_struct *t) {}
-static inline void kcov_prepare_switch(struct task_struct *t) {}
-static inline void kcov_finish_switch(struct task_struct *t) {}
-static inline void kcov_remote_start(u64 handle) {}
-static inline void kcov_remote_stop(void) {}
+static inline void kcov_task_init(struct task_struct *t)
+{
+}
+static inline void kcov_task_exit(struct task_struct *t)
+{
+}
+static inline void kcov_prepare_switch(struct task_struct *t)
+{
+}
+static inline void kcov_finish_switch(struct task_struct *t)
+{
+}
+static inline void kcov_remote_start(u64 handle)
+{
+}
+static inline void kcov_remote_stop(void)
+{
+}
 static inline u64 kcov_common_handle(void)
 {
 	return 0;
 }
-static inline void kcov_remote_start_common(u64 id) {}
-static inline void kcov_remote_start_usb(u64 id) {}
+static inline void kcov_remote_start_common(u64 id)
+{
+}
+static inline void kcov_remote_start_usb(u64 id)
+{
+}
 static inline unsigned long kcov_remote_start_usb_softirq(u64 id)
 {
 	return 0;
 }
-static inline void kcov_remote_stop_softirq(unsigned long flags) {}
+static inline void kcov_remote_stop_softirq(unsigned long flags)
+{
+}
 
 #endif /* CONFIG_KCOV */
 
 #endif /* _LINUX_KCOV_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9632e3318e0d6b..3c39fc1782a61c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -42,6 +42,7 @@
 #include
 #include
 #include
+#include <linux/kcov-state.h>
 #include
 #include
 #include
@@ -1485,26 +1486,13 @@ struct task_struct {
 #endif /* CONFIG_TRACING */
 
 #ifdef CONFIG_KCOV
-	/* See kernel/kcov.c for more details. */
-
-	/* Coverage collection mode enabled for this task (0 if disabled): */
-	unsigned int			kcov_mode;
-
-	/* Size of the kcov_area: */
-	unsigned int			kcov_size;
-
-	/* Buffer for coverage collection: */
-	void				*kcov_area;
-
+	struct kcov_state		kcov_state;
 	/* KCOV descriptor wired with this task or NULL: */
 	struct kcov			*kcov;
 
 	/* KCOV common handle for remote coverage collection: */
 	u64				kcov_handle;
 
-	/* KCOV sequence number: */
-	int				kcov_sequence;
-
 	/* Collect coverage from softirq context: */
 	unsigned int			kcov_softirq;
 #endif
diff --git a/include/uapi/linux/kcov.h b/include/uapi/linux/kcov.h
index ed95dba9fa37e2..5cfe76c7d484ee 100644
--- a/include/uapi/linux/kcov.h
+++ b/include/uapi/linux/kcov.h
@@ -22,6 +22,7 @@ struct kcov_remote_arg {
 #define KCOV_ENABLE			_IO('c', 100)
 #define KCOV_DISABLE			_IO('c', 101)
 #define KCOV_REMOTE_ENABLE		_IOW('c', 102, struct kcov_remote_arg)
+#define KCOV_UNIQUE_ENABLE		_IOR('c', 103, unsigned long)
 
 enum {
 	/*
@@ -35,6 +36,8 @@ enum {
 	KCOV_TRACE_PC = 0,
 	/* Collecting comparison operands mode. */
 	KCOV_TRACE_CMP = 1,
+	/* Deduplicate collected PCs. */
+	KCOV_TRACE_UNIQUE_PC = 2,
 };
 
 /*
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 187ba1b80bda16..39d8db2676f7a1 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -4,29 +4,36 @@
 #define DISABLE_BRANCH_PROFILING
 #include
 #include
+#include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
 #include
+#include
+#include
 #include
+#include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
-#include
-#include
 #include
-#include
-#include
-#include
+#include
+
 #include
 
+#ifdef CONFIG_KCOV_ENABLE_GUARDS
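+/*
+ * Guard index 0 is reserved: a zero guard value means "no index assigned
+ * yet", so the allocation counter below starts at 1 (see init_pc_guard()).
+ */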
+atomic_t kcov_guard_max_index = ATOMIC_INIT(1);
+extern u32 __sancov_guards_start, __sancov_guards_end;
+#endif
+
 #define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
 
 /* Number of 64-bit words written per one comparison: */
@@ -52,36 +59,28 @@ struct kcov {
 	 *  - task with enabled coverage (we can't unwire it from another task)
 	 *  - each code section for remote coverage collection
 	 */
-	refcount_t		refcount;
-	/* The lock protects mode, size, area and t. */
-	spinlock_t		lock;
-	enum kcov_mode		mode;
-	/* Size of arena (in long's). */
-	unsigned int		size;
-	/* Coverage buffer shared with user space. */
-	void			*area;
+	refcount_t refcount;
+	/* The lock protects state and t. */
+	spinlock_t lock;
+	struct kcov_state state;
+
 	/* Task for which we collect coverage, or NULL. */
-	struct task_struct	*t;
+	struct task_struct *t;
 	/* Collecting coverage from remote (background) threads. */
-	bool			remote;
+	bool remote;
 	/* Size of remote area (in long's). */
-	unsigned int		remote_size;
-	/*
-	 * Sequence is incremented each time kcov is reenabled, used by
-	 * kcov_remote_stop(), see the comment there.
-	 */
-	int			sequence;
+	unsigned int remote_size;
 };
 
 struct kcov_remote_area {
-	struct list_head	list;
-	unsigned int		size;
+	struct list_head list;
+	unsigned int size;
 };
 
 struct kcov_remote {
-	u64			handle;
-	struct kcov		*kcov;
-	struct hlist_node	hnode;
+	u64 handle;
+	struct kcov *kcov;
+	struct hlist_node hnode;
 };
 
 static DEFINE_SPINLOCK(kcov_remote_lock);
@@ -89,14 +88,10 @@ static DEFINE_HASHTABLE(kcov_remote_map, 4);
 static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);
 
 struct kcov_percpu_data {
-	void			*irq_area;
-	local_lock_t		lock;
-
-	unsigned int		saved_mode;
-	unsigned int		saved_size;
-	void			*saved_area;
-	struct kcov		*saved_kcov;
-	int			saved_sequence;
+	void *irq_area;
+	local_lock_t lock;
+	struct kcov *saved_kcov;
+	struct kcov_state saved_state;
 };
 
 static DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data) = {
@@ -149,7 +144,7 @@ static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)
 
 /* Must be called with kcov_remote_lock locked. */
 static void kcov_remote_area_put(struct kcov_remote_area *area,
-		unsigned int size)
+				 unsigned int size)
 {
 	INIT_LIST_HEAD(&area->list);
 	area->size = size;
@@ -171,7 +166,8 @@ static __always_inline bool in_softirq_really(void)
 	return in_serving_softirq() && !in_hardirq() && !in_nmi();
 }
 
-static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
+static notrace bool check_kcov_mode(enum kcov_mode needed_mode,
+				    struct task_struct *t)
 {
 	unsigned int mode;
 
@@ -182,7 +178,7 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_stru
 	 */
 	if (!in_task() && !(in_softirq_really() && t->kcov_softirq))
 		return false;
-	mode = READ_ONCE(t->kcov_mode);
+	mode = READ_ONCE(t->kcov_state.mode);
 	/*
 	 * There is some code that runs in interrupts but for which
 	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
@@ -202,45 +198,145 @@ static notrace unsigned long canonicalize_ip(unsigned long ip)
 	return ip;
 }
 
+static notrace void sanitizer_cov_write_subsequent(unsigned long *trace,
+						   unsigned int size,
+						   unsigned long ip)
+{
+	/* The first 64-bit word is the number of subsequent PCs. */
+	unsigned long pos = READ_ONCE(trace[0]) + 1;
+
+	if (likely(pos < size)) {
+		/*
+		 * Some early interrupt code could bypass the
+		 * check_kcov_mode() check and invoke
+		 * __sanitizer_cov_trace_pc(). If such an interrupt is
+		 * raised between writing pc and updating pos, the pc could
+		 * be overwritten by the recursive __sanitizer_cov_trace_pc().
+		 * Update pos before writing pc to avoid such interleaving.
+		 */
+		WRITE_ONCE(trace[0], pos);
+		barrier();
+		trace[pos] = ip;
+	}
+}
+
 /*
  * Entry point from instrumented code.
  * This is called once per basic-block/edge.
  */
+#ifndef CONFIG_KCOV_ENABLE_GUARDS
 void notrace __sanitizer_cov_trace_pc(void)
 {
-	struct task_struct *t;
-	unsigned long *area;
+	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, current))
+		return;
+
+	sanitizer_cov_write_subsequent(current->kcov_state.s.trace,
+				       current->kcov_state.s.trace_size,
+				       canonicalize_ip(_RET_IP_));
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
+#else
+
+static DEFINE_PER_CPU(u32, saved_index);
+/*
+ * Assign an index to a guard variable that does not have one yet.
+ * For the unlikely case of a race with another task executing the same basic
+ * block, we keep a free index in a per-cpu variable.
+ * In an even less likely case, a task can lose the race and get rescheduled
+ * onto a CPU that already has a saved index, discarding that index. This will
+ * result in an unused hole in the bitmap, but such events should not impact
+ * the overall memory consumption.
+ */
+static notrace u32 init_pc_guard(u32 *guard)
+{
+	/* If the current CPU has a free index from the previous call, take it. */
+	u32 index = this_cpu_xchg(saved_index, 0);
+	u32 old_guard;
+
+	/* Otherwise, allocate a new index. */
+	if (!index)
+		index = atomic_inc_return(&kcov_guard_max_index) - 1;
+
+	/* The index counter must not overflow (wrap back to zero). */
+	WARN_ON(!index);
+	/*
+	 * Make sure another task is not initializing the same guard
+	 * concurrently.
+	 */
+	old_guard = cmpxchg(guard, 0, index);
+	if (old_guard) {
+		/* We lost the race, save the index for future use. */
+		this_cpu_write(saved_index, index);
+		return old_guard;
+	}
+	return index;
+}
+
+void notrace __sanitizer_cov_trace_pc_guard(u32 *guard)
+{
+	struct task_struct *t = current;
 	unsigned long ip = canonicalize_ip(_RET_IP_);
-	unsigned long pos;
+	u32 pc_index;
 
-	t = current;
-	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
+	/*
+	 * In KCOV_MODE_TRACE_PC mode, behave similarly to
+	 * __sanitizer_cov_trace_pc().
+	 */
+	if (check_kcov_mode(KCOV_MODE_TRACE_PC, t)) {
+		sanitizer_cov_write_subsequent(t->kcov_state.s.trace,
+					       t->kcov_state.s.trace_size, ip);
+		return;
+	}
+	/*
+	 * In KCOV_MODE_TRACE_UNIQUE_PC, deduplicate coverage on the fly.
+	 *
+	 * TODO: when collecting only sparse coverage (if exactly one of
+	 * t->kcov_state.s.trace or t->kcov_state.s.bitmap is NULL), there is
+	 * no easy way to snapshot the coverage map before calling
+	 * ioctl(KCOV_DISABLE), and the latter may pollute the map.
+	 * We may need a flag to atomically enable/disable coverage collection.
+	 */
+	if (!check_kcov_mode(KCOV_MODE_TRACE_UNIQUE_PC, t))
 		return;
 
-	area = t->kcov_area;
-	/* The first 64-bit word is the number of subsequent PCs. */
-	pos = READ_ONCE(area[0]) + 1;
-	if (likely(pos < t->kcov_size)) {
-		/* Previously we write pc before updating pos. However, some
-		 * early interrupt code could bypass check_kcov_mode() check
-		 * and invoke __sanitizer_cov_trace_pc(). If such interrupt is
-		 * raised between writing pc and updating pos, the pc could be
-		 * overitten by the recursive __sanitizer_cov_trace_pc().
-		 * Update pos before writing pc to avoid such interleaving.
-		 */
-		WRITE_ONCE(area[0], pos);
-		barrier();
-		area[pos] = ip;
+	pc_index = READ_ONCE(*guard);
+	if (!pc_index)
+		pc_index = init_pc_guard(guard);
+
+	/* Use a bitmap for coverage deduplication. */
+	if (t->kcov_state.s.bitmap) {
+		/* If this is known coverage, do not write the trace. */
+		if (likely(pc_index < t->kcov_state.s.bitmap_size))
+			if (test_and_set_bit(pc_index, t->kcov_state.s.bitmap))
+				return;
+		/* If we got here and trace is allocated, write the new PC to it. */
+		if (t->kcov_state.s.trace)
+			sanitizer_cov_write_subsequent(
+				t->kcov_state.s.trace,
+				t->kcov_state.s.trace_size, ip);
+		return;
+	}
+	/*
+	 * At this point, trace must be valid. Since there is no bitmap, use
+	 * the trace itself as a sparse array.
+	 */
+	if (pc_index < t->kcov_state.s.trace_size) {
+		t->kcov_state.s.trace[pc_index] = ip;
 	}
 }
-EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
+EXPORT_SYMBOL(__sanitizer_cov_trace_pc_guard);
+
+void notrace __sanitizer_cov_trace_pc_guard_init(u32 *start, u32 *stop)
+{
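+	/*
+	 * The guards live in the dedicated __sancov_guards section and are
+	 * assigned indices lazily, on the first execution of each basic
+	 * block (see init_pc_guard()), so there is nothing to do here.
+	 */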
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_pc_guard_init);
+#endif
 
 #ifdef CONFIG_KCOV_ENABLE_COMPARISONS
 static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
 {
-	struct task_struct *t;
-	u64 *area;
 	u64 count, start_index, end_pos, max_pos;
+	struct task_struct *t;
+	u64 *trace;
 
 	t = current;
 	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
@@ -250,24 +346,24 @@ static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
 
 	/*
 	 * We write all comparison arguments and types as u64.
-	 * The buffer was allocated for t->kcov_size unsigned longs.
+	 * The buffer was allocated for t->kcov_state.s.size unsigned longs.
 	 */
-	area = (u64 *)t->kcov_area;
-	max_pos = t->kcov_size * sizeof(unsigned long);
+	trace = (u64 *)t->kcov_state.s.trace;
+	max_pos = t->kcov_state.s.size * sizeof(unsigned long);
 
-	count = READ_ONCE(area[0]);
+	count = READ_ONCE(trace[0]);
 	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
 	start_index = 1 + count * KCOV_WORDS_PER_CMP;
 	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
 	if (likely(end_pos <= max_pos)) {
-		/* See comment in __sanitizer_cov_trace_pc(). */
-		WRITE_ONCE(area[0], count + 1);
+		/* See comment in sanitizer_cov_write_subsequent(). */
+		WRITE_ONCE(trace[0], count + 1);
 		barrier();
-		area[start_index] = type;
-		area[start_index + 1] = arg1;
-		area[start_index + 2] = arg2;
-		area[start_index + 3] = ip;
+		trace[start_index] = type;
+		trace[start_index + 1] = arg1;
+		trace[start_index + 2] = arg2;
+		trace[start_index + 3] = ip;
 	}
 }
 
@@ -354,33 +450,33 @@ EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
 #endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */
 
 static void kcov_start(struct task_struct *t, struct kcov *kcov,
-		       unsigned int size, void *area, enum kcov_mode mode,
-		       int sequence)
+		       struct kcov_state *state)
 {
-	kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);
+	kcov_debug("t = %px, size = %u, area = %px\n", t, state->s.size,
+		   state->s.area);
 	t->kcov = kcov;
 	/* Cache in task struct for performance. */
-	t->kcov_size = size;
-	t->kcov_area = area;
-	t->kcov_sequence = sequence;
-	/* See comment in check_kcov_mode(). */
+	t->kcov_state.s = state->s;
 	barrier();
-	WRITE_ONCE(t->kcov_mode, mode);
+	/* See comment in check_kcov_mode(). */
+	WRITE_ONCE(t->kcov_state.mode, state->mode);
 }
 
 static void kcov_stop(struct task_struct *t)
 {
-	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
+	int saved_sequence = t->kcov_state.s.sequence;
+
+	WRITE_ONCE(t->kcov_state.mode, KCOV_MODE_DISABLED);
 	barrier();
 	t->kcov = NULL;
-	t->kcov_size = 0;
-	t->kcov_area = NULL;
+	t->kcov_state.s = (typeof(t->kcov_state.s)){ 0 };
+	t->kcov_state.s.sequence = saved_sequence;
 }
 
 static void kcov_task_reset(struct task_struct *t)
 {
 	kcov_stop(t);
-	t->kcov_sequence = 0;
+	t->kcov_state.s.sequence = 0;
 	t->kcov_handle = 0;
 }
 
@@ -393,10 +489,12 @@ void kcov_task_init(struct task_struct *t)
 static void kcov_reset(struct kcov *kcov)
 {
 	kcov->t = NULL;
-	kcov->mode = KCOV_MODE_INIT;
+	kcov->state.mode = KCOV_MODE_INIT;
 	kcov->remote = false;
 	kcov->remote_size = 0;
-	kcov->sequence++;
+	kcov->state.s.bitmap = NULL;
+	kcov->state.s.bitmap_size = 0;
+	kcov->state.s.sequence++;
 }
 
 static void kcov_remote_reset(struct kcov *kcov)
@@ -436,7 +534,7 @@ static void kcov_put(struct kcov *kcov)
 {
 	if (refcount_dec_and_test(&kcov->refcount)) {
 		kcov_remote_reset(kcov);
-		vfree(kcov->area);
+		vfree(kcov->state.s.area);
 		kfree(kcov);
 	}
 }
@@ -493,8 +591,8 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
 	unsigned long flags;
 
 	spin_lock_irqsave(&kcov->lock, flags);
-	size = kcov->size * sizeof(unsigned long);
-	if (kcov->area == NULL || vma->vm_pgoff != 0 ||
+	size = kcov->state.s.size * sizeof(unsigned long);
+	if (kcov->state.s.area == NULL || vma->vm_pgoff != 0 ||
 	    vma->vm_end - vma->vm_start != size) {
 		res = -EINVAL;
 		goto exit;
@@ -502,7 +600,7 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
 	spin_unlock_irqrestore(&kcov->lock, flags);
 	vm_flags_set(vma, VM_DONTEXPAND);
 	for (off = 0; off < size; off += PAGE_SIZE) {
-		page = vmalloc_to_page(kcov->area + off);
+		page = vmalloc_to_page(kcov->state.s.area + off);
 		res = vm_insert_page(vma, vma->vm_start + off, page);
 		if (res) {
 			pr_warn_once("kcov: vm_insert_page() failed\n");
@@ -522,8 +620,8 @@ static int kcov_open(struct inode *inode, struct file *filep)
 	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
 	if (!kcov)
 		return -ENOMEM;
-	kcov->mode = KCOV_MODE_DISABLED;
-	kcov->sequence = 1;
+	kcov->state.mode = KCOV_MODE_DISABLED;
+	kcov->state.s.sequence = 1;
 	refcount_set(&kcov->refcount, 1);
 	spin_lock_init(&kcov->lock);
 	filep->private_data = kcov;
@@ -540,6 +638,12 @@ static int kcov_get_mode(unsigned long arg)
 {
 	if (arg == KCOV_TRACE_PC)
 		return KCOV_MODE_TRACE_PC;
+	else if (arg == KCOV_TRACE_UNIQUE_PC)
+#ifdef CONFIG_KCOV_ENABLE_GUARDS
+		return KCOV_MODE_TRACE_UNIQUE_PC;
+#else
+		return -ENOTSUPP;
+#endif
 	else if (arg == KCOV_TRACE_CMP)
 #ifdef CONFIG_KCOV_ENABLE_COMPARISONS
 		return KCOV_MODE_TRACE_CMP;
@@ -558,22 +662,22 @@ static int kcov_get_mode(unsigned long arg)
 static void kcov_fault_in_area(struct kcov *kcov)
 {
 	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
-	unsigned long *area = kcov->area;
+	unsigned long *area = kcov->state.s.area;
 	unsigned long offset;
 
-	for (offset = 0; offset < kcov->size; offset += stride)
+	for (offset = 0; offset < kcov->state.s.size; offset += stride)
 		READ_ONCE(area[offset]);
 }
 
 static inline bool kcov_check_handle(u64 handle, bool common_valid,
-		bool uncommon_valid, bool zero_valid)
+				     bool uncommon_valid, bool zero_valid)
 {
 	if (handle & ~(KCOV_SUBSYSTEM_MASK | KCOV_INSTANCE_MASK))
 		return false;
 	switch (handle & KCOV_SUBSYSTEM_MASK) {
 	case KCOV_SUBSYSTEM_COMMON:
-		return (handle & KCOV_INSTANCE_MASK) ?
-			common_valid : zero_valid;
+		return (handle & KCOV_INSTANCE_MASK) ? common_valid :
+						       zero_valid;
 	case KCOV_SUBSYSTEM_USB:
 		return uncommon_valid;
 	default:
@@ -582,6 +686,53 @@ static inline bool kcov_check_handle(u64 handle, bool common_valid,
 		return false;
 	}
 	return false;
 }
 
+static long kcov_handle_unique_enable(struct kcov *kcov,
+				      unsigned long bitmap_words)
+{
+	u32 bitmap_bytes = 0;
+	struct task_struct *t;
+
+	if (!IS_ENABLED(CONFIG_KCOV_ENABLE_GUARDS))
+		return -ENOTSUPP;
+	if (kcov->state.mode != KCOV_MODE_INIT || !kcov->state.s.area)
+		return -EINVAL;
+	t = current;
+	if (kcov->t != NULL || t->kcov != NULL)
+		return -EBUSY;
+
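+	/*
+	 * Carve the bitmap (if requested) out of the beginning of the area;
+	 * whatever remains (possibly nothing) is used as the trace.
+	 */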
+	if (bitmap_words) {
+		bitmap_bytes = (u32)(bitmap_words * sizeof(unsigned long));
+		if (bitmap_words > kcov->state.s.size)
+			return -EINVAL;
+		kcov->state.s.bitmap_size = bitmap_bytes * 8;
+		kcov->state.s.bitmap = kcov->state.s.area;
+	} else {
+		kcov->state.s.bitmap_size = 0;
+		kcov->state.s.bitmap = NULL;
+	}
+	if (bitmap_words < kcov->state.s.size) {
+		kcov->state.s.trace_size = kcov->state.s.size - bitmap_words;
+		kcov->state.s.trace =
+			(unsigned long *)((char *)kcov->state.s.area +
+					  bitmap_bytes);
+	} else {
+		kcov->state.s.trace_size = 0;
+		kcov->state.s.trace = NULL;
+	}
+
+	kcov_fault_in_area(kcov);
+	kcov->state.mode = KCOV_MODE_TRACE_UNIQUE_PC;
+	kcov_start(t, kcov, &kcov->state);
+	kcov->t = t;
+	/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
+	kcov_get(kcov);
+
+	return 0;
+}
+
 static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
 			     unsigned long arg)
 {
@@ -600,7 +751,7 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
 		 * at task exit or voluntary by KCOV_DISABLE. After that it can
 		 * be enabled for another task.
 		 */
-		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
+		if (kcov->state.mode != KCOV_MODE_INIT || !kcov->state.s.area)
 			return -EINVAL;
 		t = current;
 		if (kcov->t != NULL || t->kcov != NULL)
@@ -609,13 +760,14 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
 		if (mode < 0)
 			return mode;
 		kcov_fault_in_area(kcov);
-		kcov->mode = mode;
-		kcov_start(t, kcov, kcov->size, kcov->area, kcov->mode,
-			   kcov->sequence);
+		kcov->state.mode = mode;
+		kcov_start(t, kcov, &kcov->state);
 		kcov->t = t;
 		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
 		kcov_get(kcov);
 		return 0;
+	case KCOV_UNIQUE_ENABLE:
+		return kcov_handle_unique_enable(kcov, arg);
 	case KCOV_DISABLE:
 		/* Disable coverage for the current task. */
 		unused = arg;
@@ -628,7 +780,7 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
 		kcov_put(kcov);
 		return 0;
 	case KCOV_REMOTE_ENABLE:
-		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
+		if (kcov->state.mode != KCOV_MODE_INIT || !kcov->state.s.area)
 			return -EINVAL;
 		t = current;
 		if (kcov->t != NULL || t->kcov != NULL)
@@ -640,42 +792,42 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
 		if ((unsigned long)remote_arg->area_size >
 		    LONG_MAX / sizeof(unsigned long))
 			return -EINVAL;
-		kcov->mode = mode;
+		kcov->state.mode = mode;
 		t->kcov = kcov;
-		t->kcov_mode = KCOV_MODE_REMOTE;
+		t->kcov_state.mode = KCOV_MODE_REMOTE;
 		kcov->t = t;
 		kcov->remote = true;
 		kcov->remote_size = remote_arg->area_size;
 		spin_lock_irqsave(&kcov_remote_lock, flags);
 		for (i = 0; i < remote_arg->num_handles; i++) {
-			if (!kcov_check_handle(remote_arg->handles[i],
-					       false, true, false)) {
+			if (!kcov_check_handle(remote_arg->handles[i], false,
+					       true, false)) {
 				spin_unlock_irqrestore(&kcov_remote_lock,
-						flags);
+						       flags);
 				kcov_disable(t, kcov);
 				return -EINVAL;
 			}
 			remote = kcov_remote_add(kcov, remote_arg->handles[i]);
 			if (IS_ERR(remote)) {
 				spin_unlock_irqrestore(&kcov_remote_lock,
-						flags);
+						       flags);
 				kcov_disable(t, kcov);
 				return PTR_ERR(remote);
 			}
 		}
 		if (remote_arg->common_handle) {
-			if (!kcov_check_handle(remote_arg->common_handle,
-					       true, false, false)) {
+			if (!kcov_check_handle(remote_arg->common_handle, true,
+					       false, false)) {
 				spin_unlock_irqrestore(&kcov_remote_lock,
-						flags);
+						       flags);
 				kcov_disable(t, kcov);
 				return -EINVAL;
 			}
 			remote = kcov_remote_add(kcov,
-					remote_arg->common_handle);
+						 remote_arg->common_handle);
 			if (IS_ERR(remote)) {
 				spin_unlock_irqrestore(&kcov_remote_lock,
-						flags);
+						       flags);
 				kcov_disable(t, kcov);
 				return PTR_ERR(remote);
 			}
@@ -717,24 +869,28 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 		if (area == NULL)
 			return -ENOMEM;
 		spin_lock_irqsave(&kcov->lock, flags);
-		if (kcov->mode != KCOV_MODE_DISABLED) {
+		if (kcov->state.mode != KCOV_MODE_DISABLED) {
 			spin_unlock_irqrestore(&kcov->lock, flags);
 			vfree(area);
 			return -EBUSY;
 		}
-		kcov->area = area;
-		kcov->size = size;
-		kcov->mode = KCOV_MODE_INIT;
+		kcov->state.s.area = area;
+		kcov->state.s.size = size;
+		kcov->state.s.trace = area;
+		kcov->state.s.trace_size = size;
+		kcov->state.mode = KCOV_MODE_INIT;
 		spin_unlock_irqrestore(&kcov->lock, flags);
 		return 0;
 	case KCOV_REMOTE_ENABLE:
-		if (get_user(remote_num_handles, (unsigned __user *)(arg +
-				offsetof(struct kcov_remote_arg, num_handles))))
+		if (get_user(remote_num_handles,
+			     (unsigned __user *)(arg +
+						 offsetof(struct kcov_remote_arg,
+							  num_handles))))
 			return -EFAULT;
 		if (remote_num_handles > KCOV_REMOTE_MAX_HANDLES)
 			return -EINVAL;
-		remote_arg_size = struct_size(remote_arg, handles,
-					      remote_num_handles);
+		remote_arg_size =
+			struct_size(remote_arg, handles, remote_num_handles);
 		remote_arg = memdup_user((void __user *)arg, remote_arg_size);
 		if (IS_ERR(remote_arg))
 			return PTR_ERR(remote_arg);
@@ -758,11 +914,11 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 }
 
 static const struct file_operations kcov_fops = {
-	.open		= kcov_open,
-	.unlocked_ioctl	= kcov_ioctl,
-	.compat_ioctl	= kcov_ioctl,
-	.mmap		= kcov_mmap,
-	.release        = kcov_close,
+	.open = kcov_open,
+	.unlocked_ioctl = kcov_ioctl,
+	.compat_ioctl = kcov_ioctl,
+	.mmap = kcov_mmap,
+	.release = kcov_close,
 };
 
 /*
@@ -818,13 +974,11 @@ static void kcov_remote_softirq_start(struct task_struct *t)
 	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
 	unsigned int mode;
 
-	mode = READ_ONCE(t->kcov_mode);
+	mode = READ_ONCE(t->kcov_state.mode);
 	barrier();
 	if (kcov_mode_enabled(mode)) {
-		data->saved_mode = mode;
-		data->saved_size = t->kcov_size;
-		data->saved_area = t->kcov_area;
-		data->saved_sequence = t->kcov_sequence;
+		data->saved_state.s = t->kcov_state.s;
+		data->saved_state.mode = mode;
 		data->saved_kcov = t->kcov;
 		kcov_stop(t);
 	}
@@ -835,13 +989,8 @@ static void kcov_remote_softirq_stop(struct task_struct *t)
 	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
 
 	if (data->saved_kcov) {
-		kcov_start(t, data->saved_kcov, data->saved_size,
-			   data->saved_area, data->saved_mode,
-			   data->saved_sequence);
-		data->saved_mode = 0;
-		data->saved_size = 0;
-		data->saved_area = NULL;
-		data->saved_sequence = 0;
+		kcov_start(t, data->saved_kcov, &data->saved_state);
+		data->saved_state = (struct kcov_state){ 0 };
 		data->saved_kcov = NULL;
 	}
 }
@@ -850,12 +999,11 @@ void kcov_remote_start(u64 handle)
 {
 	struct task_struct *t = current;
 	struct kcov_remote *remote;
+	struct kcov_state state;
+	unsigned long flags;
+	unsigned int size;
 	struct kcov *kcov;
-	unsigned int mode;
 	void *area;
-	unsigned int size;
-	int sequence;
-	unsigned long flags;
 
 	if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
 		return;
@@ -868,8 +1016,8 @@ void kcov_remote_start(u64 handle)
 	 * Check that kcov_remote_start() is not called twice in background
 	 * threads nor called by user tasks (with enabled kcov).
 	 */
-	mode = READ_ONCE(t->kcov_mode);
-	if (WARN_ON(in_task() && kcov_mode_enabled(mode))) {
+	state.mode = READ_ONCE(t->kcov_state.mode);
+	if (WARN_ON(in_task() && kcov_mode_enabled(state.mode))) {
 		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
 		return;
 	}
@@ -891,7 +1039,7 @@ void kcov_remote_start(u64 handle)
 		return;
 	}
 	kcov_debug("handle = %llx, context: %s\n", handle,
-		in_task() ? "task" : "softirq");
+		   in_task() ? "task" : "softirq");
 	kcov = remote->kcov;
 	/* Put in kcov_remote_stop(). */
 	kcov_get(kcov);
@@ -899,8 +1047,8 @@ void kcov_remote_start(u64 handle)
 	 * Read kcov fields before unlock to prevent races with
 	 * KCOV_DISABLE / kcov_remote_reset().
 	 */
-	mode = kcov->mode;
-	sequence = kcov->sequence;
+	state.mode = kcov->state.mode;
+	state.s.sequence = kcov->state.s.sequence;
 	if (in_task()) {
 		size = kcov->remote_size;
 		area = kcov_remote_area_get(size);
@@ -921,22 +1069,25 @@ void kcov_remote_start(u64 handle)
 		local_lock_irqsave(&kcov_percpu_data.lock, flags);
 	}
 
+	state.s.area = area;
+	state.s.size = size;
+	state.s.trace = area;
+	state.s.trace_size = size;
 	/* Reset coverage size. */
-	*(u64 *)area = 0;
+	state.s.trace[0] = 0;
 
 	if (in_serving_softirq()) {
 		kcov_remote_softirq_start(t);
 		t->kcov_softirq = 1;
 	}
-	kcov_start(t, kcov, size, area, mode, sequence);
+	kcov_start(t, kcov, &state);
 
 	local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
-
 }
 EXPORT_SYMBOL(kcov_remote_start);
 
 static void kcov_move_area(enum kcov_mode mode, void *dst_area,
-		unsigned int dst_area_size, void *src_area)
+			   unsigned int dst_area_size, void *src_area)
 {
 	u64 word_size = sizeof(unsigned long);
 	u64 count_size, entry_size_log;
@@ -944,8 +1095,8 @@ static void kcov_move_area(enum kcov_mode mode, void *dst_area,
 	void *dst_entries, *src_entries;
 	u64 dst_occupied, dst_free, bytes_to_move, entries_moved;
 
-	kcov_debug("%px %u <= %px %lu\n",
-		dst_area, dst_area_size, src_area, *(unsigned long *)src_area);
+	kcov_debug("%px %u <= %px %lu\n", dst_area, dst_area_size, src_area,
+		   *(unsigned long *)src_area);
 
 	switch (mode) {
 	case KCOV_MODE_TRACE_PC:
@@ -967,8 +1118,8 @@ static void kcov_move_area(enum kcov_mode mode, void *dst_area,
 	}
 
 	/* As arm can't divide u64 integers use log of entry size. */
-	if (dst_len > ((dst_area_size * word_size - count_size) >>
-				entry_size_log))
+	if (dst_len >
+	    ((dst_area_size * word_size - count_size) >> entry_size_log))
 		return;
 	dst_occupied = count_size + (dst_len << entry_size_log);
 	dst_free = dst_area_size * word_size - dst_occupied;
@@ -996,8 +1147,8 @@ void kcov_remote_stop(void)
 	struct task_struct *t = current;
 	struct kcov *kcov;
 	unsigned int mode;
-	void *area;
-	unsigned int size;
+	void *area, *trace;
+	unsigned int size, trace_size;
 	int sequence;
 	unsigned long flags;
 
@@ -1006,7 +1157,7 @@ void kcov_remote_stop(void)
 
 	local_lock_irqsave(&kcov_percpu_data.lock, flags);
 
-	mode = READ_ONCE(t->kcov_mode);
+	mode = READ_ONCE(t->kcov_state.mode);
 	barrier();
 	if (!kcov_mode_enabled(mode)) {
 		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
@@ -1027,9 +1178,11 @@ void kcov_remote_stop(void)
 	}
 
 	kcov = t->kcov;
-	area = t->kcov_area;
-	size = t->kcov_size;
-	sequence = t->kcov_sequence;
+	area = t->kcov_state.s.area;
+	size = t->kcov_state.s.size;
+	trace = t->kcov_state.s.trace;
+	trace_size = t->kcov_state.s.trace_size;
+	sequence = t->kcov_state.s.sequence;
 
 	kcov_stop(t);
 	if (in_serving_softirq()) {
@@ -1042,8 +1195,9 @@ void kcov_remote_stop(void)
 	 * KCOV_DISABLE could have been called between kcov_remote_start()
 	 * and kcov_remote_stop(), hence the sequence check.
 	 */
-	if (sequence == kcov->sequence && kcov->remote)
-		kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
+	if (sequence == kcov->state.s.sequence && kcov->remote)
+		kcov_move_area(kcov->state.mode, kcov->state.s.area,
+			       kcov->state.s.size, area);
 	spin_unlock(&kcov->lock);
 
 	if (in_task()) {
@@ -1086,10 +1240,10 @@ static void __init selftest(void)
 	 * potentially traced functions in this region.
 	 */
 	start = jiffies;
-	current->kcov_mode = KCOV_MODE_TRACE_PC;
+	current->kcov_state.mode = KCOV_MODE_TRACE_PC;
 	while ((jiffies - start) * MSEC_PER_SEC / HZ < 300)
 		;
-	current->kcov_mode = 0;
+	current->kcov_state.mode = 0;
 	pr_err("done running self test\n");
 }
 #endif
 
@@ -1100,7 +1254,8 @@ static int __init kcov_init(void)
 
 	for_each_possible_cpu(cpu) {
 		void *area = vmalloc_node(CONFIG_KCOV_IRQ_AREA_SIZE *
-				sizeof(unsigned long), cpu_to_node(cpu));
+						  sizeof(unsigned long),
+					  cpu_to_node(cpu));
 		if (!area)
 			return -ENOMEM;
 		per_cpu_ptr(&kcov_percpu_data, cpu)->irq_area = area;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1af972a92d06f6..6712d74ba7e1d1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2135,6 +2135,8 @@ config ARCH_HAS_KCOV
 config CC_HAS_SANCOV_TRACE_PC
 	def_bool $(cc-option,-fsanitize-coverage=trace-pc)
 
+config CC_HAS_SANCOV_TRACE_PC_GUARD
+	def_bool $(cc-option,-fsanitize-coverage=trace-pc-guard)
 
 config KCOV
 	bool "Code coverage for fuzzing"
@@ -2151,6 +2153,20 @@ config KCOV
 
 	  For more details, see Documentation/dev-tools/kcov.rst.
 
+config KCOV_ENABLE_GUARDS
+	bool "Use -fsanitize-coverage=trace-pc-guard for kcov"
+	depends on KCOV
+	depends on CC_HAS_SANCOV_TRACE_PC_GUARD
+	help
+	  Use coverage guards instrumentation for kcov, passing
+	  -fsanitize-coverage=trace-pc-guard to the compiler.
+
+	  Every coverage callback is associated with a global variable that
+	  makes it possible to deduplicate coverage efficiently at collection
+	  time.
+
+	  This comes at the cost of increased binary size (4 bytes of .bss
+	  per basic block, plus 1-2 instructions to pass an extra parameter).
+
 config KCOV_ENABLE_COMPARISONS
 	bool "Enable comparison operands collection by KCOV"
 	depends on KCOV
diff --git a/scripts/Makefile.kcov b/scripts/Makefile.kcov
index 67e8cfe3474b7d..ec63d471d57739 100644
--- a/scripts/Makefile.kcov
+++ b/scripts/Makefile.kcov
@@ -1,5 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0-only
+ifeq ($(CONFIG_KCOV_ENABLE_GUARDS),y)
+kcov-flags-$(CONFIG_CC_HAS_SANCOV_TRACE_PC_GUARD) += -fsanitize-coverage=trace-pc-guard
+else
 kcov-flags-$(CONFIG_CC_HAS_SANCOV_TRACE_PC) += -fsanitize-coverage=trace-pc
+endif
 kcov-flags-$(CONFIG_KCOV_ENABLE_COMPARISONS) += -fsanitize-coverage=trace-cmp
 kcov-flags-$(CONFIG_GCC_PLUGIN_SANCOV) += -fplugin=$(objtree)/scripts/gcc-plugins/sancov_plugin.so
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 753dbc4f819853..a201957482bf48 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1148,6 +1148,7 @@ static const char *uaccess_safe_builtin[] = {
 	"write_comp_data",
 	"check_kcov_mode",
 	"__sanitizer_cov_trace_pc",
+	"__sanitizer_cov_trace_pc_guard",
 	"__sanitizer_cov_trace_const_cmp1",
 	"__sanitizer_cov_trace_const_cmp2",
 	"__sanitizer_cov_trace_const_cmp4",