Skip to content

Commit

Permalink
kcov: add ioctl(KCOV_UNIQUE_ENABLE)
Browse files Browse the repository at this point in the history
ioctl(KCOV_UNIQUE_ENABLE) enables collection of deduplicated coverage
in the presence of CONFIG_KCOV_ENABLE_GUARDS.

The buffer shared with the userspace is divided into two parts: one holding
a bitmap, and the other one holding the trace. The single parameter of
ioctl(KCOV_UNIQUE_ENABLE) determines the number of words used for the bitmap.

Each __sanitizer_cov_trace_pc_guard() instrumentation hook receives a
pointer to a unique guard variable. Upon the first call of each hook,
the guard variable is initialized with a unique integer, which is used
to map those hooks to bits in the bitmap. In the new coverage collection mode,
the kernel first checks whether the bit corresponding to a particular hook
is set, and then, if it is not, the PC is written into the trace buffer,
and the bit is set.

Signed-off-by: Alexander Potapenko <[email protected]>
  • Loading branch information
ramosian-glider committed Mar 4, 2025
1 parent 40153c0 commit 127e5b3
Show file tree
Hide file tree
Showing 4 changed files with 161 additions and 4 deletions.
8 changes: 8 additions & 0 deletions include/linux/kcov-state.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,14 @@ struct kcov_state {
/* Buffer for coverage collection, shared with the userspace. */
unsigned long *trace;

/* Size of the bitmap (in bits). */
unsigned int bitmap_size;
/*
* Bitmap for coverage deduplication, shared with the
* userspace.
*/
unsigned long *bitmap;

/*
* KCOV sequence number: incremented each time kcov is
* reenabled, used by kcov_remote_stop(), see the comment there.
Expand Down
1 change: 1 addition & 0 deletions include/linux/kcov.h
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ enum kcov_mode {
KCOV_MODE_TRACE_CMP = 3,
/* The process owns a KCOV remote reference. */
KCOV_MODE_REMOTE = 4,
KCOV_MODE_TRACE_UNIQUE_PC = 5,
};

#define KCOV_IN_CTXSW (1 << 30)
Expand Down
3 changes: 3 additions & 0 deletions include/uapi/linux/kcov.h
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ struct kcov_remote_arg {
#define KCOV_ENABLE _IO('c', 100)
#define KCOV_DISABLE _IO('c', 101)
#define KCOV_REMOTE_ENABLE _IOW('c', 102, struct kcov_remote_arg)
#define KCOV_UNIQUE_ENABLE _IOR('c', 103, unsigned long)

enum {
/*
Expand All @@ -35,6 +36,8 @@ enum {
KCOV_TRACE_PC = 0,
/* Collecting comparison operands mode. */
KCOV_TRACE_CMP = 1,
/* Deduplicate collected PCs. */
KCOV_TRACE_UNIQUE_PC = 2,
};

/*
Expand Down
153 changes: 149 additions & 4 deletions kernel/kcov.c
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,11 @@

#include <asm/setup.h>

#ifdef CONFIG_KCOV_ENABLE_GUARDS
atomic_t kcov_guard_max_index = ATOMIC_INIT(1);
extern u32 __sancov_guards_start, __sancov_guards_end;
#endif

#define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)

/* Number of 64-bit words written per one comparison: */
Expand Down Expand Up @@ -229,14 +234,98 @@ void notrace __sanitizer_cov_trace_pc(void)
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
#else

/* File-local spare index per CPU; 'static' keeps it out of the global namespace. */
static DEFINE_PER_CPU(u32, saved_index);
/*
 * Assign an index to a guard variable that does not have one yet.
 * For an unlikely case of a race with another task executing the same basic
 * block, we keep a free index in a per-cpu variable.
 * In an even less likely case a task can lose a race and get rescheduled onto a
 * CPU that already has a saved index, discarding that index. This will result
 * in an unused hole in a bitmap, but such events should not impact the overall
 * memory consumption.
 *
 * Returns the index now stored in *guard (ours if we won the race to
 * initialize it, the other task's if we lost).
 */
static notrace u32 init_pc_guard(u32 *guard)
{
	/* Declarations first, per kernel coding style (no decls after statements). */
	u32 index, old_guard;

	preempt_disable_notrace();
	/* If current CPU has a free index from the previous call, take it. */
	index = this_cpu_read(saved_index);
	/* Otherwise, allocate a new index. */
	if (!index)
		index = atomic_inc_return(&kcov_guard_max_index) - 1;

	/* Index cannot overflow. */
	WARN_ON(!index);
	/*
	 * Make sure another task is not initializing the same guard
	 * concurrently.
	 */
	old_guard = cmpxchg(guard, 0, index);
	if (old_guard) {
		/* We lost the race, save the index for future use. */
		this_cpu_write(saved_index, index);
		preempt_enable_notrace();
		return old_guard;
	}
	/* Otherwise we won the race, discard the saved index. */
	this_cpu_write(saved_index, 0);
	preempt_enable_notrace();
	return index;
}

/*
 * Instrumentation hook called at every basic block when the kernel is built
 * with -fsanitize-coverage=trace-pc-guard. @guard points to this hook's
 * unique guard word; a zero guard means no index has been assigned yet.
 *
 * NOTE(review): the scraped diff interleaved removed pre-patch lines (the old
 * unconditional check_kcov_mode()/write calls) with the new body; they are
 * dropped here to restore the intended post-patch function.
 */
void notrace __sanitizer_cov_trace_pc_guard(u32 *guard)
{
	struct task_struct *t = current;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	u32 pc_index;

	/*
	 * In KCOV_MODE_TRACE_PC mode, behave similarly to
	 * __sanitizer_cov_trace_pc().
	 */
	if (check_kcov_mode(KCOV_MODE_TRACE_PC, t)) {
		sanitizer_cov_write_subsequent(t->kcov_state.s.trace,
					       t->kcov_state.s.trace_size, ip);
		return;
	}
	/*
	 * In KCOV_MODE_TRACE_UNIQUE_PC, deduplicate coverage on the fly.
	 *
	 * TODO: when collecting only sparse coverage (if exactly one of
	 * t->kcov_state.s.trace or t->kcov_state.s.bitmap is NULL), there is
	 * no easy way to snapshot the coverage map before calling
	 * ioctl(KCOV_DISABLE), and the latter may pollute the map.
	 * We may need a flag to atomically enable/disable coverage collection.
	 */
	if (!check_kcov_mode(KCOV_MODE_TRACE_UNIQUE_PC, t))
		return;

	pc_index = READ_ONCE(*guard);
	if (!pc_index)
		pc_index = init_pc_guard(guard);

	/* Use a bitmap for coverage deduplication. */
	if (t->kcov_state.s.bitmap) {
		/* If this is known coverage, do not write the trace. */
		if (likely(pc_index < t->kcov_state.s.bitmap_size))
			if (test_and_set_bit(pc_index, t->kcov_state.s.bitmap))
				return;
		/* If we got here and trace is allocated, write the new PC to it. */
		if (t->kcov_state.s.trace)
			sanitizer_cov_write_subsequent(
				t->kcov_state.s.trace,
				t->kcov_state.s.trace_size, ip);
		return;
	}
	/*
	 * At this point, trace must be valid. Since there is no bitmap, use the
	 * trace itself as a sparse array.
	 */
	if (pc_index < t->kcov_state.s.trace_size)
		t->kcov_state.s.trace[pc_index] = ip;
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc_guard);

Expand Down Expand Up @@ -552,6 +641,13 @@ static int kcov_get_mode(unsigned long arg)
{
if (arg == KCOV_TRACE_PC)
return KCOV_MODE_TRACE_PC;
else if (arg == KCOV_TRACE_UNIQUE_PC)
#ifdef CONFIG_KCOV_ENABLE_GUARDS
return KCOV_MODE_TRACE_UNIQUE_PC;
#else
return -ENOTSUPP;

#endif
else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
return KCOV_MODE_TRACE_CMP;
Expand Down Expand Up @@ -594,6 +690,53 @@ static inline bool kcov_check_handle(u64 handle, bool common_valid,
return false;
}

static long kcov_handle_unique_enable(struct kcov *kcov,
unsigned long bitmap_words)
{
u32 total_bytes = 0, bitmap_bytes = 0;
struct task_struct *t;

if (!IS_ENABLED(CONFIG_KCOV_ENABLE_GUARDS))
return -ENOTSUPP;
if (kcov->state.mode != KCOV_MODE_INIT || !kcov->state.s.area)
return -EINVAL;
t = current;
if (kcov->t != NULL || t->kcov != NULL)
return -EBUSY;

if (bitmap_words) {
bitmap_bytes = (u32)(bitmap_words * sizeof(unsigned long));
if (bitmap_bytes > kcov->state.s.size) {
return -EINVAL;
}
kcov->state.s.bitmap_size = bitmap_bytes * 8;
kcov->state.s.bitmap = kcov->state.s.area;
total_bytes += bitmap_bytes;
} else {
kcov->state.s.bitmap_size = 0;
kcov->state.s.bitmap = NULL;
}
if (bitmap_bytes < kcov->state.s.size) {
kcov->state.s.trace_size = (kcov->state.s.size - bitmap_bytes) /
sizeof(unsigned long);
kcov->state.s.trace =
(unsigned long *)((char *)kcov->state.s.area +
bitmap_bytes);
} else {
kcov->state.s.trace_size = 0;
kcov->state.s.trace = NULL;
}

kcov_fault_in_area(kcov);
kcov->state.mode = KCOV_MODE_TRACE_UNIQUE_PC;
kcov_start(t, kcov, &kcov->state);
kcov->t = t;
/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
kcov_get(kcov);

return 0;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
unsigned long arg)
{
Expand Down Expand Up @@ -627,6 +770,8 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
kcov_get(kcov);
return 0;
case KCOV_UNIQUE_ENABLE:
return kcov_handle_unique_enable(kcov, arg);
case KCOV_DISABLE:
/* Disable coverage for the current task. */
unused = arg;
Expand Down

0 comments on commit 127e5b3

Please sign in to comment.