kcov: code coverage for fuzzing
===============================

kcov exposes kernel code coverage information in a form suitable for coverage-
guided fuzzing (randomized testing). Coverage data of a running kernel is
exported via the "kcov" debugfs file. Coverage collection is enabled on a task
basis, and thus it can capture precise coverage of a single system call.

Note that kcov does not aim to collect as much coverage as possible. It aims
to collect more or less stable coverage that is a function of syscall inputs.
To achieve this goal it does not collect coverage in soft/hard interrupts,
and instrumentation of some inherently non-deterministic parts of the kernel
is disabled (e.g. scheduler, locking).

kcov is also able to collect comparison operands from the instrumented code
(this feature currently requires that the kernel is compiled with clang).

Prerequisites
-------------

Configure the kernel with::

        CONFIG_KCOV=y

CONFIG_KCOV requires gcc 6.1.0 or later.

If the comparison operands need to be collected, set::

        CONFIG_KCOV_ENABLE_COMPARISONS=y

Profiling data will only become accessible once debugfs has been mounted::

        mount -t debugfs none /sys/kernel/debug

Coverage collection
-------------------

The following program demonstrates coverage collection from within a test
program using kcov:

.. code-block:: c

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/stat.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <fcntl.h>

    #define KCOV_INIT_TRACE                     _IOR('c', 1, unsigned long)
    #define KCOV_ENABLE                         _IO('c', 100)
    #define KCOV_DISABLE                        _IO('c', 101)
    #define COVER_SIZE                          (64<<10)

    #define KCOV_TRACE_PC  0
    #define KCOV_TRACE_CMP 1

    int main(int argc, char **argv)
    {
            int fd;
            unsigned long *cover, n, i;

            /* A single file descriptor allows coverage collection on a
             * single thread.
             */
            fd = open("/sys/kernel/debug/kcov", O_RDWR);
            if (fd == -1)
                    perror("open"), exit(1);
            /* Setup trace mode and trace size. */
            if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
                    perror("ioctl"), exit(1);
            /* Mmap buffer shared between kernel- and user-space. */
            cover = (unsigned long*)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                                         PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if ((void*)cover == MAP_FAILED)
                    perror("mmap"), exit(1);
            /* Enable coverage collection on the current thread. */
            if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
                    perror("ioctl"), exit(1);
            /* Reset coverage from the tail of the ioctl() call. */
            __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
            /* Call the target syscall. */
            read(-1, NULL, 0);
            /* Read number of PCs collected. */
            n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
            for (i = 0; i < n; i++)
                    printf("0x%lx\n", cover[i + 1]);
            /* Disable coverage collection for the current thread. After this call
             * coverage can be enabled for a different thread.
             */
            if (ioctl(fd, KCOV_DISABLE, 0))
                    perror("ioctl"), exit(1);
            /* Free resources. */
            if (munmap(cover, COVER_SIZE * sizeof(unsigned long)))
                    perror("munmap"), exit(1);
            if (close(fd))
                    perror("close"), exit(1);
            return 0;
    }
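
The program prints raw kernel PCs; they can be symbolized by piping them
through addr2line. One possible invocation, assuming the program above is
compiled as ./kcov_test (a placeholder name) and an uncompressed vmlinux
with debugging info is available in the current directory::

        ./kcov_test | addr2line -f -e vmlinux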

After piping through addr2line, the output of the program looks as follows::

    SyS_read
    fs/read_write.c:562
    __fdget_pos
    fs/file.c:774
    __fget_light
    fs/file.c:746
    __fget_light
    fs/file.c:750
    __fget_light
    fs/file.c:760
    __fdget_pos
    fs/file.c:784
    SyS_read
    fs/read_write.c:562

If a program needs to collect coverage from several threads (independently),
it needs to open /sys/kernel/debug/kcov in each thread separately.

The interface is fine-grained to allow efficient forking of test processes.
That is, a parent process opens /sys/kernel/debug/kcov, enables trace mode,
mmaps the coverage buffer, and then forks child processes in a loop. The child
processes only need to enable coverage (it is disabled automatically when a
thread exits), as sketched below.
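
A minimal sketch of this forking workflow, under the assumption that the
includes and defines from the first example are reused (plus <sys/wait.h>
for waitpid()), and with a single fork instead of a loop for brevity:

.. code-block:: c

    /* Same includes and defines as in the first example, plus: */
    #include <sys/wait.h>

    int main(int argc, char **argv)
    {
            int fd, status;
            unsigned long *cover, n, i;
            pid_t pid;

            /* Parent: open kcov, set up trace mode and mmap the shared buffer. */
            fd = open("/sys/kernel/debug/kcov", O_RDWR);
            if (fd == -1)
                    perror("open"), exit(1);
            if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
                    perror("ioctl"), exit(1);
            cover = (unsigned long*)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                                         PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if ((void*)cover == MAP_FAILED)
                    perror("mmap"), exit(1);
            pid = fork();
            if (pid == -1)
                    perror("fork"), exit(1);
            if (pid == 0) {
                    /* Child: enable coverage on this task and run the test.
                     * Coverage is disabled automatically when the child exits.
                     */
                    if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
                            perror("ioctl"), exit(1);
                    __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
                    read(-1, NULL, 0);
                    exit(0);
            }
            /* Parent: wait for the child and read the coverage it produced. */
            if (waitpid(pid, &status, 0) == -1)
                    perror("waitpid"), exit(1);
            n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
            for (i = 0; i < n; i++)
                    printf("0x%lx\n", cover[i + 1]);
            return 0;
    }

Since the buffer is mapped MAP_SHARED, the coverage written while the child
was running is visible to the parent through the same mapping.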

Comparison operands collection
------------------------------

Comparison operands collection is similar to coverage collection:

.. code-block:: c

    /* Same includes and defines as above. */

    /* Number of 64-bit words per record. */
    #define KCOV_WORDS_PER_CMP 4

    /*
     * The format for the types of collected comparisons.
     *
     * Bit 0 shows whether one of the arguments is a compile-time constant.
     * Bits 1 & 2 contain log2 of the argument size, up to 8 bytes.
     */

    #define KCOV_CMP_CONST          (1 << 0)
    #define KCOV_CMP_SIZE(n)        ((n) << 1)
    #define KCOV_CMP_MASK           KCOV_CMP_SIZE(3)

    int main(int argc, char **argv)
    {
            int fd;
            uint64_t *cover, type, arg1, arg2, is_const, size, ip;
            unsigned long n, i;

            fd = open("/sys/kernel/debug/kcov", O_RDWR);
            if (fd == -1)
                    perror("open"), exit(1);
            if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
                    perror("ioctl"), exit(1);
            /*
             * Note that the buffer pointer is of type uint64_t*, because all
             * the comparison operands are promoted to uint64_t.
             */
            cover = (uint64_t *)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                                     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if ((void*)cover == MAP_FAILED)
                    perror("mmap"), exit(1);
            /* Note KCOV_TRACE_CMP instead of KCOV_TRACE_PC. */
            if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_CMP))
                    perror("ioctl"), exit(1);
            __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
            read(-1, NULL, 0);
            /* Read number of comparisons collected. */
            n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
            for (i = 0; i < n; i++) {
                    type = cover[i * KCOV_WORDS_PER_CMP + 1];
                    /* arg1 and arg2 - operands of the comparison. */
                    arg1 = cover[i * KCOV_WORDS_PER_CMP + 2];
                    arg2 = cover[i * KCOV_WORDS_PER_CMP + 3];
                    /* ip - caller address. */
                    ip = cover[i * KCOV_WORDS_PER_CMP + 4];
                    /* size - size of the operands. */
                    size = 1 << ((type & KCOV_CMP_MASK) >> 1);
                    /* is_const - true if either operand is a compile-time constant. */
                    is_const = type & KCOV_CMP_CONST;
                    printf("ip: 0x%lx type: 0x%lx, arg1: 0x%lx, arg2: 0x%lx, "
                            "size: %lu, %s\n",
                            ip, type, arg1, arg2, size,
                            is_const ? "const" : "non-const");
            }
            if (ioctl(fd, KCOV_DISABLE, 0))
                    perror("ioctl"), exit(1);
            /* Free resources. */
            if (munmap(cover, COVER_SIZE * sizeof(unsigned long)))
                    perror("munmap"), exit(1);
            if (close(fd))
                    perror("close"), exit(1);
            return 0;
    }

Note that the kcov modes (coverage collection or comparison operands
collection) are mutually exclusive.

Remote coverage collection
--------------------------

With KCOV_ENABLE coverage is collected only for syscalls that are issued
from the current process. With KCOV_REMOTE_ENABLE it's possible to collect
coverage for arbitrary parts of the kernel code, provided that those parts
are annotated with kcov_remote_start()/kcov_remote_stop().

This allows collecting coverage from two types of kernel background
threads: the global ones, that are spawned during kernel boot in a limited
number of instances (e.g. one USB hub_event() worker thread is spawned per
USB HCD); and the local ones, that are spawned when a user interacts with
some kernel interface (e.g. vhost workers); as well as from soft
interrupts.

To enable collecting coverage from a global background thread or from a
softirq, a unique global handle must be assigned and passed to the
corresponding kcov_remote_start() call. A userspace process can then pass
a list of such handles to the KCOV_REMOTE_ENABLE ioctl in the handles
array field of the kcov_remote_arg struct. This attaches the used kcov
device to the code sections that are referenced by those handles.

Since there might be many local background threads spawned from different
userspace processes, we can't use a single global handle per annotation.
Instead, the userspace process passes a non-zero handle through the
common_handle field of the kcov_remote_arg struct. This common handle gets
saved to the kcov_handle field in the current task_struct and needs to be
passed to the newly spawned threads via custom annotations. Those threads
should in turn be annotated with kcov_remote_start()/kcov_remote_stop().

Internally kcov stores handles as u64 integers. The top byte of a handle
is used to denote the id of a subsystem that this handle belongs to, and
the lower 4 bytes are used to denote the id of a thread instance within
that subsystem. A reserved value 0 is used as a subsystem id for common
handles, as they don't belong to a particular subsystem. Bytes 4-7 are
currently reserved and must be zero. In the future the number of bytes
used for the subsystem or handle ids might be increased.

When a particular userspace process collects coverage via a common
handle, kcov will collect coverage for each code section that is annotated
to use the common handle obtained as kcov_handle from the current
task_struct. However, non-common handles allow collecting coverage
selectively from different subsystems.
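
On the kernel side such an annotation is just a kcov_remote_start()/
kcov_remote_stop() pair around the code of interest. The following is only a
rough sketch: kcov_remote_start(), kcov_remote_stop() and kcov_remote_handle()
are the actual kernel helpers, while the worker function and its argument are
hypothetical (the real annotations live in the corresponding subsystems, e.g.
in the USB hub_event() worker):

.. code-block:: c

    #include <linux/kcov.h>
    #include <linux/usb.h>

    /* Hypothetical kernel-side background worker serving one USB bus. */
    static void example_usb_worker(struct usb_bus *bus)
    {
            /* Attribute coverage of the section below to this bus's handle. */
            kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_USB, bus->busnum));
            /* ... work whose coverage should be exported to the attached kcov ... */
            kcov_remote_stop();
    }

On the userspace side, the following program enables remote coverage
collection both via a common handle and for USB bus #1: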
.. code-block:: c

    /* Same includes as above, plus <linux/types.h> for __u32 and __aligned_u64. */

    struct kcov_remote_arg {
            __u32           trace_mode;
            __u32           area_size;
            __u32           num_handles;
            __aligned_u64   common_handle;
            __aligned_u64   handles[0];
    };

    #define KCOV_INIT_TRACE                     _IOR('c', 1, unsigned long)
    #define KCOV_DISABLE                        _IO('c', 101)
    #define KCOV_REMOTE_ENABLE                  _IOW('c', 102, struct kcov_remote_arg)

    #define COVER_SIZE                          (64 << 10)

    #define KCOV_TRACE_PC                       0

    #define KCOV_SUBSYSTEM_COMMON               (0x00ull << 56)
    #define KCOV_SUBSYSTEM_USB                  (0x01ull << 56)

    #define KCOV_SUBSYSTEM_MASK                 (0xffull << 56)
    #define KCOV_INSTANCE_MASK                  (0xffffffffull)

    static inline __u64 kcov_remote_handle(__u64 subsys, __u64 inst)
    {
            if (subsys & ~KCOV_SUBSYSTEM_MASK || inst & ~KCOV_INSTANCE_MASK)
                    return 0;
            return subsys | inst;
    }

    #define KCOV_COMMON_ID                      0x42
    #define KCOV_USB_BUS_NUM                    1

    int main(int argc, char **argv)
    {
            int fd;
            unsigned long *cover, n, i;
            struct kcov_remote_arg *arg;

            fd = open("/sys/kernel/debug/kcov", O_RDWR);
            if (fd == -1)
                    perror("open"), exit(1);
            if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
                    perror("ioctl"), exit(1);
            cover = (unsigned long*)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                                         PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if ((void*)cover == MAP_FAILED)
                    perror("mmap"), exit(1);

            /* Enable coverage collection via common handle and from USB bus #1. */
            arg = calloc(1, sizeof(*arg) + sizeof(uint64_t));
            if (!arg)
                    perror("calloc"), exit(1);
            arg->trace_mode = KCOV_TRACE_PC;
            arg->area_size = COVER_SIZE;
            arg->num_handles = 1;
            arg->common_handle = kcov_remote_handle(KCOV_SUBSYSTEM_COMMON,
                                                    KCOV_COMMON_ID);
            arg->handles[0] = kcov_remote_handle(KCOV_SUBSYSTEM_USB,
                                                 KCOV_USB_BUS_NUM);
            if (ioctl(fd, KCOV_REMOTE_ENABLE, arg))
                    perror("ioctl"), free(arg), exit(1);
            free(arg);

            /*
             * Here the user needs to trigger execution of a kernel code section
             * that is either annotated with the common handle, or to trigger some
             * activity on USB bus #1.
             */
            sleep(2);

            n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
            for (i = 0; i < n; i++)
                    printf("0x%lx\n", cover[i + 1]);
            if (ioctl(fd, KCOV_DISABLE, 0))
                    perror("ioctl"), exit(1);
            if (munmap(cover, COVER_SIZE * sizeof(unsigned long)))
                    perror("munmap"), exit(1);
            if (close(fd))
                    perror("close"), exit(1);
            return 0;
    }