1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2*4882a593Smuzhiyun /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3*4882a593Smuzhiyun * 4*4882a593Smuzhiyun * This program is free software; you can redistribute it and/or 5*4882a593Smuzhiyun * modify it under the terms of version 2 of the GNU General Public 6*4882a593Smuzhiyun * License as published by the Free Software Foundation. 7*4882a593Smuzhiyun */ 8*4882a593Smuzhiyun #ifndef _UAPI__LINUX_BPF_H__ 9*4882a593Smuzhiyun #define _UAPI__LINUX_BPF_H__ 10*4882a593Smuzhiyun 11*4882a593Smuzhiyun #include <linux/types.h> 12*4882a593Smuzhiyun #include <linux/bpf_common.h> 13*4882a593Smuzhiyun 14*4882a593Smuzhiyun /* Extended instruction set based on top of classic BPF */ 15*4882a593Smuzhiyun 16*4882a593Smuzhiyun /* instruction classes */ 17*4882a593Smuzhiyun #define BPF_JMP32 0x06 /* jmp mode in word width */ 18*4882a593Smuzhiyun #define BPF_ALU64 0x07 /* alu mode in double word width */ 19*4882a593Smuzhiyun 20*4882a593Smuzhiyun /* ld/ldx fields */ 21*4882a593Smuzhiyun #define BPF_DW 0x18 /* double word (64-bit) */ 22*4882a593Smuzhiyun #define BPF_XADD 0xc0 /* exclusive add */ 23*4882a593Smuzhiyun 24*4882a593Smuzhiyun /* alu/jmp fields */ 25*4882a593Smuzhiyun #define BPF_MOV 0xb0 /* mov reg to reg */ 26*4882a593Smuzhiyun #define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */ 27*4882a593Smuzhiyun 28*4882a593Smuzhiyun /* change endianness of a register */ 29*4882a593Smuzhiyun #define BPF_END 0xd0 /* flags for endianness conversion: */ 30*4882a593Smuzhiyun #define BPF_TO_LE 0x00 /* convert to little-endian */ 31*4882a593Smuzhiyun #define BPF_TO_BE 0x08 /* convert to big-endian */ 32*4882a593Smuzhiyun #define BPF_FROM_LE BPF_TO_LE 33*4882a593Smuzhiyun #define BPF_FROM_BE BPF_TO_BE 34*4882a593Smuzhiyun 35*4882a593Smuzhiyun /* jmp encodings */ 36*4882a593Smuzhiyun #define BPF_JNE 0x50 /* jump != */ 37*4882a593Smuzhiyun #define BPF_JLT 0xa0 /* LT is unsigned, '<' */ 38*4882a593Smuzhiyun #define BPF_JLE 0xb0 /* LE is unsigned, '<=' */ 39*4882a593Smuzhiyun #define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ 40*4882a593Smuzhiyun #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ 41*4882a593Smuzhiyun #define BPF_JSLT 0xc0 /* SLT is signed, '<' */ 42*4882a593Smuzhiyun #define BPF_JSLE 0xd0 /* SLE is signed, '<=' */ 43*4882a593Smuzhiyun #define BPF_CALL 0x80 /* function call */ 44*4882a593Smuzhiyun #define BPF_EXIT 0x90 /* function return */ 45*4882a593Smuzhiyun 46*4882a593Smuzhiyun /* Register numbers */ 47*4882a593Smuzhiyun enum { 48*4882a593Smuzhiyun BPF_REG_0 = 0, 49*4882a593Smuzhiyun BPF_REG_1, 50*4882a593Smuzhiyun BPF_REG_2, 51*4882a593Smuzhiyun BPF_REG_3, 52*4882a593Smuzhiyun BPF_REG_4, 53*4882a593Smuzhiyun BPF_REG_5, 54*4882a593Smuzhiyun BPF_REG_6, 55*4882a593Smuzhiyun BPF_REG_7, 56*4882a593Smuzhiyun BPF_REG_8, 57*4882a593Smuzhiyun BPF_REG_9, 58*4882a593Smuzhiyun BPF_REG_10, 59*4882a593Smuzhiyun __MAX_BPF_REG, 60*4882a593Smuzhiyun }; 61*4882a593Smuzhiyun 62*4882a593Smuzhiyun /* BPF has 10 general purpose 64-bit registers and stack frame. */ 63*4882a593Smuzhiyun #define MAX_BPF_REG __MAX_BPF_REG 64*4882a593Smuzhiyun 65*4882a593Smuzhiyun struct bpf_insn { 66*4882a593Smuzhiyun __u8 code; /* opcode */ 67*4882a593Smuzhiyun __u8 dst_reg:4; /* dest register */ 68*4882a593Smuzhiyun __u8 src_reg:4; /* source register */ 69*4882a593Smuzhiyun __s16 off; /* signed offset */ 70*4882a593Smuzhiyun __s32 imm; /* signed immediate constant */ 71*4882a593Smuzhiyun }; 72*4882a593Smuzhiyun 73*4882a593Smuzhiyun /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */ 74*4882a593Smuzhiyun struct bpf_lpm_trie_key { 75*4882a593Smuzhiyun __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ 76*4882a593Smuzhiyun __u8 data[0]; /* Arbitrary size */ 77*4882a593Smuzhiyun }; 78*4882a593Smuzhiyun 79*4882a593Smuzhiyun struct bpf_cgroup_storage_key { 80*4882a593Smuzhiyun __u64 cgroup_inode_id; /* cgroup inode id */ 81*4882a593Smuzhiyun __u32 attach_type; /* program attach type */ 82*4882a593Smuzhiyun }; 83*4882a593Smuzhiyun 84*4882a593Smuzhiyun union bpf_iter_link_info { 85*4882a593Smuzhiyun struct { 86*4882a593Smuzhiyun __u32 map_fd; 87*4882a593Smuzhiyun } map; 88*4882a593Smuzhiyun }; 89*4882a593Smuzhiyun 90*4882a593Smuzhiyun /* BPF syscall commands, see bpf(2) man-page for details. */ 91*4882a593Smuzhiyun enum bpf_cmd { 92*4882a593Smuzhiyun BPF_MAP_CREATE, 93*4882a593Smuzhiyun BPF_MAP_LOOKUP_ELEM, 94*4882a593Smuzhiyun BPF_MAP_UPDATE_ELEM, 95*4882a593Smuzhiyun BPF_MAP_DELETE_ELEM, 96*4882a593Smuzhiyun BPF_MAP_GET_NEXT_KEY, 97*4882a593Smuzhiyun BPF_PROG_LOAD, 98*4882a593Smuzhiyun BPF_OBJ_PIN, 99*4882a593Smuzhiyun BPF_OBJ_GET, 100*4882a593Smuzhiyun BPF_PROG_ATTACH, 101*4882a593Smuzhiyun BPF_PROG_DETACH, 102*4882a593Smuzhiyun BPF_PROG_TEST_RUN, 103*4882a593Smuzhiyun BPF_PROG_GET_NEXT_ID, 104*4882a593Smuzhiyun BPF_MAP_GET_NEXT_ID, 105*4882a593Smuzhiyun BPF_PROG_GET_FD_BY_ID, 106*4882a593Smuzhiyun BPF_MAP_GET_FD_BY_ID, 107*4882a593Smuzhiyun BPF_OBJ_GET_INFO_BY_FD, 108*4882a593Smuzhiyun BPF_PROG_QUERY, 109*4882a593Smuzhiyun BPF_RAW_TRACEPOINT_OPEN, 110*4882a593Smuzhiyun BPF_BTF_LOAD, 111*4882a593Smuzhiyun BPF_BTF_GET_FD_BY_ID, 112*4882a593Smuzhiyun BPF_TASK_FD_QUERY, 113*4882a593Smuzhiyun BPF_MAP_LOOKUP_AND_DELETE_ELEM, 114*4882a593Smuzhiyun BPF_MAP_FREEZE, 115*4882a593Smuzhiyun BPF_BTF_GET_NEXT_ID, 116*4882a593Smuzhiyun BPF_MAP_LOOKUP_BATCH, 117*4882a593Smuzhiyun BPF_MAP_LOOKUP_AND_DELETE_BATCH, 118*4882a593Smuzhiyun BPF_MAP_UPDATE_BATCH, 119*4882a593Smuzhiyun BPF_MAP_DELETE_BATCH, 120*4882a593Smuzhiyun BPF_LINK_CREATE, 121*4882a593Smuzhiyun BPF_LINK_UPDATE, 122*4882a593Smuzhiyun BPF_LINK_GET_FD_BY_ID, 123*4882a593Smuzhiyun BPF_LINK_GET_NEXT_ID, 124*4882a593Smuzhiyun BPF_ENABLE_STATS, 125*4882a593Smuzhiyun BPF_ITER_CREATE, 126*4882a593Smuzhiyun BPF_LINK_DETACH, 127*4882a593Smuzhiyun BPF_PROG_BIND_MAP, 128*4882a593Smuzhiyun }; 129*4882a593Smuzhiyun 130*4882a593Smuzhiyun enum bpf_map_type { 131*4882a593Smuzhiyun BPF_MAP_TYPE_UNSPEC, 132*4882a593Smuzhiyun BPF_MAP_TYPE_HASH, 133*4882a593Smuzhiyun BPF_MAP_TYPE_ARRAY, 134*4882a593Smuzhiyun BPF_MAP_TYPE_PROG_ARRAY, 135*4882a593Smuzhiyun BPF_MAP_TYPE_PERF_EVENT_ARRAY, 136*4882a593Smuzhiyun BPF_MAP_TYPE_PERCPU_HASH, 137*4882a593Smuzhiyun BPF_MAP_TYPE_PERCPU_ARRAY, 138*4882a593Smuzhiyun BPF_MAP_TYPE_STACK_TRACE, 139*4882a593Smuzhiyun BPF_MAP_TYPE_CGROUP_ARRAY, 140*4882a593Smuzhiyun BPF_MAP_TYPE_LRU_HASH, 141*4882a593Smuzhiyun BPF_MAP_TYPE_LRU_PERCPU_HASH, 142*4882a593Smuzhiyun BPF_MAP_TYPE_LPM_TRIE, 143*4882a593Smuzhiyun BPF_MAP_TYPE_ARRAY_OF_MAPS, 144*4882a593Smuzhiyun BPF_MAP_TYPE_HASH_OF_MAPS, 145*4882a593Smuzhiyun BPF_MAP_TYPE_DEVMAP, 146*4882a593Smuzhiyun BPF_MAP_TYPE_SOCKMAP, 147*4882a593Smuzhiyun BPF_MAP_TYPE_CPUMAP, 148*4882a593Smuzhiyun BPF_MAP_TYPE_XSKMAP, 149*4882a593Smuzhiyun BPF_MAP_TYPE_SOCKHASH, 150*4882a593Smuzhiyun BPF_MAP_TYPE_CGROUP_STORAGE, 151*4882a593Smuzhiyun BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, 152*4882a593Smuzhiyun BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, 153*4882a593Smuzhiyun BPF_MAP_TYPE_QUEUE, 154*4882a593Smuzhiyun BPF_MAP_TYPE_STACK, 155*4882a593Smuzhiyun BPF_MAP_TYPE_SK_STORAGE, 156*4882a593Smuzhiyun BPF_MAP_TYPE_DEVMAP_HASH, 157*4882a593Smuzhiyun BPF_MAP_TYPE_STRUCT_OPS, 158*4882a593Smuzhiyun BPF_MAP_TYPE_RINGBUF, 159*4882a593Smuzhiyun BPF_MAP_TYPE_INODE_STORAGE, 160*4882a593Smuzhiyun }; 161*4882a593Smuzhiyun 162*4882a593Smuzhiyun /* Note that tracing related programs such as 163*4882a593Smuzhiyun * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT} 164*4882a593Smuzhiyun * are not subject to a stable API since kernel internal data 165*4882a593Smuzhiyun * structures can change from release to release and may 166*4882a593Smuzhiyun * therefore break existing tracing BPF programs. Tracing BPF 167*4882a593Smuzhiyun * programs correspond to /a/ specific kernel which is to be 168*4882a593Smuzhiyun * analyzed, and not /a/ specific kernel /and/ all future ones. 169*4882a593Smuzhiyun */ 170*4882a593Smuzhiyun enum bpf_prog_type { 171*4882a593Smuzhiyun BPF_PROG_TYPE_UNSPEC, 172*4882a593Smuzhiyun BPF_PROG_TYPE_SOCKET_FILTER, 173*4882a593Smuzhiyun BPF_PROG_TYPE_KPROBE, 174*4882a593Smuzhiyun BPF_PROG_TYPE_SCHED_CLS, 175*4882a593Smuzhiyun BPF_PROG_TYPE_SCHED_ACT, 176*4882a593Smuzhiyun BPF_PROG_TYPE_TRACEPOINT, 177*4882a593Smuzhiyun BPF_PROG_TYPE_XDP, 178*4882a593Smuzhiyun BPF_PROG_TYPE_PERF_EVENT, 179*4882a593Smuzhiyun BPF_PROG_TYPE_CGROUP_SKB, 180*4882a593Smuzhiyun BPF_PROG_TYPE_CGROUP_SOCK, 181*4882a593Smuzhiyun BPF_PROG_TYPE_LWT_IN, 182*4882a593Smuzhiyun BPF_PROG_TYPE_LWT_OUT, 183*4882a593Smuzhiyun BPF_PROG_TYPE_LWT_XMIT, 184*4882a593Smuzhiyun BPF_PROG_TYPE_SOCK_OPS, 185*4882a593Smuzhiyun BPF_PROG_TYPE_SK_SKB, 186*4882a593Smuzhiyun BPF_PROG_TYPE_CGROUP_DEVICE, 187*4882a593Smuzhiyun BPF_PROG_TYPE_SK_MSG, 188*4882a593Smuzhiyun BPF_PROG_TYPE_RAW_TRACEPOINT, 189*4882a593Smuzhiyun BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 190*4882a593Smuzhiyun BPF_PROG_TYPE_LWT_SEG6LOCAL, 191*4882a593Smuzhiyun BPF_PROG_TYPE_LIRC_MODE2, 192*4882a593Smuzhiyun BPF_PROG_TYPE_SK_REUSEPORT, 193*4882a593Smuzhiyun BPF_PROG_TYPE_FLOW_DISSECTOR, 194*4882a593Smuzhiyun BPF_PROG_TYPE_CGROUP_SYSCTL, 195*4882a593Smuzhiyun BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 196*4882a593Smuzhiyun BPF_PROG_TYPE_CGROUP_SOCKOPT, 197*4882a593Smuzhiyun BPF_PROG_TYPE_TRACING, 198*4882a593Smuzhiyun BPF_PROG_TYPE_STRUCT_OPS, 199*4882a593Smuzhiyun BPF_PROG_TYPE_EXT, 200*4882a593Smuzhiyun BPF_PROG_TYPE_LSM, 201*4882a593Smuzhiyun BPF_PROG_TYPE_SK_LOOKUP, 202*4882a593Smuzhiyun }; 203*4882a593Smuzhiyun 204*4882a593Smuzhiyun enum bpf_attach_type { 205*4882a593Smuzhiyun BPF_CGROUP_INET_INGRESS, 206*4882a593Smuzhiyun BPF_CGROUP_INET_EGRESS, 207*4882a593Smuzhiyun BPF_CGROUP_INET_SOCK_CREATE, 208*4882a593Smuzhiyun BPF_CGROUP_SOCK_OPS, 209*4882a593Smuzhiyun BPF_SK_SKB_STREAM_PARSER, 210*4882a593Smuzhiyun BPF_SK_SKB_STREAM_VERDICT, 211*4882a593Smuzhiyun BPF_CGROUP_DEVICE, 212*4882a593Smuzhiyun BPF_SK_MSG_VERDICT, 213*4882a593Smuzhiyun BPF_CGROUP_INET4_BIND, 214*4882a593Smuzhiyun BPF_CGROUP_INET6_BIND, 215*4882a593Smuzhiyun BPF_CGROUP_INET4_CONNECT, 216*4882a593Smuzhiyun BPF_CGROUP_INET6_CONNECT, 217*4882a593Smuzhiyun BPF_CGROUP_INET4_POST_BIND, 218*4882a593Smuzhiyun BPF_CGROUP_INET6_POST_BIND, 219*4882a593Smuzhiyun BPF_CGROUP_UDP4_SENDMSG, 220*4882a593Smuzhiyun BPF_CGROUP_UDP6_SENDMSG, 221*4882a593Smuzhiyun BPF_LIRC_MODE2, 222*4882a593Smuzhiyun BPF_FLOW_DISSECTOR, 223*4882a593Smuzhiyun BPF_CGROUP_SYSCTL, 224*4882a593Smuzhiyun BPF_CGROUP_UDP4_RECVMSG, 225*4882a593Smuzhiyun BPF_CGROUP_UDP6_RECVMSG, 226*4882a593Smuzhiyun BPF_CGROUP_GETSOCKOPT, 227*4882a593Smuzhiyun BPF_CGROUP_SETSOCKOPT, 228*4882a593Smuzhiyun BPF_TRACE_RAW_TP, 229*4882a593Smuzhiyun BPF_TRACE_FENTRY, 230*4882a593Smuzhiyun BPF_TRACE_FEXIT, 231*4882a593Smuzhiyun BPF_MODIFY_RETURN, 232*4882a593Smuzhiyun BPF_LSM_MAC, 233*4882a593Smuzhiyun BPF_TRACE_ITER, 234*4882a593Smuzhiyun BPF_CGROUP_INET4_GETPEERNAME, 235*4882a593Smuzhiyun BPF_CGROUP_INET6_GETPEERNAME, 236*4882a593Smuzhiyun BPF_CGROUP_INET4_GETSOCKNAME, 237*4882a593Smuzhiyun BPF_CGROUP_INET6_GETSOCKNAME, 238*4882a593Smuzhiyun BPF_XDP_DEVMAP, 239*4882a593Smuzhiyun BPF_CGROUP_INET_SOCK_RELEASE, 240*4882a593Smuzhiyun BPF_XDP_CPUMAP, 241*4882a593Smuzhiyun BPF_SK_LOOKUP, 242*4882a593Smuzhiyun BPF_XDP, 243*4882a593Smuzhiyun __MAX_BPF_ATTACH_TYPE 244*4882a593Smuzhiyun }; 245*4882a593Smuzhiyun 246*4882a593Smuzhiyun #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE 247*4882a593Smuzhiyun 248*4882a593Smuzhiyun enum bpf_link_type { 249*4882a593Smuzhiyun BPF_LINK_TYPE_UNSPEC = 0, 250*4882a593Smuzhiyun BPF_LINK_TYPE_RAW_TRACEPOINT = 1, 251*4882a593Smuzhiyun BPF_LINK_TYPE_TRACING = 2, 252*4882a593Smuzhiyun BPF_LINK_TYPE_CGROUP = 3, 253*4882a593Smuzhiyun BPF_LINK_TYPE_ITER = 4, 254*4882a593Smuzhiyun BPF_LINK_TYPE_NETNS = 5, 255*4882a593Smuzhiyun BPF_LINK_TYPE_XDP = 6, 256*4882a593Smuzhiyun 257*4882a593Smuzhiyun MAX_BPF_LINK_TYPE, 258*4882a593Smuzhiyun }; 259*4882a593Smuzhiyun 260*4882a593Smuzhiyun /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command 261*4882a593Smuzhiyun * 262*4882a593Smuzhiyun * NONE(default): No further bpf programs allowed in the subtree. 263*4882a593Smuzhiyun * 264*4882a593Smuzhiyun * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program, 265*4882a593Smuzhiyun * the program in this cgroup yields to sub-cgroup program. 266*4882a593Smuzhiyun * 267*4882a593Smuzhiyun * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program, 268*4882a593Smuzhiyun * that cgroup program gets run in addition to the program in this cgroup. 269*4882a593Smuzhiyun * 270*4882a593Smuzhiyun * Only one program is allowed to be attached to a cgroup with 271*4882a593Smuzhiyun * NONE or BPF_F_ALLOW_OVERRIDE flag. 272*4882a593Smuzhiyun * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will 273*4882a593Smuzhiyun * release old program and attach the new one. Attach flags has to match. 274*4882a593Smuzhiyun * 275*4882a593Smuzhiyun * Multiple programs are allowed to be attached to a cgroup with 276*4882a593Smuzhiyun * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order 277*4882a593Smuzhiyun * (those that were attached first, run first) 278*4882a593Smuzhiyun * The programs of sub-cgroup are executed first, then programs of 279*4882a593Smuzhiyun * this cgroup and then programs of parent cgroup. 280*4882a593Smuzhiyun * When children program makes decision (like picking TCP CA or sock bind) 281*4882a593Smuzhiyun * parent program has a chance to override it. 282*4882a593Smuzhiyun * 283*4882a593Smuzhiyun * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of 284*4882a593Smuzhiyun * programs for a cgroup. Though it's possible to replace an old program at 285*4882a593Smuzhiyun * any position by also specifying BPF_F_REPLACE flag and position itself in 286*4882a593Smuzhiyun * replace_bpf_fd attribute. Old program at this position will be released. 287*4882a593Smuzhiyun * 288*4882a593Smuzhiyun * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups. 289*4882a593Smuzhiyun * A cgroup with NONE doesn't allow any programs in sub-cgroups. 290*4882a593Smuzhiyun * Ex1: 291*4882a593Smuzhiyun * cgrp1 (MULTI progs A, B) -> 292*4882a593Smuzhiyun * cgrp2 (OVERRIDE prog C) -> 293*4882a593Smuzhiyun * cgrp3 (MULTI prog D) -> 294*4882a593Smuzhiyun * cgrp4 (OVERRIDE prog E) -> 295*4882a593Smuzhiyun * cgrp5 (NONE prog F) 296*4882a593Smuzhiyun * the event in cgrp5 triggers execution of F,D,A,B in that order. 297*4882a593Smuzhiyun * if prog F is detached, the execution is E,D,A,B 298*4882a593Smuzhiyun * if prog F and D are detached, the execution is E,A,B 299*4882a593Smuzhiyun * if prog F, E and D are detached, the execution is C,A,B 300*4882a593Smuzhiyun * 301*4882a593Smuzhiyun * All eligible programs are executed regardless of return code from 302*4882a593Smuzhiyun * earlier programs. 303*4882a593Smuzhiyun */ 304*4882a593Smuzhiyun #define BPF_F_ALLOW_OVERRIDE (1U << 0) 305*4882a593Smuzhiyun #define BPF_F_ALLOW_MULTI (1U << 1) 306*4882a593Smuzhiyun #define BPF_F_REPLACE (1U << 2) 307*4882a593Smuzhiyun 308*4882a593Smuzhiyun /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the 309*4882a593Smuzhiyun * verifier will perform strict alignment checking as if the kernel 310*4882a593Smuzhiyun * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set, 311*4882a593Smuzhiyun * and NET_IP_ALIGN defined to 2. 312*4882a593Smuzhiyun */ 313*4882a593Smuzhiyun #define BPF_F_STRICT_ALIGNMENT (1U << 0) 314*4882a593Smuzhiyun 315*4882a593Smuzhiyun /* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the 316*4882a593Smuzhiyun * verifier will allow any alignment whatsoever. On platforms 317*4882a593Smuzhiyun * with strict alignment requirements for loads ands stores (such 318*4882a593Smuzhiyun * as sparc and mips) the verifier validates that all loads and 319*4882a593Smuzhiyun * stores provably follow this requirement. This flag turns that 320*4882a593Smuzhiyun * checking and enforcement off. 321*4882a593Smuzhiyun * 322*4882a593Smuzhiyun * It is mostly used for testing when we want to validate the 323*4882a593Smuzhiyun * context and memory access aspects of the verifier, but because 324*4882a593Smuzhiyun * of an unaligned access the alignment check would trigger before 325*4882a593Smuzhiyun * the one we are interested in. 326*4882a593Smuzhiyun */ 327*4882a593Smuzhiyun #define BPF_F_ANY_ALIGNMENT (1U << 1) 328*4882a593Smuzhiyun 329*4882a593Smuzhiyun /* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purpose. 330*4882a593Smuzhiyun * Verifier does sub-register def/use analysis and identifies instructions whose 331*4882a593Smuzhiyun * def only matters for low 32-bit, high 32-bit is never referenced later 332*4882a593Smuzhiyun * through implicit zero extension. Therefore verifier notifies JIT back-ends 333*4882a593Smuzhiyun * that it is safe to ignore clearing high 32-bit for these instructions. This 334*4882a593Smuzhiyun * saves some back-ends a lot of code-gen. However such optimization is not 335*4882a593Smuzhiyun * necessary on some arches, for example x86_64, arm64 etc, whose JIT back-ends 336*4882a593Smuzhiyun * hence hasn't used verifier's analysis result. But, we really want to have a 337*4882a593Smuzhiyun * way to be able to verify the correctness of the described optimization on 338*4882a593Smuzhiyun * x86_64 on which testsuites are frequently exercised. 339*4882a593Smuzhiyun * 340*4882a593Smuzhiyun * So, this flag is introduced. Once it is set, verifier will randomize high 341*4882a593Smuzhiyun * 32-bit for those instructions who has been identified as safe to ignore them. 342*4882a593Smuzhiyun * Then, if verifier is not doing correct analysis, such randomization will 343*4882a593Smuzhiyun * regress tests to expose bugs. 344*4882a593Smuzhiyun */ 345*4882a593Smuzhiyun #define BPF_F_TEST_RND_HI32 (1U << 2) 346*4882a593Smuzhiyun 347*4882a593Smuzhiyun /* The verifier internal test flag. Behavior is undefined */ 348*4882a593Smuzhiyun #define BPF_F_TEST_STATE_FREQ (1U << 3) 349*4882a593Smuzhiyun 350*4882a593Smuzhiyun /* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will 351*4882a593Smuzhiyun * restrict map and helper usage for such programs. Sleepable BPF programs can 352*4882a593Smuzhiyun * only be attached to hooks where kernel execution context allows sleeping. 353*4882a593Smuzhiyun * Such programs are allowed to use helpers that may sleep like 354*4882a593Smuzhiyun * bpf_copy_from_user(). 355*4882a593Smuzhiyun */ 356*4882a593Smuzhiyun #define BPF_F_SLEEPABLE (1U << 4) 357*4882a593Smuzhiyun 358*4882a593Smuzhiyun /* When BPF ldimm64's insn[0].src_reg != 0 then this can have 359*4882a593Smuzhiyun * the following extensions: 360*4882a593Smuzhiyun * 361*4882a593Smuzhiyun * insn[0].src_reg: BPF_PSEUDO_MAP_FD 362*4882a593Smuzhiyun * insn[0].imm: map fd 363*4882a593Smuzhiyun * insn[1].imm: 0 364*4882a593Smuzhiyun * insn[0].off: 0 365*4882a593Smuzhiyun * insn[1].off: 0 366*4882a593Smuzhiyun * ldimm64 rewrite: address of map 367*4882a593Smuzhiyun * verifier type: CONST_PTR_TO_MAP 368*4882a593Smuzhiyun */ 369*4882a593Smuzhiyun #define BPF_PSEUDO_MAP_FD 1 370*4882a593Smuzhiyun /* insn[0].src_reg: BPF_PSEUDO_MAP_VALUE 371*4882a593Smuzhiyun * insn[0].imm: map fd 372*4882a593Smuzhiyun * insn[1].imm: offset into value 373*4882a593Smuzhiyun * insn[0].off: 0 374*4882a593Smuzhiyun * insn[1].off: 0 375*4882a593Smuzhiyun * ldimm64 rewrite: address of map[0]+offset 376*4882a593Smuzhiyun * verifier type: PTR_TO_MAP_VALUE 377*4882a593Smuzhiyun */ 378*4882a593Smuzhiyun #define BPF_PSEUDO_MAP_VALUE 2 379*4882a593Smuzhiyun /* insn[0].src_reg: BPF_PSEUDO_BTF_ID 380*4882a593Smuzhiyun * insn[0].imm: kernel btd id of VAR 381*4882a593Smuzhiyun * insn[1].imm: 0 382*4882a593Smuzhiyun * insn[0].off: 0 383*4882a593Smuzhiyun * insn[1].off: 0 384*4882a593Smuzhiyun * ldimm64 rewrite: address of the kernel variable 385*4882a593Smuzhiyun * verifier type: PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var 386*4882a593Smuzhiyun * is struct/union. 387*4882a593Smuzhiyun */ 388*4882a593Smuzhiyun #define BPF_PSEUDO_BTF_ID 3 389*4882a593Smuzhiyun 390*4882a593Smuzhiyun /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative 391*4882a593Smuzhiyun * offset to another bpf function 392*4882a593Smuzhiyun */ 393*4882a593Smuzhiyun #define BPF_PSEUDO_CALL 1 394*4882a593Smuzhiyun 395*4882a593Smuzhiyun /* flags for BPF_MAP_UPDATE_ELEM command */ 396*4882a593Smuzhiyun enum { 397*4882a593Smuzhiyun BPF_ANY = 0, /* create new element or update existing */ 398*4882a593Smuzhiyun BPF_NOEXIST = 1, /* create new element if it didn't exist */ 399*4882a593Smuzhiyun BPF_EXIST = 2, /* update existing element */ 400*4882a593Smuzhiyun BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */ 401*4882a593Smuzhiyun }; 402*4882a593Smuzhiyun 403*4882a593Smuzhiyun /* flags for BPF_MAP_CREATE command */ 404*4882a593Smuzhiyun enum { 405*4882a593Smuzhiyun BPF_F_NO_PREALLOC = (1U << 0), 406*4882a593Smuzhiyun /* Instead of having one common LRU list in the 407*4882a593Smuzhiyun * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list 408*4882a593Smuzhiyun * which can scale and perform better. 409*4882a593Smuzhiyun * Note, the LRU nodes (including free nodes) cannot be moved 410*4882a593Smuzhiyun * across different LRU lists. 411*4882a593Smuzhiyun */ 412*4882a593Smuzhiyun BPF_F_NO_COMMON_LRU = (1U << 1), 413*4882a593Smuzhiyun /* Specify numa node during map creation */ 414*4882a593Smuzhiyun BPF_F_NUMA_NODE = (1U << 2), 415*4882a593Smuzhiyun 416*4882a593Smuzhiyun /* Flags for accessing BPF object from syscall side. */ 417*4882a593Smuzhiyun BPF_F_RDONLY = (1U << 3), 418*4882a593Smuzhiyun BPF_F_WRONLY = (1U << 4), 419*4882a593Smuzhiyun 420*4882a593Smuzhiyun /* Flag for stack_map, store build_id+offset instead of pointer */ 421*4882a593Smuzhiyun BPF_F_STACK_BUILD_ID = (1U << 5), 422*4882a593Smuzhiyun 423*4882a593Smuzhiyun /* Zero-initialize hash function seed. This should only be used for testing. */ 424*4882a593Smuzhiyun BPF_F_ZERO_SEED = (1U << 6), 425*4882a593Smuzhiyun 426*4882a593Smuzhiyun /* Flags for accessing BPF object from program side. */ 427*4882a593Smuzhiyun BPF_F_RDONLY_PROG = (1U << 7), 428*4882a593Smuzhiyun BPF_F_WRONLY_PROG = (1U << 8), 429*4882a593Smuzhiyun 430*4882a593Smuzhiyun /* Clone map from listener for newly accepted socket */ 431*4882a593Smuzhiyun BPF_F_CLONE = (1U << 9), 432*4882a593Smuzhiyun 433*4882a593Smuzhiyun /* Enable memory-mapping BPF map */ 434*4882a593Smuzhiyun BPF_F_MMAPABLE = (1U << 10), 435*4882a593Smuzhiyun 436*4882a593Smuzhiyun /* Share perf_event among processes */ 437*4882a593Smuzhiyun BPF_F_PRESERVE_ELEMS = (1U << 11), 438*4882a593Smuzhiyun 439*4882a593Smuzhiyun /* Create a map that is suitable to be an inner map with dynamic max entries */ 440*4882a593Smuzhiyun BPF_F_INNER_MAP = (1U << 12), 441*4882a593Smuzhiyun }; 442*4882a593Smuzhiyun 443*4882a593Smuzhiyun /* Flags for BPF_PROG_QUERY. */ 444*4882a593Smuzhiyun 445*4882a593Smuzhiyun /* Query effective (directly attached + inherited from ancestor cgroups) 446*4882a593Smuzhiyun * programs that will be executed for events within a cgroup. 447*4882a593Smuzhiyun * attach_flags with this flag are returned only for directly attached programs. 448*4882a593Smuzhiyun */ 449*4882a593Smuzhiyun #define BPF_F_QUERY_EFFECTIVE (1U << 0) 450*4882a593Smuzhiyun 451*4882a593Smuzhiyun /* Flags for BPF_PROG_TEST_RUN */ 452*4882a593Smuzhiyun 453*4882a593Smuzhiyun /* If set, run the test on the cpu specified by bpf_attr.test.cpu */ 454*4882a593Smuzhiyun #define BPF_F_TEST_RUN_ON_CPU (1U << 0) 455*4882a593Smuzhiyun 456*4882a593Smuzhiyun /* type for BPF_ENABLE_STATS */ 457*4882a593Smuzhiyun enum bpf_stats_type { 458*4882a593Smuzhiyun /* enabled run_time_ns and run_cnt */ 459*4882a593Smuzhiyun BPF_STATS_RUN_TIME = 0, 460*4882a593Smuzhiyun }; 461*4882a593Smuzhiyun 462*4882a593Smuzhiyun enum bpf_stack_build_id_status { 463*4882a593Smuzhiyun /* user space need an empty entry to identify end of a trace */ 464*4882a593Smuzhiyun BPF_STACK_BUILD_ID_EMPTY = 0, 465*4882a593Smuzhiyun /* with valid build_id and offset */ 466*4882a593Smuzhiyun BPF_STACK_BUILD_ID_VALID = 1, 467*4882a593Smuzhiyun /* couldn't get build_id, fallback to ip */ 468*4882a593Smuzhiyun BPF_STACK_BUILD_ID_IP = 2, 469*4882a593Smuzhiyun }; 470*4882a593Smuzhiyun 471*4882a593Smuzhiyun #define BPF_BUILD_ID_SIZE 20 472*4882a593Smuzhiyun struct bpf_stack_build_id { 473*4882a593Smuzhiyun __s32 status; 474*4882a593Smuzhiyun unsigned char build_id[BPF_BUILD_ID_SIZE]; 475*4882a593Smuzhiyun union { 476*4882a593Smuzhiyun __u64 offset; 477*4882a593Smuzhiyun __u64 ip; 478*4882a593Smuzhiyun }; 479*4882a593Smuzhiyun }; 480*4882a593Smuzhiyun 481*4882a593Smuzhiyun #define BPF_OBJ_NAME_LEN 16U 482*4882a593Smuzhiyun 483*4882a593Smuzhiyun union bpf_attr { 484*4882a593Smuzhiyun struct { /* anonymous struct used by BPF_MAP_CREATE command */ 485*4882a593Smuzhiyun __u32 map_type; /* one of enum bpf_map_type */ 486*4882a593Smuzhiyun __u32 key_size; /* size of key in bytes */ 487*4882a593Smuzhiyun __u32 value_size; /* size of value in bytes */ 488*4882a593Smuzhiyun __u32 max_entries; /* max number of entries in a map */ 489*4882a593Smuzhiyun __u32 map_flags; /* BPF_MAP_CREATE related 490*4882a593Smuzhiyun * flags defined above. 491*4882a593Smuzhiyun */ 492*4882a593Smuzhiyun __u32 inner_map_fd; /* fd pointing to the inner map */ 493*4882a593Smuzhiyun __u32 numa_node; /* numa node (effective only if 494*4882a593Smuzhiyun * BPF_F_NUMA_NODE is set). 495*4882a593Smuzhiyun */ 496*4882a593Smuzhiyun char map_name[BPF_OBJ_NAME_LEN]; 497*4882a593Smuzhiyun __u32 map_ifindex; /* ifindex of netdev to create on */ 498*4882a593Smuzhiyun __u32 btf_fd; /* fd pointing to a BTF type data */ 499*4882a593Smuzhiyun __u32 btf_key_type_id; /* BTF type_id of the key */ 500*4882a593Smuzhiyun __u32 btf_value_type_id; /* BTF type_id of the value */ 501*4882a593Smuzhiyun __u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel- 502*4882a593Smuzhiyun * struct stored as the 503*4882a593Smuzhiyun * map value 504*4882a593Smuzhiyun */ 505*4882a593Smuzhiyun }; 506*4882a593Smuzhiyun 507*4882a593Smuzhiyun struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ 508*4882a593Smuzhiyun __u32 map_fd; 509*4882a593Smuzhiyun __aligned_u64 key; 510*4882a593Smuzhiyun union { 511*4882a593Smuzhiyun __aligned_u64 value; 512*4882a593Smuzhiyun __aligned_u64 next_key; 513*4882a593Smuzhiyun }; 514*4882a593Smuzhiyun __u64 flags; 515*4882a593Smuzhiyun }; 516*4882a593Smuzhiyun 517*4882a593Smuzhiyun struct { /* struct used by BPF_MAP_*_BATCH commands */ 518*4882a593Smuzhiyun __aligned_u64 in_batch; /* start batch, 519*4882a593Smuzhiyun * NULL to start from beginning 520*4882a593Smuzhiyun */ 521*4882a593Smuzhiyun __aligned_u64 out_batch; /* output: next start batch */ 522*4882a593Smuzhiyun __aligned_u64 keys; 523*4882a593Smuzhiyun __aligned_u64 values; 524*4882a593Smuzhiyun __u32 count; /* input/output: 525*4882a593Smuzhiyun * input: # of key/value 526*4882a593Smuzhiyun * elements 527*4882a593Smuzhiyun * output: # of filled elements 528*4882a593Smuzhiyun */ 529*4882a593Smuzhiyun __u32 map_fd; 530*4882a593Smuzhiyun __u64 elem_flags; 531*4882a593Smuzhiyun __u64 flags; 532*4882a593Smuzhiyun } batch; 533*4882a593Smuzhiyun 534*4882a593Smuzhiyun struct { /* anonymous struct used by BPF_PROG_LOAD command */ 535*4882a593Smuzhiyun __u32 prog_type; /* one of enum bpf_prog_type */ 536*4882a593Smuzhiyun __u32 insn_cnt; 537*4882a593Smuzhiyun __aligned_u64 insns; 538*4882a593Smuzhiyun __aligned_u64 license; 539*4882a593Smuzhiyun __u32 log_level; /* verbosity level of verifier */ 540*4882a593Smuzhiyun __u32 log_size; /* size of user buffer */ 541*4882a593Smuzhiyun __aligned_u64 log_buf; /* user supplied buffer */ 542*4882a593Smuzhiyun __u32 kern_version; /* not used */ 543*4882a593Smuzhiyun __u32 prog_flags; 544*4882a593Smuzhiyun char prog_name[BPF_OBJ_NAME_LEN]; 545*4882a593Smuzhiyun __u32 prog_ifindex; /* ifindex of netdev to prep for */ 546*4882a593Smuzhiyun /* For some prog types expected attach type must be known at 547*4882a593Smuzhiyun * load time to verify attach type specific parts of prog 548*4882a593Smuzhiyun * (context accesses, allowed helpers, etc). 549*4882a593Smuzhiyun */ 550*4882a593Smuzhiyun __u32 expected_attach_type; 551*4882a593Smuzhiyun __u32 prog_btf_fd; /* fd pointing to BTF type data */ 552*4882a593Smuzhiyun __u32 func_info_rec_size; /* userspace bpf_func_info size */ 553*4882a593Smuzhiyun __aligned_u64 func_info; /* func info */ 554*4882a593Smuzhiyun __u32 func_info_cnt; /* number of bpf_func_info records */ 555*4882a593Smuzhiyun __u32 line_info_rec_size; /* userspace bpf_line_info size */ 556*4882a593Smuzhiyun __aligned_u64 line_info; /* line info */ 557*4882a593Smuzhiyun __u32 line_info_cnt; /* number of bpf_line_info records */ 558*4882a593Smuzhiyun __u32 attach_btf_id; /* in-kernel BTF type id to attach to */ 559*4882a593Smuzhiyun __u32 attach_prog_fd; /* 0 to attach to vmlinux */ 560*4882a593Smuzhiyun }; 561*4882a593Smuzhiyun 562*4882a593Smuzhiyun struct { /* anonymous struct used by BPF_OBJ_* commands */ 563*4882a593Smuzhiyun __aligned_u64 pathname; 564*4882a593Smuzhiyun __u32 bpf_fd; 565*4882a593Smuzhiyun __u32 file_flags; 566*4882a593Smuzhiyun }; 567*4882a593Smuzhiyun 568*4882a593Smuzhiyun struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */ 569*4882a593Smuzhiyun __u32 target_fd; /* container object to attach to */ 570*4882a593Smuzhiyun __u32 attach_bpf_fd; /* eBPF program to attach */ 571*4882a593Smuzhiyun __u32 attach_type; 572*4882a593Smuzhiyun __u32 attach_flags; 573*4882a593Smuzhiyun __u32 replace_bpf_fd; /* previously attached eBPF 574*4882a593Smuzhiyun * program to replace if 575*4882a593Smuzhiyun * BPF_F_REPLACE is used 576*4882a593Smuzhiyun */ 577*4882a593Smuzhiyun }; 578*4882a593Smuzhiyun 579*4882a593Smuzhiyun struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ 580*4882a593Smuzhiyun __u32 prog_fd; 581*4882a593Smuzhiyun __u32 retval; 582*4882a593Smuzhiyun __u32 data_size_in; /* input: len of data_in */ 583*4882a593Smuzhiyun __u32 data_size_out; /* input/output: len of data_out 584*4882a593Smuzhiyun * returns ENOSPC if data_out 585*4882a593Smuzhiyun * is too small. 586*4882a593Smuzhiyun */ 587*4882a593Smuzhiyun __aligned_u64 data_in; 588*4882a593Smuzhiyun __aligned_u64 data_out; 589*4882a593Smuzhiyun __u32 repeat; 590*4882a593Smuzhiyun __u32 duration; 591*4882a593Smuzhiyun __u32 ctx_size_in; /* input: len of ctx_in */ 592*4882a593Smuzhiyun __u32 ctx_size_out; /* input/output: len of ctx_out 593*4882a593Smuzhiyun * returns ENOSPC if ctx_out 594*4882a593Smuzhiyun * is too small. 595*4882a593Smuzhiyun */ 596*4882a593Smuzhiyun __aligned_u64 ctx_in; 597*4882a593Smuzhiyun __aligned_u64 ctx_out; 598*4882a593Smuzhiyun __u32 flags; 599*4882a593Smuzhiyun __u32 cpu; 600*4882a593Smuzhiyun } test; 601*4882a593Smuzhiyun 602*4882a593Smuzhiyun struct { /* anonymous struct used by BPF_*_GET_*_ID */ 603*4882a593Smuzhiyun union { 604*4882a593Smuzhiyun __u32 start_id; 605*4882a593Smuzhiyun __u32 prog_id; 606*4882a593Smuzhiyun __u32 map_id; 607*4882a593Smuzhiyun __u32 btf_id; 608*4882a593Smuzhiyun __u32 link_id; 609*4882a593Smuzhiyun }; 610*4882a593Smuzhiyun __u32 next_id; 611*4882a593Smuzhiyun __u32 open_flags; 612*4882a593Smuzhiyun }; 613*4882a593Smuzhiyun 614*4882a593Smuzhiyun struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ 615*4882a593Smuzhiyun __u32 bpf_fd; 616*4882a593Smuzhiyun __u32 info_len; 617*4882a593Smuzhiyun __aligned_u64 info; 618*4882a593Smuzhiyun } info; 619*4882a593Smuzhiyun 620*4882a593Smuzhiyun struct { /* anonymous struct used by BPF_PROG_QUERY command */ 621*4882a593Smuzhiyun __u32 target_fd; /* container object to query */ 622*4882a593Smuzhiyun __u32 attach_type; 623*4882a593Smuzhiyun __u32 query_flags; 624*4882a593Smuzhiyun __u32 attach_flags; 625*4882a593Smuzhiyun __aligned_u64 prog_ids; 626*4882a593Smuzhiyun __u32 prog_cnt; 627*4882a593Smuzhiyun } query; 628*4882a593Smuzhiyun 629*4882a593Smuzhiyun struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ 630*4882a593Smuzhiyun __u64 name; 631*4882a593Smuzhiyun __u32 prog_fd; 632*4882a593Smuzhiyun } raw_tracepoint; 633*4882a593Smuzhiyun 634*4882a593Smuzhiyun struct { /* anonymous struct for BPF_BTF_LOAD */ 635*4882a593Smuzhiyun __aligned_u64 btf; 636*4882a593Smuzhiyun __aligned_u64 btf_log_buf; 637*4882a593Smuzhiyun __u32 btf_size; 638*4882a593Smuzhiyun __u32 btf_log_size; 639*4882a593Smuzhiyun __u32 btf_log_level; 640*4882a593Smuzhiyun }; 641*4882a593Smuzhiyun 642*4882a593Smuzhiyun struct { 643*4882a593Smuzhiyun __u32 pid; /* input: pid */ 644*4882a593Smuzhiyun __u32 fd; /* input: fd */ 645*4882a593Smuzhiyun __u32 flags; /* input: flags */ 646*4882a593Smuzhiyun __u32 buf_len; /* input/output: buf len */ 647*4882a593Smuzhiyun __aligned_u64 buf; /* input/output: 648*4882a593Smuzhiyun * tp_name for tracepoint 649*4882a593Smuzhiyun * symbol for kprobe 650*4882a593Smuzhiyun * filename for uprobe 651*4882a593Smuzhiyun */ 652*4882a593Smuzhiyun __u32 prog_id; /* output: prod_id */ 653*4882a593Smuzhiyun __u32 fd_type; /* output: BPF_FD_TYPE_* */ 654*4882a593Smuzhiyun __u64 probe_offset; /* output: probe_offset */ 655*4882a593Smuzhiyun __u64 probe_addr; /* output: probe_addr */ 656*4882a593Smuzhiyun } task_fd_query; 657*4882a593Smuzhiyun 658*4882a593Smuzhiyun struct { /* struct used by BPF_LINK_CREATE command */ 659*4882a593Smuzhiyun __u32 prog_fd; /* eBPF program to attach */ 660*4882a593Smuzhiyun union { 661*4882a593Smuzhiyun __u32 target_fd; /* object to attach to */ 662*4882a593Smuzhiyun __u32 target_ifindex; /* target ifindex */ 663*4882a593Smuzhiyun }; 664*4882a593Smuzhiyun __u32 attach_type; /* attach type */ 665*4882a593Smuzhiyun __u32 flags; /* extra flags */ 666*4882a593Smuzhiyun union { 667*4882a593Smuzhiyun __u32 target_btf_id; /* btf_id of target to attach to */ 668*4882a593Smuzhiyun struct { 669*4882a593Smuzhiyun __aligned_u64 iter_info; /* extra bpf_iter_link_info */ 670*4882a593Smuzhiyun __u32 iter_info_len; /* iter_info length */ 671*4882a593Smuzhiyun }; 672*4882a593Smuzhiyun }; 673*4882a593Smuzhiyun } link_create; 674*4882a593Smuzhiyun 675*4882a593Smuzhiyun struct { /* struct used by BPF_LINK_UPDATE command */ 676*4882a593Smuzhiyun __u32 link_fd; /* link fd */ 677*4882a593Smuzhiyun /* new program fd to update link with */ 678*4882a593Smuzhiyun __u32 new_prog_fd; 679*4882a593Smuzhiyun __u32 flags; /* extra flags */ 680*4882a593Smuzhiyun /* expected link's program fd; is specified only if 681*4882a593Smuzhiyun * BPF_F_REPLACE flag is set in flags */ 682*4882a593Smuzhiyun __u32 old_prog_fd; 683*4882a593Smuzhiyun } link_update; 684*4882a593Smuzhiyun 685*4882a593Smuzhiyun struct { 686*4882a593Smuzhiyun __u32 link_fd; 687*4882a593Smuzhiyun } link_detach; 688*4882a593Smuzhiyun 689*4882a593Smuzhiyun struct { /* struct used by BPF_ENABLE_STATS command */ 690*4882a593Smuzhiyun __u32 type; 691*4882a593Smuzhiyun } enable_stats; 692*4882a593Smuzhiyun 693*4882a593Smuzhiyun struct { /* struct used by BPF_ITER_CREATE command */ 694*4882a593Smuzhiyun __u32 link_fd; 695*4882a593Smuzhiyun __u32 flags; 696*4882a593Smuzhiyun } iter_create; 697*4882a593Smuzhiyun 698*4882a593Smuzhiyun struct { /* struct used by BPF_PROG_BIND_MAP command */ 699*4882a593Smuzhiyun __u32 prog_fd; 700*4882a593Smuzhiyun __u32 map_fd; 701*4882a593Smuzhiyun __u32 flags; /* extra flags */ 702*4882a593Smuzhiyun } prog_bind_map; 703*4882a593Smuzhiyun 704*4882a593Smuzhiyun } __attribute__((aligned(8))); 705*4882a593Smuzhiyun 706*4882a593Smuzhiyun /* The description below is an attempt at providing documentation to eBPF 707*4882a593Smuzhiyun * developers about the multiple available eBPF helper functions. It can be 708*4882a593Smuzhiyun * parsed and used to produce a manual page. The workflow is the following, 709*4882a593Smuzhiyun * and requires the rst2man utility: 710*4882a593Smuzhiyun * 711*4882a593Smuzhiyun * $ ./scripts/bpf_helpers_doc.py \ 712*4882a593Smuzhiyun * --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst 713*4882a593Smuzhiyun * $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7 714*4882a593Smuzhiyun * $ man /tmp/bpf-helpers.7 715*4882a593Smuzhiyun * 716*4882a593Smuzhiyun * Note that in order to produce this external documentation, some RST 717*4882a593Smuzhiyun * formatting is used in the descriptions to get "bold" and "italics" in 718*4882a593Smuzhiyun * manual pages. Also note that the few trailing white spaces are 719*4882a593Smuzhiyun * intentional, removing them would break paragraphs for rst2man. 720*4882a593Smuzhiyun * 721*4882a593Smuzhiyun * Start of BPF helper function descriptions: 722*4882a593Smuzhiyun * 723*4882a593Smuzhiyun * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key) 724*4882a593Smuzhiyun * Description 725*4882a593Smuzhiyun * Perform a lookup in *map* for an entry associated to *key*. 726*4882a593Smuzhiyun * Return 727*4882a593Smuzhiyun * Map value associated to *key*, or **NULL** if no entry was 728*4882a593Smuzhiyun * found. 729*4882a593Smuzhiyun * 730*4882a593Smuzhiyun * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) 731*4882a593Smuzhiyun * Description 732*4882a593Smuzhiyun * Add or update the value of the entry associated to *key* in 733*4882a593Smuzhiyun * *map* with *value*. *flags* is one of: 734*4882a593Smuzhiyun * 735*4882a593Smuzhiyun * **BPF_NOEXIST** 736*4882a593Smuzhiyun * The entry for *key* must not exist in the map. 737*4882a593Smuzhiyun * **BPF_EXIST** 738*4882a593Smuzhiyun * The entry for *key* must already exist in the map. 739*4882a593Smuzhiyun * **BPF_ANY** 740*4882a593Smuzhiyun * No condition on the existence of the entry for *key*. 741*4882a593Smuzhiyun * 742*4882a593Smuzhiyun * Flag value **BPF_NOEXIST** cannot be used for maps of types 743*4882a593Smuzhiyun * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all 744*4882a593Smuzhiyun * elements always exist), the helper would return an error. 745*4882a593Smuzhiyun * Return 746*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 747*4882a593Smuzhiyun * 748*4882a593Smuzhiyun * long bpf_map_delete_elem(struct bpf_map *map, const void *key) 749*4882a593Smuzhiyun * Description 750*4882a593Smuzhiyun * Delete entry with *key* from *map*. 751*4882a593Smuzhiyun * Return 752*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 753*4882a593Smuzhiyun * 754*4882a593Smuzhiyun * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) 755*4882a593Smuzhiyun * Description 756*4882a593Smuzhiyun * For tracing programs, safely attempt to read *size* bytes from 757*4882a593Smuzhiyun * kernel space address *unsafe_ptr* and store the data in *dst*. 758*4882a593Smuzhiyun * 759*4882a593Smuzhiyun * Generally, use **bpf_probe_read_user**\ () or 760*4882a593Smuzhiyun * **bpf_probe_read_kernel**\ () instead. 761*4882a593Smuzhiyun * Return 762*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 763*4882a593Smuzhiyun * 764*4882a593Smuzhiyun * u64 bpf_ktime_get_ns(void) 765*4882a593Smuzhiyun * Description 766*4882a593Smuzhiyun * Return the time elapsed since system boot, in nanoseconds. 767*4882a593Smuzhiyun * Does not include time the system was suspended. 768*4882a593Smuzhiyun * See: **clock_gettime**\ (**CLOCK_MONOTONIC**) 769*4882a593Smuzhiyun * Return 770*4882a593Smuzhiyun * Current *ktime*. 771*4882a593Smuzhiyun * 772*4882a593Smuzhiyun * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...) 773*4882a593Smuzhiyun * Description 774*4882a593Smuzhiyun * This helper is a "printk()-like" facility for debugging. It 775*4882a593Smuzhiyun * prints a message defined by format *fmt* (of size *fmt_size*) 776*4882a593Smuzhiyun * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if 777*4882a593Smuzhiyun * available. It can take up to three additional **u64** 778*4882a593Smuzhiyun * arguments (as an eBPF helpers, the total number of arguments is 779*4882a593Smuzhiyun * limited to five). 780*4882a593Smuzhiyun * 781*4882a593Smuzhiyun * Each time the helper is called, it appends a line to the trace. 782*4882a593Smuzhiyun * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is 783*4882a593Smuzhiyun * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this. 784*4882a593Smuzhiyun * The format of the trace is customizable, and the exact output 785*4882a593Smuzhiyun * one will get depends on the options set in 786*4882a593Smuzhiyun * *\/sys/kernel/debug/tracing/trace_options* (see also the 787*4882a593Smuzhiyun * *README* file under the same directory). However, it usually 788*4882a593Smuzhiyun * defaults to something like: 789*4882a593Smuzhiyun * 790*4882a593Smuzhiyun * :: 791*4882a593Smuzhiyun * 792*4882a593Smuzhiyun * telnet-470 [001] .N.. 419421.045894: 0x00000001: <formatted msg> 793*4882a593Smuzhiyun * 794*4882a593Smuzhiyun * In the above: 795*4882a593Smuzhiyun * 796*4882a593Smuzhiyun * * ``telnet`` is the name of the current task. 797*4882a593Smuzhiyun * * ``470`` is the PID of the current task. 798*4882a593Smuzhiyun * * ``001`` is the CPU number on which the task is 799*4882a593Smuzhiyun * running. 800*4882a593Smuzhiyun * * In ``.N..``, each character refers to a set of 801*4882a593Smuzhiyun * options (whether irqs are enabled, scheduling 802*4882a593Smuzhiyun * options, whether hard/softirqs are running, level of 803*4882a593Smuzhiyun * preempt_disabled respectively). **N** means that 804*4882a593Smuzhiyun * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED** 805*4882a593Smuzhiyun * are set. 806*4882a593Smuzhiyun * * ``419421.045894`` is a timestamp. 807*4882a593Smuzhiyun * * ``0x00000001`` is a fake value used by BPF for the 808*4882a593Smuzhiyun * instruction pointer register. 809*4882a593Smuzhiyun * * ``<formatted msg>`` is the message formatted with 810*4882a593Smuzhiyun * *fmt*. 811*4882a593Smuzhiyun * 812*4882a593Smuzhiyun * The conversion specifiers supported by *fmt* are similar, but 813*4882a593Smuzhiyun * more limited than for printk(). They are **%d**, **%i**, 814*4882a593Smuzhiyun * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**, 815*4882a593Smuzhiyun * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size 816*4882a593Smuzhiyun * of field, padding with zeroes, etc.) is available, and the 817*4882a593Smuzhiyun * helper will return **-EINVAL** (but print nothing) if it 818*4882a593Smuzhiyun * encounters an unknown specifier. 819*4882a593Smuzhiyun * 820*4882a593Smuzhiyun * Also, note that **bpf_trace_printk**\ () is slow, and should 821*4882a593Smuzhiyun * only be used for debugging purposes. For this reason, a notice 822*4882a593Smuzhiyun * block (spanning several lines) is printed to kernel logs and 823*4882a593Smuzhiyun * states that the helper should not be used "for production use" 824*4882a593Smuzhiyun * the first time this helper is used (or more precisely, when 825*4882a593Smuzhiyun * **trace_printk**\ () buffers are allocated). For passing values 826*4882a593Smuzhiyun * to user space, perf events should be preferred. 827*4882a593Smuzhiyun * Return 828*4882a593Smuzhiyun * The number of bytes written to the buffer, or a negative error 829*4882a593Smuzhiyun * in case of failure. 830*4882a593Smuzhiyun * 831*4882a593Smuzhiyun * u32 bpf_get_prandom_u32(void) 832*4882a593Smuzhiyun * Description 833*4882a593Smuzhiyun * Get a pseudo-random number. 834*4882a593Smuzhiyun * 835*4882a593Smuzhiyun * From a security point of view, this helper uses its own 836*4882a593Smuzhiyun * pseudo-random internal state, and cannot be used to infer the 837*4882a593Smuzhiyun * seed of other random functions in the kernel. However, it is 838*4882a593Smuzhiyun * essential to note that the generator used by the helper is not 839*4882a593Smuzhiyun * cryptographically secure. 840*4882a593Smuzhiyun * Return 841*4882a593Smuzhiyun * A random 32-bit unsigned value. 842*4882a593Smuzhiyun * 843*4882a593Smuzhiyun * u32 bpf_get_smp_processor_id(void) 844*4882a593Smuzhiyun * Description 845*4882a593Smuzhiyun * Get the SMP (symmetric multiprocessing) processor id. Note that 846*4882a593Smuzhiyun * all programs run with preemption disabled, which means that the 847*4882a593Smuzhiyun * SMP processor id is stable during all the execution of the 848*4882a593Smuzhiyun * program. 849*4882a593Smuzhiyun * Return 850*4882a593Smuzhiyun * The SMP id of the processor running the program. 851*4882a593Smuzhiyun * 852*4882a593Smuzhiyun * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) 853*4882a593Smuzhiyun * Description 854*4882a593Smuzhiyun * Store *len* bytes from address *from* into the packet 855*4882a593Smuzhiyun * associated to *skb*, at *offset*. *flags* are a combination of 856*4882a593Smuzhiyun * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the 857*4882a593Smuzhiyun * checksum for the packet after storing the bytes) and 858*4882a593Smuzhiyun * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ 859*4882a593Smuzhiyun * **->swhash** and *skb*\ **->l4hash** to 0). 860*4882a593Smuzhiyun * 861*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 862*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 863*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 864*4882a593Smuzhiyun * performed again, if the helper is used in combination with 865*4882a593Smuzhiyun * direct packet access. 866*4882a593Smuzhiyun * Return 867*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 868*4882a593Smuzhiyun * 869*4882a593Smuzhiyun * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) 870*4882a593Smuzhiyun * Description 871*4882a593Smuzhiyun * Recompute the layer 3 (e.g. IP) checksum for the packet 872*4882a593Smuzhiyun * associated to *skb*. Computation is incremental, so the helper 873*4882a593Smuzhiyun * must know the former value of the header field that was 874*4882a593Smuzhiyun * modified (*from*), the new value of this field (*to*), and the 875*4882a593Smuzhiyun * number of bytes (2 or 4) for this field, stored in *size*. 876*4882a593Smuzhiyun * Alternatively, it is possible to store the difference between 877*4882a593Smuzhiyun * the previous and the new values of the header field in *to*, by 878*4882a593Smuzhiyun * setting *from* and *size* to 0. For both methods, *offset* 879*4882a593Smuzhiyun * indicates the location of the IP checksum within the packet. 880*4882a593Smuzhiyun * 881*4882a593Smuzhiyun * This helper works in combination with **bpf_csum_diff**\ (), 882*4882a593Smuzhiyun * which does not update the checksum in-place, but offers more 883*4882a593Smuzhiyun * flexibility and can handle sizes larger than 2 or 4 for the 884*4882a593Smuzhiyun * checksum to update. 885*4882a593Smuzhiyun * 886*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 887*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 888*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 889*4882a593Smuzhiyun * performed again, if the helper is used in combination with 890*4882a593Smuzhiyun * direct packet access. 891*4882a593Smuzhiyun * Return 892*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 893*4882a593Smuzhiyun * 894*4882a593Smuzhiyun * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) 895*4882a593Smuzhiyun * Description 896*4882a593Smuzhiyun * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the 897*4882a593Smuzhiyun * packet associated to *skb*. Computation is incremental, so the 898*4882a593Smuzhiyun * helper must know the former value of the header field that was 899*4882a593Smuzhiyun * modified (*from*), the new value of this field (*to*), and the 900*4882a593Smuzhiyun * number of bytes (2 or 4) for this field, stored on the lowest 901*4882a593Smuzhiyun * four bits of *flags*. Alternatively, it is possible to store 902*4882a593Smuzhiyun * the difference between the previous and the new values of the 903*4882a593Smuzhiyun * header field in *to*, by setting *from* and the four lowest 904*4882a593Smuzhiyun * bits of *flags* to 0. For both methods, *offset* indicates the 905*4882a593Smuzhiyun * location of the IP checksum within the packet. In addition to 906*4882a593Smuzhiyun * the size of the field, *flags* can be added (bitwise OR) actual 907*4882a593Smuzhiyun * flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left 908*4882a593Smuzhiyun * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and 909*4882a593Smuzhiyun * for updates resulting in a null checksum the value is set to 910*4882a593Smuzhiyun * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates 911*4882a593Smuzhiyun * the checksum is to be computed against a pseudo-header. 912*4882a593Smuzhiyun * 913*4882a593Smuzhiyun * This helper works in combination with **bpf_csum_diff**\ (), 914*4882a593Smuzhiyun * which does not update the checksum in-place, but offers more 915*4882a593Smuzhiyun * flexibility and can handle sizes larger than 2 or 4 for the 916*4882a593Smuzhiyun * checksum to update. 917*4882a593Smuzhiyun * 918*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 919*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 920*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 921*4882a593Smuzhiyun * performed again, if the helper is used in combination with 922*4882a593Smuzhiyun * direct packet access. 923*4882a593Smuzhiyun * Return 924*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 925*4882a593Smuzhiyun * 926*4882a593Smuzhiyun * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) 927*4882a593Smuzhiyun * Description 928*4882a593Smuzhiyun * This special helper is used to trigger a "tail call", or in 929*4882a593Smuzhiyun * other words, to jump into another eBPF program. The same stack 930*4882a593Smuzhiyun * frame is used (but values on stack and in registers for the 931*4882a593Smuzhiyun * caller are not accessible to the callee). This mechanism allows 932*4882a593Smuzhiyun * for program chaining, either for raising the maximum number of 933*4882a593Smuzhiyun * available eBPF instructions, or to execute given programs in 934*4882a593Smuzhiyun * conditional blocks. For security reasons, there is an upper 935*4882a593Smuzhiyun * limit to the number of successive tail calls that can be 936*4882a593Smuzhiyun * performed. 937*4882a593Smuzhiyun * 938*4882a593Smuzhiyun * Upon call of this helper, the program attempts to jump into a 939*4882a593Smuzhiyun * program referenced at index *index* in *prog_array_map*, a 940*4882a593Smuzhiyun * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes 941*4882a593Smuzhiyun * *ctx*, a pointer to the context. 942*4882a593Smuzhiyun * 943*4882a593Smuzhiyun * If the call succeeds, the kernel immediately runs the first 944*4882a593Smuzhiyun * instruction of the new program. This is not a function call, 945*4882a593Smuzhiyun * and it never returns to the previous program. If the call 946*4882a593Smuzhiyun * fails, then the helper has no effect, and the caller continues 947*4882a593Smuzhiyun * to run its subsequent instructions. A call can fail if the 948*4882a593Smuzhiyun * destination program for the jump does not exist (i.e. *index* 949*4882a593Smuzhiyun * is superior to the number of entries in *prog_array_map*), or 950*4882a593Smuzhiyun * if the maximum number of tail calls has been reached for this 951*4882a593Smuzhiyun * chain of programs. This limit is defined in the kernel by the 952*4882a593Smuzhiyun * macro **MAX_TAIL_CALL_CNT** (not accessible to user space), 953*4882a593Smuzhiyun * which is currently set to 32. 954*4882a593Smuzhiyun * Return 955*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 956*4882a593Smuzhiyun * 957*4882a593Smuzhiyun * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) 958*4882a593Smuzhiyun * Description 959*4882a593Smuzhiyun * Clone and redirect the packet associated to *skb* to another 960*4882a593Smuzhiyun * net device of index *ifindex*. Both ingress and egress 961*4882a593Smuzhiyun * interfaces can be used for redirection. The **BPF_F_INGRESS** 962*4882a593Smuzhiyun * value in *flags* is used to make the distinction (ingress path 963*4882a593Smuzhiyun * is selected if the flag is present, egress path otherwise). 964*4882a593Smuzhiyun * This is the only flag supported for now. 965*4882a593Smuzhiyun * 966*4882a593Smuzhiyun * In comparison with **bpf_redirect**\ () helper, 967*4882a593Smuzhiyun * **bpf_clone_redirect**\ () has the associated cost of 968*4882a593Smuzhiyun * duplicating the packet buffer, but this can be executed out of 969*4882a593Smuzhiyun * the eBPF program. Conversely, **bpf_redirect**\ () is more 970*4882a593Smuzhiyun * efficient, but it is handled through an action code where the 971*4882a593Smuzhiyun * redirection happens only after the eBPF program has returned. 972*4882a593Smuzhiyun * 973*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 974*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 975*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 976*4882a593Smuzhiyun * performed again, if the helper is used in combination with 977*4882a593Smuzhiyun * direct packet access. 978*4882a593Smuzhiyun * Return 979*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 980*4882a593Smuzhiyun * 981*4882a593Smuzhiyun * u64 bpf_get_current_pid_tgid(void) 982*4882a593Smuzhiyun * Return 983*4882a593Smuzhiyun * A 64-bit integer containing the current tgid and pid, and 984*4882a593Smuzhiyun * created as such: 985*4882a593Smuzhiyun * *current_task*\ **->tgid << 32 \|** 986*4882a593Smuzhiyun * *current_task*\ **->pid**. 987*4882a593Smuzhiyun * 988*4882a593Smuzhiyun * u64 bpf_get_current_uid_gid(void) 989*4882a593Smuzhiyun * Return 990*4882a593Smuzhiyun * A 64-bit integer containing the current GID and UID, and 991*4882a593Smuzhiyun * created as such: *current_gid* **<< 32 \|** *current_uid*. 992*4882a593Smuzhiyun * 993*4882a593Smuzhiyun * long bpf_get_current_comm(void *buf, u32 size_of_buf) 994*4882a593Smuzhiyun * Description 995*4882a593Smuzhiyun * Copy the **comm** attribute of the current task into *buf* of 996*4882a593Smuzhiyun * *size_of_buf*. The **comm** attribute contains the name of 997*4882a593Smuzhiyun * the executable (excluding the path) for the current task. The 998*4882a593Smuzhiyun * *size_of_buf* must be strictly positive. On success, the 999*4882a593Smuzhiyun * helper makes sure that the *buf* is NUL-terminated. On failure, 1000*4882a593Smuzhiyun * it is filled with zeroes. 1001*4882a593Smuzhiyun * Return 1002*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1003*4882a593Smuzhiyun * 1004*4882a593Smuzhiyun * u32 bpf_get_cgroup_classid(struct sk_buff *skb) 1005*4882a593Smuzhiyun * Description 1006*4882a593Smuzhiyun * Retrieve the classid for the current task, i.e. for the net_cls 1007*4882a593Smuzhiyun * cgroup to which *skb* belongs. 1008*4882a593Smuzhiyun * 1009*4882a593Smuzhiyun * This helper can be used on TC egress path, but not on ingress. 1010*4882a593Smuzhiyun * 1011*4882a593Smuzhiyun * The net_cls cgroup provides an interface to tag network packets 1012*4882a593Smuzhiyun * based on a user-provided identifier for all traffic coming from 1013*4882a593Smuzhiyun * the tasks belonging to the related cgroup. See also the related 1014*4882a593Smuzhiyun * kernel documentation, available from the Linux sources in file 1015*4882a593Smuzhiyun * *Documentation/admin-guide/cgroup-v1/net_cls.rst*. 1016*4882a593Smuzhiyun * 1017*4882a593Smuzhiyun * The Linux kernel has two versions for cgroups: there are 1018*4882a593Smuzhiyun * cgroups v1 and cgroups v2. Both are available to users, who can 1019*4882a593Smuzhiyun * use a mixture of them, but note that the net_cls cgroup is for 1020*4882a593Smuzhiyun * cgroup v1 only. This makes it incompatible with BPF programs 1021*4882a593Smuzhiyun * run on cgroups, which is a cgroup-v2-only feature (a socket can 1022*4882a593Smuzhiyun * only hold data for one version of cgroups at a time). 1023*4882a593Smuzhiyun * 1024*4882a593Smuzhiyun * This helper is only available is the kernel was compiled with 1025*4882a593Smuzhiyun * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to 1026*4882a593Smuzhiyun * "**y**" or to "**m**". 1027*4882a593Smuzhiyun * Return 1028*4882a593Smuzhiyun * The classid, or 0 for the default unconfigured classid. 1029*4882a593Smuzhiyun * 1030*4882a593Smuzhiyun * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) 1031*4882a593Smuzhiyun * Description 1032*4882a593Smuzhiyun * Push a *vlan_tci* (VLAN tag control information) of protocol 1033*4882a593Smuzhiyun * *vlan_proto* to the packet associated to *skb*, then update 1034*4882a593Smuzhiyun * the checksum. Note that if *vlan_proto* is different from 1035*4882a593Smuzhiyun * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to 1036*4882a593Smuzhiyun * be **ETH_P_8021Q**. 1037*4882a593Smuzhiyun * 1038*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 1039*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 1040*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 1041*4882a593Smuzhiyun * performed again, if the helper is used in combination with 1042*4882a593Smuzhiyun * direct packet access. 1043*4882a593Smuzhiyun * Return 1044*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1045*4882a593Smuzhiyun * 1046*4882a593Smuzhiyun * long bpf_skb_vlan_pop(struct sk_buff *skb) 1047*4882a593Smuzhiyun * Description 1048*4882a593Smuzhiyun * Pop a VLAN header from the packet associated to *skb*. 1049*4882a593Smuzhiyun * 1050*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 1051*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 1052*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 1053*4882a593Smuzhiyun * performed again, if the helper is used in combination with 1054*4882a593Smuzhiyun * direct packet access. 1055*4882a593Smuzhiyun * Return 1056*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1057*4882a593Smuzhiyun * 1058*4882a593Smuzhiyun * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) 1059*4882a593Smuzhiyun * Description 1060*4882a593Smuzhiyun * Get tunnel metadata. This helper takes a pointer *key* to an 1061*4882a593Smuzhiyun * empty **struct bpf_tunnel_key** of **size**, that will be 1062*4882a593Smuzhiyun * filled with tunnel metadata for the packet associated to *skb*. 1063*4882a593Smuzhiyun * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which 1064*4882a593Smuzhiyun * indicates that the tunnel is based on IPv6 protocol instead of 1065*4882a593Smuzhiyun * IPv4. 1066*4882a593Smuzhiyun * 1067*4882a593Smuzhiyun * The **struct bpf_tunnel_key** is an object that generalizes the 1068*4882a593Smuzhiyun * principal parameters used by various tunneling protocols into a 1069*4882a593Smuzhiyun * single struct. This way, it can be used to easily make a 1070*4882a593Smuzhiyun * decision based on the contents of the encapsulation header, 1071*4882a593Smuzhiyun * "summarized" in this struct. In particular, it holds the IP 1072*4882a593Smuzhiyun * address of the remote end (IPv4 or IPv6, depending on the case) 1073*4882a593Smuzhiyun * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also, 1074*4882a593Smuzhiyun * this struct exposes the *key*\ **->tunnel_id**, which is 1075*4882a593Smuzhiyun * generally mapped to a VNI (Virtual Network Identifier), making 1076*4882a593Smuzhiyun * it programmable together with the **bpf_skb_set_tunnel_key**\ 1077*4882a593Smuzhiyun * () helper. 1078*4882a593Smuzhiyun * 1079*4882a593Smuzhiyun * Let's imagine that the following code is part of a program 1080*4882a593Smuzhiyun * attached to the TC ingress interface, on one end of a GRE 1081*4882a593Smuzhiyun * tunnel, and is supposed to filter out all messages coming from 1082*4882a593Smuzhiyun * remote ends with IPv4 address other than 10.0.0.1: 1083*4882a593Smuzhiyun * 1084*4882a593Smuzhiyun * :: 1085*4882a593Smuzhiyun * 1086*4882a593Smuzhiyun * int ret; 1087*4882a593Smuzhiyun * struct bpf_tunnel_key key = {}; 1088*4882a593Smuzhiyun * 1089*4882a593Smuzhiyun * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); 1090*4882a593Smuzhiyun * if (ret < 0) 1091*4882a593Smuzhiyun * return TC_ACT_SHOT; // drop packet 1092*4882a593Smuzhiyun * 1093*4882a593Smuzhiyun * if (key.remote_ipv4 != 0x0a000001) 1094*4882a593Smuzhiyun * return TC_ACT_SHOT; // drop packet 1095*4882a593Smuzhiyun * 1096*4882a593Smuzhiyun * return TC_ACT_OK; // accept packet 1097*4882a593Smuzhiyun * 1098*4882a593Smuzhiyun * This interface can also be used with all encapsulation devices 1099*4882a593Smuzhiyun * that can operate in "collect metadata" mode: instead of having 1100*4882a593Smuzhiyun * one network device per specific configuration, the "collect 1101*4882a593Smuzhiyun * metadata" mode only requires a single device where the 1102*4882a593Smuzhiyun * configuration can be extracted from this helper. 1103*4882a593Smuzhiyun * 1104*4882a593Smuzhiyun * This can be used together with various tunnels such as VXLan, 1105*4882a593Smuzhiyun * Geneve, GRE or IP in IP (IPIP). 1106*4882a593Smuzhiyun * Return 1107*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1108*4882a593Smuzhiyun * 1109*4882a593Smuzhiyun * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) 1110*4882a593Smuzhiyun * Description 1111*4882a593Smuzhiyun * Populate tunnel metadata for packet associated to *skb.* The 1112*4882a593Smuzhiyun * tunnel metadata is set to the contents of *key*, of *size*. The 1113*4882a593Smuzhiyun * *flags* can be set to a combination of the following values: 1114*4882a593Smuzhiyun * 1115*4882a593Smuzhiyun * **BPF_F_TUNINFO_IPV6** 1116*4882a593Smuzhiyun * Indicate that the tunnel is based on IPv6 protocol 1117*4882a593Smuzhiyun * instead of IPv4. 1118*4882a593Smuzhiyun * **BPF_F_ZERO_CSUM_TX** 1119*4882a593Smuzhiyun * For IPv4 packets, add a flag to tunnel metadata 1120*4882a593Smuzhiyun * indicating that checksum computation should be skipped 1121*4882a593Smuzhiyun * and checksum set to zeroes. 1122*4882a593Smuzhiyun * **BPF_F_DONT_FRAGMENT** 1123*4882a593Smuzhiyun * Add a flag to tunnel metadata indicating that the 1124*4882a593Smuzhiyun * packet should not be fragmented. 1125*4882a593Smuzhiyun * **BPF_F_SEQ_NUMBER** 1126*4882a593Smuzhiyun * Add a flag to tunnel metadata indicating that a 1127*4882a593Smuzhiyun * sequence number should be added to tunnel header before 1128*4882a593Smuzhiyun * sending the packet. This flag was added for GRE 1129*4882a593Smuzhiyun * encapsulation, but might be used with other protocols 1130*4882a593Smuzhiyun * as well in the future. 1131*4882a593Smuzhiyun * 1132*4882a593Smuzhiyun * Here is a typical usage on the transmit path: 1133*4882a593Smuzhiyun * 1134*4882a593Smuzhiyun * :: 1135*4882a593Smuzhiyun * 1136*4882a593Smuzhiyun * struct bpf_tunnel_key key; 1137*4882a593Smuzhiyun * populate key ... 1138*4882a593Smuzhiyun * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); 1139*4882a593Smuzhiyun * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0); 1140*4882a593Smuzhiyun * 1141*4882a593Smuzhiyun * See also the description of the **bpf_skb_get_tunnel_key**\ () 1142*4882a593Smuzhiyun * helper for additional information. 1143*4882a593Smuzhiyun * Return 1144*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1145*4882a593Smuzhiyun * 1146*4882a593Smuzhiyun * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags) 1147*4882a593Smuzhiyun * Description 1148*4882a593Smuzhiyun * Read the value of a perf event counter. This helper relies on a 1149*4882a593Smuzhiyun * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of 1150*4882a593Smuzhiyun * the perf event counter is selected when *map* is updated with 1151*4882a593Smuzhiyun * perf event file descriptors. The *map* is an array whose size 1152*4882a593Smuzhiyun * is the number of available CPUs, and each cell contains a value 1153*4882a593Smuzhiyun * relative to one CPU. The value to retrieve is indicated by 1154*4882a593Smuzhiyun * *flags*, that contains the index of the CPU to look up, masked 1155*4882a593Smuzhiyun * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to 1156*4882a593Smuzhiyun * **BPF_F_CURRENT_CPU** to indicate that the value for the 1157*4882a593Smuzhiyun * current CPU should be retrieved. 1158*4882a593Smuzhiyun * 1159*4882a593Smuzhiyun * Note that before Linux 4.13, only hardware perf event can be 1160*4882a593Smuzhiyun * retrieved. 1161*4882a593Smuzhiyun * 1162*4882a593Smuzhiyun * Also, be aware that the newer helper 1163*4882a593Smuzhiyun * **bpf_perf_event_read_value**\ () is recommended over 1164*4882a593Smuzhiyun * **bpf_perf_event_read**\ () in general. The latter has some ABI 1165*4882a593Smuzhiyun * quirks where error and counter value are used as a return code 1166*4882a593Smuzhiyun * (which is wrong to do since ranges may overlap). This issue is 1167*4882a593Smuzhiyun * fixed with **bpf_perf_event_read_value**\ (), which at the same 1168*4882a593Smuzhiyun * time provides more features over the **bpf_perf_event_read**\ 1169*4882a593Smuzhiyun * () interface. Please refer to the description of 1170*4882a593Smuzhiyun * **bpf_perf_event_read_value**\ () for details. 1171*4882a593Smuzhiyun * Return 1172*4882a593Smuzhiyun * The value of the perf event counter read from the map, or a 1173*4882a593Smuzhiyun * negative error code in case of failure. 1174*4882a593Smuzhiyun * 1175*4882a593Smuzhiyun * long bpf_redirect(u32 ifindex, u64 flags) 1176*4882a593Smuzhiyun * Description 1177*4882a593Smuzhiyun * Redirect the packet to another net device of index *ifindex*. 1178*4882a593Smuzhiyun * This helper is somewhat similar to **bpf_clone_redirect**\ 1179*4882a593Smuzhiyun * (), except that the packet is not cloned, which provides 1180*4882a593Smuzhiyun * increased performance. 1181*4882a593Smuzhiyun * 1182*4882a593Smuzhiyun * Except for XDP, both ingress and egress interfaces can be used 1183*4882a593Smuzhiyun * for redirection. The **BPF_F_INGRESS** value in *flags* is used 1184*4882a593Smuzhiyun * to make the distinction (ingress path is selected if the flag 1185*4882a593Smuzhiyun * is present, egress path otherwise). Currently, XDP only 1186*4882a593Smuzhiyun * supports redirection to the egress interface, and accepts no 1187*4882a593Smuzhiyun * flag at all. 1188*4882a593Smuzhiyun * 1189*4882a593Smuzhiyun * The same effect can also be attained with the more generic 1190*4882a593Smuzhiyun * **bpf_redirect_map**\ (), which uses a BPF map to store the 1191*4882a593Smuzhiyun * redirect target instead of providing it directly to the helper. 1192*4882a593Smuzhiyun * Return 1193*4882a593Smuzhiyun * For XDP, the helper returns **XDP_REDIRECT** on success or 1194*4882a593Smuzhiyun * **XDP_ABORTED** on error. For other program types, the values 1195*4882a593Smuzhiyun * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on 1196*4882a593Smuzhiyun * error. 1197*4882a593Smuzhiyun * 1198*4882a593Smuzhiyun * u32 bpf_get_route_realm(struct sk_buff *skb) 1199*4882a593Smuzhiyun * Description 1200*4882a593Smuzhiyun * Retrieve the realm or the route, that is to say the 1201*4882a593Smuzhiyun * **tclassid** field of the destination for the *skb*. The 1202*4882a593Smuzhiyun * identifier retrieved is a user-provided tag, similar to the 1203*4882a593Smuzhiyun * one used with the net_cls cgroup (see description for 1204*4882a593Smuzhiyun * **bpf_get_cgroup_classid**\ () helper), but here this tag is 1205*4882a593Smuzhiyun * held by a route (a destination entry), not by a task. 1206*4882a593Smuzhiyun * 1207*4882a593Smuzhiyun * Retrieving this identifier works with the clsact TC egress hook 1208*4882a593Smuzhiyun * (see also **tc-bpf(8)**), or alternatively on conventional 1209*4882a593Smuzhiyun * classful egress qdiscs, but not on TC ingress path. In case of 1210*4882a593Smuzhiyun * clsact TC egress hook, this has the advantage that, internally, 1211*4882a593Smuzhiyun * the destination entry has not been dropped yet in the transmit 1212*4882a593Smuzhiyun * path. Therefore, the destination entry does not need to be 1213*4882a593Smuzhiyun * artificially held via **netif_keep_dst**\ () for a classful 1214*4882a593Smuzhiyun * qdisc until the *skb* is freed. 1215*4882a593Smuzhiyun * 1216*4882a593Smuzhiyun * This helper is available only if the kernel was compiled with 1217*4882a593Smuzhiyun * **CONFIG_IP_ROUTE_CLASSID** configuration option. 1218*4882a593Smuzhiyun * Return 1219*4882a593Smuzhiyun * The realm of the route for the packet associated to *skb*, or 0 1220*4882a593Smuzhiyun * if none was found. 1221*4882a593Smuzhiyun * 1222*4882a593Smuzhiyun * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) 1223*4882a593Smuzhiyun * Description 1224*4882a593Smuzhiyun * Write raw *data* blob into a special BPF perf event held by 1225*4882a593Smuzhiyun * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf 1226*4882a593Smuzhiyun * event must have the following attributes: **PERF_SAMPLE_RAW** 1227*4882a593Smuzhiyun * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and 1228*4882a593Smuzhiyun * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. 1229*4882a593Smuzhiyun * 1230*4882a593Smuzhiyun * The *flags* are used to indicate the index in *map* for which 1231*4882a593Smuzhiyun * the value must be put, masked with **BPF_F_INDEX_MASK**. 1232*4882a593Smuzhiyun * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** 1233*4882a593Smuzhiyun * to indicate that the index of the current CPU core should be 1234*4882a593Smuzhiyun * used. 1235*4882a593Smuzhiyun * 1236*4882a593Smuzhiyun * The value to write, of *size*, is passed through eBPF stack and 1237*4882a593Smuzhiyun * pointed by *data*. 1238*4882a593Smuzhiyun * 1239*4882a593Smuzhiyun * The context of the program *ctx* needs also be passed to the 1240*4882a593Smuzhiyun * helper. 1241*4882a593Smuzhiyun * 1242*4882a593Smuzhiyun * On user space, a program willing to read the values needs to 1243*4882a593Smuzhiyun * call **perf_event_open**\ () on the perf event (either for 1244*4882a593Smuzhiyun * one or for all CPUs) and to store the file descriptor into the 1245*4882a593Smuzhiyun * *map*. This must be done before the eBPF program can send data 1246*4882a593Smuzhiyun * into it. An example is available in file 1247*4882a593Smuzhiyun * *samples/bpf/trace_output_user.c* in the Linux kernel source 1248*4882a593Smuzhiyun * tree (the eBPF program counterpart is in 1249*4882a593Smuzhiyun * *samples/bpf/trace_output_kern.c*). 1250*4882a593Smuzhiyun * 1251*4882a593Smuzhiyun * **bpf_perf_event_output**\ () achieves better performance 1252*4882a593Smuzhiyun * than **bpf_trace_printk**\ () for sharing data with user 1253*4882a593Smuzhiyun * space, and is much better suitable for streaming data from eBPF 1254*4882a593Smuzhiyun * programs. 1255*4882a593Smuzhiyun * 1256*4882a593Smuzhiyun * Note that this helper is not restricted to tracing use cases 1257*4882a593Smuzhiyun * and can be used with programs attached to TC or XDP as well, 1258*4882a593Smuzhiyun * where it allows for passing data to user space listeners. Data 1259*4882a593Smuzhiyun * can be: 1260*4882a593Smuzhiyun * 1261*4882a593Smuzhiyun * * Only custom structs, 1262*4882a593Smuzhiyun * * Only the packet payload, or 1263*4882a593Smuzhiyun * * A combination of both. 1264*4882a593Smuzhiyun * Return 1265*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1266*4882a593Smuzhiyun * 1267*4882a593Smuzhiyun * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) 1268*4882a593Smuzhiyun * Description 1269*4882a593Smuzhiyun * This helper was provided as an easy way to load data from a 1270*4882a593Smuzhiyun * packet. It can be used to load *len* bytes from *offset* from 1271*4882a593Smuzhiyun * the packet associated to *skb*, into the buffer pointed by 1272*4882a593Smuzhiyun * *to*. 1273*4882a593Smuzhiyun * 1274*4882a593Smuzhiyun * Since Linux 4.7, usage of this helper has mostly been replaced 1275*4882a593Smuzhiyun * by "direct packet access", enabling packet data to be 1276*4882a593Smuzhiyun * manipulated with *skb*\ **->data** and *skb*\ **->data_end** 1277*4882a593Smuzhiyun * pointing respectively to the first byte of packet data and to 1278*4882a593Smuzhiyun * the byte after the last byte of packet data. However, it 1279*4882a593Smuzhiyun * remains useful if one wishes to read large quantities of data 1280*4882a593Smuzhiyun * at once from a packet into the eBPF stack. 1281*4882a593Smuzhiyun * Return 1282*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1283*4882a593Smuzhiyun * 1284*4882a593Smuzhiyun * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) 1285*4882a593Smuzhiyun * Description 1286*4882a593Smuzhiyun * Walk a user or a kernel stack and return its id. To achieve 1287*4882a593Smuzhiyun * this, the helper needs *ctx*, which is a pointer to the context 1288*4882a593Smuzhiyun * on which the tracing program is executed, and a pointer to a 1289*4882a593Smuzhiyun * *map* of type **BPF_MAP_TYPE_STACK_TRACE**. 1290*4882a593Smuzhiyun * 1291*4882a593Smuzhiyun * The last argument, *flags*, holds the number of stack frames to 1292*4882a593Smuzhiyun * skip (from 0 to 255), masked with 1293*4882a593Smuzhiyun * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set 1294*4882a593Smuzhiyun * a combination of the following flags: 1295*4882a593Smuzhiyun * 1296*4882a593Smuzhiyun * **BPF_F_USER_STACK** 1297*4882a593Smuzhiyun * Collect a user space stack instead of a kernel stack. 1298*4882a593Smuzhiyun * **BPF_F_FAST_STACK_CMP** 1299*4882a593Smuzhiyun * Compare stacks by hash only. 1300*4882a593Smuzhiyun * **BPF_F_REUSE_STACKID** 1301*4882a593Smuzhiyun * If two different stacks hash into the same *stackid*, 1302*4882a593Smuzhiyun * discard the old one. 1303*4882a593Smuzhiyun * 1304*4882a593Smuzhiyun * The stack id retrieved is a 32 bit long integer handle which 1305*4882a593Smuzhiyun * can be further combined with other data (including other stack 1306*4882a593Smuzhiyun * ids) and used as a key into maps. This can be useful for 1307*4882a593Smuzhiyun * generating a variety of graphs (such as flame graphs or off-cpu 1308*4882a593Smuzhiyun * graphs). 1309*4882a593Smuzhiyun * 1310*4882a593Smuzhiyun * For walking a stack, this helper is an improvement over 1311*4882a593Smuzhiyun * **bpf_probe_read**\ (), which can be used with unrolled loops 1312*4882a593Smuzhiyun * but is not efficient and consumes a lot of eBPF instructions. 1313*4882a593Smuzhiyun * Instead, **bpf_get_stackid**\ () can collect up to 1314*4882a593Smuzhiyun * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that 1315*4882a593Smuzhiyun * this limit can be controlled with the **sysctl** program, and 1316*4882a593Smuzhiyun * that it should be manually increased in order to profile long 1317*4882a593Smuzhiyun * user stacks (such as stacks for Java programs). To do so, use: 1318*4882a593Smuzhiyun * 1319*4882a593Smuzhiyun * :: 1320*4882a593Smuzhiyun * 1321*4882a593Smuzhiyun * # sysctl kernel.perf_event_max_stack=<new value> 1322*4882a593Smuzhiyun * Return 1323*4882a593Smuzhiyun * The positive or null stack id on success, or a negative error 1324*4882a593Smuzhiyun * in case of failure. 1325*4882a593Smuzhiyun * 1326*4882a593Smuzhiyun * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed) 1327*4882a593Smuzhiyun * Description 1328*4882a593Smuzhiyun * Compute a checksum difference, from the raw buffer pointed by 1329*4882a593Smuzhiyun * *from*, of length *from_size* (that must be a multiple of 4), 1330*4882a593Smuzhiyun * towards the raw buffer pointed by *to*, of size *to_size* 1331*4882a593Smuzhiyun * (same remark). An optional *seed* can be added to the value 1332*4882a593Smuzhiyun * (this can be cascaded, the seed may come from a previous call 1333*4882a593Smuzhiyun * to the helper). 1334*4882a593Smuzhiyun * 1335*4882a593Smuzhiyun * This is flexible enough to be used in several ways: 1336*4882a593Smuzhiyun * 1337*4882a593Smuzhiyun * * With *from_size* == 0, *to_size* > 0 and *seed* set to 1338*4882a593Smuzhiyun * checksum, it can be used when pushing new data. 1339*4882a593Smuzhiyun * * With *from_size* > 0, *to_size* == 0 and *seed* set to 1340*4882a593Smuzhiyun * checksum, it can be used when removing data from a packet. 1341*4882a593Smuzhiyun * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it 1342*4882a593Smuzhiyun * can be used to compute a diff. Note that *from_size* and 1343*4882a593Smuzhiyun * *to_size* do not need to be equal. 1344*4882a593Smuzhiyun * 1345*4882a593Smuzhiyun * This helper can be used in combination with 1346*4882a593Smuzhiyun * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to 1347*4882a593Smuzhiyun * which one can feed in the difference computed with 1348*4882a593Smuzhiyun * **bpf_csum_diff**\ (). 1349*4882a593Smuzhiyun * Return 1350*4882a593Smuzhiyun * The checksum result, or a negative error code in case of 1351*4882a593Smuzhiyun * failure. 1352*4882a593Smuzhiyun * 1353*4882a593Smuzhiyun * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) 1354*4882a593Smuzhiyun * Description 1355*4882a593Smuzhiyun * Retrieve tunnel options metadata for the packet associated to 1356*4882a593Smuzhiyun * *skb*, and store the raw tunnel option data to the buffer *opt* 1357*4882a593Smuzhiyun * of *size*. 1358*4882a593Smuzhiyun * 1359*4882a593Smuzhiyun * This helper can be used with encapsulation devices that can 1360*4882a593Smuzhiyun * operate in "collect metadata" mode (please refer to the related 1361*4882a593Smuzhiyun * note in the description of **bpf_skb_get_tunnel_key**\ () for 1362*4882a593Smuzhiyun * more details). A particular example where this can be used is 1363*4882a593Smuzhiyun * in combination with the Geneve encapsulation protocol, where it 1364*4882a593Smuzhiyun * allows for pushing (with **bpf_skb_get_tunnel_opt**\ () helper) 1365*4882a593Smuzhiyun * and retrieving arbitrary TLVs (Type-Length-Value headers) from 1366*4882a593Smuzhiyun * the eBPF program. This allows for full customization of these 1367*4882a593Smuzhiyun * headers. 1368*4882a593Smuzhiyun * Return 1369*4882a593Smuzhiyun * The size of the option data retrieved. 1370*4882a593Smuzhiyun * 1371*4882a593Smuzhiyun * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) 1372*4882a593Smuzhiyun * Description 1373*4882a593Smuzhiyun * Set tunnel options metadata for the packet associated to *skb* 1374*4882a593Smuzhiyun * to the option data contained in the raw buffer *opt* of *size*. 1375*4882a593Smuzhiyun * 1376*4882a593Smuzhiyun * See also the description of the **bpf_skb_get_tunnel_opt**\ () 1377*4882a593Smuzhiyun * helper for additional information. 1378*4882a593Smuzhiyun * Return 1379*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1380*4882a593Smuzhiyun * 1381*4882a593Smuzhiyun * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) 1382*4882a593Smuzhiyun * Description 1383*4882a593Smuzhiyun * Change the protocol of the *skb* to *proto*. Currently 1384*4882a593Smuzhiyun * supported are transition from IPv4 to IPv6, and from IPv6 to 1385*4882a593Smuzhiyun * IPv4. The helper takes care of the groundwork for the 1386*4882a593Smuzhiyun * transition, including resizing the socket buffer. The eBPF 1387*4882a593Smuzhiyun * program is expected to fill the new headers, if any, via 1388*4882a593Smuzhiyun * **skb_store_bytes**\ () and to recompute the checksums with 1389*4882a593Smuzhiyun * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ 1390*4882a593Smuzhiyun * (). The main case for this helper is to perform NAT64 1391*4882a593Smuzhiyun * operations out of an eBPF program. 1392*4882a593Smuzhiyun * 1393*4882a593Smuzhiyun * Internally, the GSO type is marked as dodgy so that headers are 1394*4882a593Smuzhiyun * checked and segments are recalculated by the GSO/GRO engine. 1395*4882a593Smuzhiyun * The size for GSO target is adapted as well. 1396*4882a593Smuzhiyun * 1397*4882a593Smuzhiyun * All values for *flags* are reserved for future usage, and must 1398*4882a593Smuzhiyun * be left at zero. 1399*4882a593Smuzhiyun * 1400*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 1401*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 1402*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 1403*4882a593Smuzhiyun * performed again, if the helper is used in combination with 1404*4882a593Smuzhiyun * direct packet access. 1405*4882a593Smuzhiyun * Return 1406*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1407*4882a593Smuzhiyun * 1408*4882a593Smuzhiyun * long bpf_skb_change_type(struct sk_buff *skb, u32 type) 1409*4882a593Smuzhiyun * Description 1410*4882a593Smuzhiyun * Change the packet type for the packet associated to *skb*. This 1411*4882a593Smuzhiyun * comes down to setting *skb*\ **->pkt_type** to *type*, except 1412*4882a593Smuzhiyun * the eBPF program does not have a write access to *skb*\ 1413*4882a593Smuzhiyun * **->pkt_type** beside this helper. Using a helper here allows 1414*4882a593Smuzhiyun * for graceful handling of errors. 1415*4882a593Smuzhiyun * 1416*4882a593Smuzhiyun * The major use case is to change incoming *skb*s to 1417*4882a593Smuzhiyun * **PACKET_HOST** in a programmatic way instead of having to 1418*4882a593Smuzhiyun * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for 1419*4882a593Smuzhiyun * example. 1420*4882a593Smuzhiyun * 1421*4882a593Smuzhiyun * Note that *type* only allows certain values. At this time, they 1422*4882a593Smuzhiyun * are: 1423*4882a593Smuzhiyun * 1424*4882a593Smuzhiyun * **PACKET_HOST** 1425*4882a593Smuzhiyun * Packet is for us. 1426*4882a593Smuzhiyun * **PACKET_BROADCAST** 1427*4882a593Smuzhiyun * Send packet to all. 1428*4882a593Smuzhiyun * **PACKET_MULTICAST** 1429*4882a593Smuzhiyun * Send packet to group. 1430*4882a593Smuzhiyun * **PACKET_OTHERHOST** 1431*4882a593Smuzhiyun * Send packet to someone else. 1432*4882a593Smuzhiyun * Return 1433*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1434*4882a593Smuzhiyun * 1435*4882a593Smuzhiyun * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) 1436*4882a593Smuzhiyun * Description 1437*4882a593Smuzhiyun * Check whether *skb* is a descendant of the cgroup2 held by 1438*4882a593Smuzhiyun * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. 1439*4882a593Smuzhiyun * Return 1440*4882a593Smuzhiyun * The return value depends on the result of the test, and can be: 1441*4882a593Smuzhiyun * 1442*4882a593Smuzhiyun * * 0, if the *skb* failed the cgroup2 descendant test. 1443*4882a593Smuzhiyun * * 1, if the *skb* succeeded the cgroup2 descendant test. 1444*4882a593Smuzhiyun * * A negative error code, if an error occurred. 1445*4882a593Smuzhiyun * 1446*4882a593Smuzhiyun * u32 bpf_get_hash_recalc(struct sk_buff *skb) 1447*4882a593Smuzhiyun * Description 1448*4882a593Smuzhiyun * Retrieve the hash of the packet, *skb*\ **->hash**. If it is 1449*4882a593Smuzhiyun * not set, in particular if the hash was cleared due to mangling, 1450*4882a593Smuzhiyun * recompute this hash. Later accesses to the hash can be done 1451*4882a593Smuzhiyun * directly with *skb*\ **->hash**. 1452*4882a593Smuzhiyun * 1453*4882a593Smuzhiyun * Calling **bpf_set_hash_invalid**\ (), changing a packet 1454*4882a593Smuzhiyun * prototype with **bpf_skb_change_proto**\ (), or calling 1455*4882a593Smuzhiyun * **bpf_skb_store_bytes**\ () with the 1456*4882a593Smuzhiyun * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear 1457*4882a593Smuzhiyun * the hash and to trigger a new computation for the next call to 1458*4882a593Smuzhiyun * **bpf_get_hash_recalc**\ (). 1459*4882a593Smuzhiyun * Return 1460*4882a593Smuzhiyun * The 32-bit hash. 1461*4882a593Smuzhiyun * 1462*4882a593Smuzhiyun * u64 bpf_get_current_task(void) 1463*4882a593Smuzhiyun * Return 1464*4882a593Smuzhiyun * A pointer to the current task struct. 1465*4882a593Smuzhiyun * 1466*4882a593Smuzhiyun * long bpf_probe_write_user(void *dst, const void *src, u32 len) 1467*4882a593Smuzhiyun * Description 1468*4882a593Smuzhiyun * Attempt in a safe way to write *len* bytes from the buffer 1469*4882a593Smuzhiyun * *src* to *dst* in memory. It only works for threads that are in 1470*4882a593Smuzhiyun * user context, and *dst* must be a valid user space address. 1471*4882a593Smuzhiyun * 1472*4882a593Smuzhiyun * This helper should not be used to implement any kind of 1473*4882a593Smuzhiyun * security mechanism because of TOC-TOU attacks, but rather to 1474*4882a593Smuzhiyun * debug, divert, and manipulate execution of semi-cooperative 1475*4882a593Smuzhiyun * processes. 1476*4882a593Smuzhiyun * 1477*4882a593Smuzhiyun * Keep in mind that this feature is meant for experiments, and it 1478*4882a593Smuzhiyun * has a risk of crashing the system and running programs. 1479*4882a593Smuzhiyun * Therefore, when an eBPF program using this helper is attached, 1480*4882a593Smuzhiyun * a warning including PID and process name is printed to kernel 1481*4882a593Smuzhiyun * logs. 1482*4882a593Smuzhiyun * Return 1483*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1484*4882a593Smuzhiyun * 1485*4882a593Smuzhiyun * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) 1486*4882a593Smuzhiyun * Description 1487*4882a593Smuzhiyun * Check whether the probe is being run is the context of a given 1488*4882a593Smuzhiyun * subset of the cgroup2 hierarchy. The cgroup2 to test is held by 1489*4882a593Smuzhiyun * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. 1490*4882a593Smuzhiyun * Return 1491*4882a593Smuzhiyun * The return value depends on the result of the test, and can be: 1492*4882a593Smuzhiyun * 1493*4882a593Smuzhiyun * * 1, if current task belongs to the cgroup2. 1494*4882a593Smuzhiyun * * 0, if current task does not belong to the cgroup2. 1495*4882a593Smuzhiyun * * A negative error code, if an error occurred. 1496*4882a593Smuzhiyun * 1497*4882a593Smuzhiyun * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) 1498*4882a593Smuzhiyun * Description 1499*4882a593Smuzhiyun * Resize (trim or grow) the packet associated to *skb* to the 1500*4882a593Smuzhiyun * new *len*. The *flags* are reserved for future usage, and must 1501*4882a593Smuzhiyun * be left at zero. 1502*4882a593Smuzhiyun * 1503*4882a593Smuzhiyun * The basic idea is that the helper performs the needed work to 1504*4882a593Smuzhiyun * change the size of the packet, then the eBPF program rewrites 1505*4882a593Smuzhiyun * the rest via helpers like **bpf_skb_store_bytes**\ (), 1506*4882a593Smuzhiyun * **bpf_l3_csum_replace**\ (), **bpf_l3_csum_replace**\ () 1507*4882a593Smuzhiyun * and others. This helper is a slow path utility intended for 1508*4882a593Smuzhiyun * replies with control messages. And because it is targeted for 1509*4882a593Smuzhiyun * slow path, the helper itself can afford to be slow: it 1510*4882a593Smuzhiyun * implicitly linearizes, unclones and drops offloads from the 1511*4882a593Smuzhiyun * *skb*. 1512*4882a593Smuzhiyun * 1513*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 1514*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 1515*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 1516*4882a593Smuzhiyun * performed again, if the helper is used in combination with 1517*4882a593Smuzhiyun * direct packet access. 1518*4882a593Smuzhiyun * Return 1519*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1520*4882a593Smuzhiyun * 1521*4882a593Smuzhiyun * long bpf_skb_pull_data(struct sk_buff *skb, u32 len) 1522*4882a593Smuzhiyun * Description 1523*4882a593Smuzhiyun * Pull in non-linear data in case the *skb* is non-linear and not 1524*4882a593Smuzhiyun * all of *len* are part of the linear section. Make *len* bytes 1525*4882a593Smuzhiyun * from *skb* readable and writable. If a zero value is passed for 1526*4882a593Smuzhiyun * *len*, then the whole length of the *skb* is pulled. 1527*4882a593Smuzhiyun * 1528*4882a593Smuzhiyun * This helper is only needed for reading and writing with direct 1529*4882a593Smuzhiyun * packet access. 1530*4882a593Smuzhiyun * 1531*4882a593Smuzhiyun * For direct packet access, testing that offsets to access 1532*4882a593Smuzhiyun * are within packet boundaries (test on *skb*\ **->data_end**) is 1533*4882a593Smuzhiyun * susceptible to fail if offsets are invalid, or if the requested 1534*4882a593Smuzhiyun * data is in non-linear parts of the *skb*. On failure the 1535*4882a593Smuzhiyun * program can just bail out, or in the case of a non-linear 1536*4882a593Smuzhiyun * buffer, use a helper to make the data available. The 1537*4882a593Smuzhiyun * **bpf_skb_load_bytes**\ () helper is a first solution to access 1538*4882a593Smuzhiyun * the data. Another one consists in using **bpf_skb_pull_data** 1539*4882a593Smuzhiyun * to pull in once the non-linear parts, then retesting and 1540*4882a593Smuzhiyun * eventually access the data. 1541*4882a593Smuzhiyun * 1542*4882a593Smuzhiyun * At the same time, this also makes sure the *skb* is uncloned, 1543*4882a593Smuzhiyun * which is a necessary condition for direct write. As this needs 1544*4882a593Smuzhiyun * to be an invariant for the write part only, the verifier 1545*4882a593Smuzhiyun * detects writes and adds a prologue that is calling 1546*4882a593Smuzhiyun * **bpf_skb_pull_data()** to effectively unclone the *skb* from 1547*4882a593Smuzhiyun * the very beginning in case it is indeed cloned. 1548*4882a593Smuzhiyun * 1549*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 1550*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 1551*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 1552*4882a593Smuzhiyun * performed again, if the helper is used in combination with 1553*4882a593Smuzhiyun * direct packet access. 1554*4882a593Smuzhiyun * Return 1555*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1556*4882a593Smuzhiyun * 1557*4882a593Smuzhiyun * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum) 1558*4882a593Smuzhiyun * Description 1559*4882a593Smuzhiyun * Add the checksum *csum* into *skb*\ **->csum** in case the 1560*4882a593Smuzhiyun * driver has supplied a checksum for the entire packet into that 1561*4882a593Smuzhiyun * field. Return an error otherwise. This helper is intended to be 1562*4882a593Smuzhiyun * used in combination with **bpf_csum_diff**\ (), in particular 1563*4882a593Smuzhiyun * when the checksum needs to be updated after data has been 1564*4882a593Smuzhiyun * written into the packet through direct packet access. 1565*4882a593Smuzhiyun * Return 1566*4882a593Smuzhiyun * The checksum on success, or a negative error code in case of 1567*4882a593Smuzhiyun * failure. 1568*4882a593Smuzhiyun * 1569*4882a593Smuzhiyun * void bpf_set_hash_invalid(struct sk_buff *skb) 1570*4882a593Smuzhiyun * Description 1571*4882a593Smuzhiyun * Invalidate the current *skb*\ **->hash**. It can be used after 1572*4882a593Smuzhiyun * mangling on headers through direct packet access, in order to 1573*4882a593Smuzhiyun * indicate that the hash is outdated and to trigger a 1574*4882a593Smuzhiyun * recalculation the next time the kernel tries to access this 1575*4882a593Smuzhiyun * hash or when the **bpf_get_hash_recalc**\ () helper is called. 1576*4882a593Smuzhiyun * 1577*4882a593Smuzhiyun * long bpf_get_numa_node_id(void) 1578*4882a593Smuzhiyun * Description 1579*4882a593Smuzhiyun * Return the id of the current NUMA node. The primary use case 1580*4882a593Smuzhiyun * for this helper is the selection of sockets for the local NUMA 1581*4882a593Smuzhiyun * node, when the program is attached to sockets using the 1582*4882a593Smuzhiyun * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**), 1583*4882a593Smuzhiyun * but the helper is also available to other eBPF program types, 1584*4882a593Smuzhiyun * similarly to **bpf_get_smp_processor_id**\ (). 1585*4882a593Smuzhiyun * Return 1586*4882a593Smuzhiyun * The id of current NUMA node. 1587*4882a593Smuzhiyun * 1588*4882a593Smuzhiyun * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) 1589*4882a593Smuzhiyun * Description 1590*4882a593Smuzhiyun * Grows headroom of packet associated to *skb* and adjusts the 1591*4882a593Smuzhiyun * offset of the MAC header accordingly, adding *len* bytes of 1592*4882a593Smuzhiyun * space. It automatically extends and reallocates memory as 1593*4882a593Smuzhiyun * required. 1594*4882a593Smuzhiyun * 1595*4882a593Smuzhiyun * This helper can be used on a layer 3 *skb* to push a MAC header 1596*4882a593Smuzhiyun * for redirection into a layer 2 device. 1597*4882a593Smuzhiyun * 1598*4882a593Smuzhiyun * All values for *flags* are reserved for future usage, and must 1599*4882a593Smuzhiyun * be left at zero. 1600*4882a593Smuzhiyun * 1601*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 1602*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 1603*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 1604*4882a593Smuzhiyun * performed again, if the helper is used in combination with 1605*4882a593Smuzhiyun * direct packet access. 1606*4882a593Smuzhiyun * Return 1607*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1608*4882a593Smuzhiyun * 1609*4882a593Smuzhiyun * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) 1610*4882a593Smuzhiyun * Description 1611*4882a593Smuzhiyun * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that 1612*4882a593Smuzhiyun * it is possible to use a negative value for *delta*. This helper 1613*4882a593Smuzhiyun * can be used to prepare the packet for pushing or popping 1614*4882a593Smuzhiyun * headers. 1615*4882a593Smuzhiyun * 1616*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 1617*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 1618*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 1619*4882a593Smuzhiyun * performed again, if the helper is used in combination with 1620*4882a593Smuzhiyun * direct packet access. 1621*4882a593Smuzhiyun * Return 1622*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1623*4882a593Smuzhiyun * 1624*4882a593Smuzhiyun * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) 1625*4882a593Smuzhiyun * Description 1626*4882a593Smuzhiyun * Copy a NUL terminated string from an unsafe kernel address 1627*4882a593Smuzhiyun * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for 1628*4882a593Smuzhiyun * more details. 1629*4882a593Smuzhiyun * 1630*4882a593Smuzhiyun * Generally, use **bpf_probe_read_user_str**\ () or 1631*4882a593Smuzhiyun * **bpf_probe_read_kernel_str**\ () instead. 1632*4882a593Smuzhiyun * Return 1633*4882a593Smuzhiyun * On success, the strictly positive length of the string, 1634*4882a593Smuzhiyun * including the trailing NUL character. On error, a negative 1635*4882a593Smuzhiyun * value. 1636*4882a593Smuzhiyun * 1637*4882a593Smuzhiyun * u64 bpf_get_socket_cookie(struct sk_buff *skb) 1638*4882a593Smuzhiyun * Description 1639*4882a593Smuzhiyun * If the **struct sk_buff** pointed by *skb* has a known socket, 1640*4882a593Smuzhiyun * retrieve the cookie (generated by the kernel) of this socket. 1641*4882a593Smuzhiyun * If no cookie has been set yet, generate a new cookie. Once 1642*4882a593Smuzhiyun * generated, the socket cookie remains stable for the life of the 1643*4882a593Smuzhiyun * socket. This helper can be useful for monitoring per socket 1644*4882a593Smuzhiyun * networking traffic statistics as it provides a global socket 1645*4882a593Smuzhiyun * identifier that can be assumed unique. 1646*4882a593Smuzhiyun * Return 1647*4882a593Smuzhiyun * A 8-byte long non-decreasing number on success, or 0 if the 1648*4882a593Smuzhiyun * socket field is missing inside *skb*. 1649*4882a593Smuzhiyun * 1650*4882a593Smuzhiyun * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) 1651*4882a593Smuzhiyun * Description 1652*4882a593Smuzhiyun * Equivalent to bpf_get_socket_cookie() helper that accepts 1653*4882a593Smuzhiyun * *skb*, but gets socket from **struct bpf_sock_addr** context. 1654*4882a593Smuzhiyun * Return 1655*4882a593Smuzhiyun * A 8-byte long non-decreasing number. 1656*4882a593Smuzhiyun * 1657*4882a593Smuzhiyun * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) 1658*4882a593Smuzhiyun * Description 1659*4882a593Smuzhiyun * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts 1660*4882a593Smuzhiyun * *skb*, but gets socket from **struct bpf_sock_ops** context. 1661*4882a593Smuzhiyun * Return 1662*4882a593Smuzhiyun * A 8-byte long non-decreasing number. 1663*4882a593Smuzhiyun * 1664*4882a593Smuzhiyun * u32 bpf_get_socket_uid(struct sk_buff *skb) 1665*4882a593Smuzhiyun * Return 1666*4882a593Smuzhiyun * The owner UID of the socket associated to *skb*. If the socket 1667*4882a593Smuzhiyun * is **NULL**, or if it is not a full socket (i.e. if it is a 1668*4882a593Smuzhiyun * time-wait or a request socket instead), **overflowuid** value 1669*4882a593Smuzhiyun * is returned (note that **overflowuid** might also be the actual 1670*4882a593Smuzhiyun * UID value for the socket). 1671*4882a593Smuzhiyun * 1672*4882a593Smuzhiyun * long bpf_set_hash(struct sk_buff *skb, u32 hash) 1673*4882a593Smuzhiyun * Description 1674*4882a593Smuzhiyun * Set the full hash for *skb* (set the field *skb*\ **->hash**) 1675*4882a593Smuzhiyun * to value *hash*. 1676*4882a593Smuzhiyun * Return 1677*4882a593Smuzhiyun * 0 1678*4882a593Smuzhiyun * 1679*4882a593Smuzhiyun * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) 1680*4882a593Smuzhiyun * Description 1681*4882a593Smuzhiyun * Emulate a call to **setsockopt()** on the socket associated to 1682*4882a593Smuzhiyun * *bpf_socket*, which must be a full socket. The *level* at 1683*4882a593Smuzhiyun * which the option resides and the name *optname* of the option 1684*4882a593Smuzhiyun * must be specified, see **setsockopt(2)** for more information. 1685*4882a593Smuzhiyun * The option value of length *optlen* is pointed by *optval*. 1686*4882a593Smuzhiyun * 1687*4882a593Smuzhiyun * *bpf_socket* should be one of the following: 1688*4882a593Smuzhiyun * 1689*4882a593Smuzhiyun * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. 1690*4882a593Smuzhiyun * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** 1691*4882a593Smuzhiyun * and **BPF_CGROUP_INET6_CONNECT**. 1692*4882a593Smuzhiyun * 1693*4882a593Smuzhiyun * This helper actually implements a subset of **setsockopt()**. 1694*4882a593Smuzhiyun * It supports the following *level*\ s: 1695*4882a593Smuzhiyun * 1696*4882a593Smuzhiyun * * **SOL_SOCKET**, which supports the following *optname*\ s: 1697*4882a593Smuzhiyun * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, 1698*4882a593Smuzhiyun * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**, 1699*4882a593Smuzhiyun * **SO_BINDTODEVICE**, **SO_KEEPALIVE**. 1700*4882a593Smuzhiyun * * **IPPROTO_TCP**, which supports the following *optname*\ s: 1701*4882a593Smuzhiyun * **TCP_CONGESTION**, **TCP_BPF_IW**, 1702*4882a593Smuzhiyun * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**, 1703*4882a593Smuzhiyun * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**, 1704*4882a593Smuzhiyun * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**. 1705*4882a593Smuzhiyun * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. 1706*4882a593Smuzhiyun * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. 1707*4882a593Smuzhiyun * Return 1708*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1709*4882a593Smuzhiyun * 1710*4882a593Smuzhiyun * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) 1711*4882a593Smuzhiyun * Description 1712*4882a593Smuzhiyun * Grow or shrink the room for data in the packet associated to 1713*4882a593Smuzhiyun * *skb* by *len_diff*, and according to the selected *mode*. 1714*4882a593Smuzhiyun * 1715*4882a593Smuzhiyun * By default, the helper will reset any offloaded checksum 1716*4882a593Smuzhiyun * indicator of the skb to CHECKSUM_NONE. This can be avoided 1717*4882a593Smuzhiyun * by the following flag: 1718*4882a593Smuzhiyun * 1719*4882a593Smuzhiyun * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded 1720*4882a593Smuzhiyun * checksum data of the skb to CHECKSUM_NONE. 1721*4882a593Smuzhiyun * 1722*4882a593Smuzhiyun * There are two supported modes at this time: 1723*4882a593Smuzhiyun * 1724*4882a593Smuzhiyun * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer 1725*4882a593Smuzhiyun * (room space is added or removed below the layer 2 header). 1726*4882a593Smuzhiyun * 1727*4882a593Smuzhiyun * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer 1728*4882a593Smuzhiyun * (room space is added or removed below the layer 3 header). 1729*4882a593Smuzhiyun * 1730*4882a593Smuzhiyun * The following flags are supported at this time: 1731*4882a593Smuzhiyun * 1732*4882a593Smuzhiyun * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size. 1733*4882a593Smuzhiyun * Adjusting mss in this way is not allowed for datagrams. 1734*4882a593Smuzhiyun * 1735*4882a593Smuzhiyun * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**, 1736*4882a593Smuzhiyun * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**: 1737*4882a593Smuzhiyun * Any new space is reserved to hold a tunnel header. 1738*4882a593Smuzhiyun * Configure skb offsets and other fields accordingly. 1739*4882a593Smuzhiyun * 1740*4882a593Smuzhiyun * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**, 1741*4882a593Smuzhiyun * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**: 1742*4882a593Smuzhiyun * Use with ENCAP_L3 flags to further specify the tunnel type. 1743*4882a593Smuzhiyun * 1744*4882a593Smuzhiyun * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*): 1745*4882a593Smuzhiyun * Use with ENCAP_L3/L4 flags to further specify the tunnel 1746*4882a593Smuzhiyun * type; *len* is the length of the inner MAC header. 1747*4882a593Smuzhiyun * 1748*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 1749*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 1750*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 1751*4882a593Smuzhiyun * performed again, if the helper is used in combination with 1752*4882a593Smuzhiyun * direct packet access. 1753*4882a593Smuzhiyun * Return 1754*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1755*4882a593Smuzhiyun * 1756*4882a593Smuzhiyun * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) 1757*4882a593Smuzhiyun * Description 1758*4882a593Smuzhiyun * Redirect the packet to the endpoint referenced by *map* at 1759*4882a593Smuzhiyun * index *key*. Depending on its type, this *map* can contain 1760*4882a593Smuzhiyun * references to net devices (for forwarding packets through other 1761*4882a593Smuzhiyun * ports), or to CPUs (for redirecting XDP frames to another CPU; 1762*4882a593Smuzhiyun * but this is only implemented for native XDP (with driver 1763*4882a593Smuzhiyun * support) as of this writing). 1764*4882a593Smuzhiyun * 1765*4882a593Smuzhiyun * The lower two bits of *flags* are used as the return code if 1766*4882a593Smuzhiyun * the map lookup fails. This is so that the return value can be 1767*4882a593Smuzhiyun * one of the XDP program return codes up to **XDP_TX**, as chosen 1768*4882a593Smuzhiyun * by the caller. Any higher bits in the *flags* argument must be 1769*4882a593Smuzhiyun * unset. 1770*4882a593Smuzhiyun * 1771*4882a593Smuzhiyun * See also **bpf_redirect**\ (), which only supports redirecting 1772*4882a593Smuzhiyun * to an ifindex, but doesn't require a map to do so. 1773*4882a593Smuzhiyun * Return 1774*4882a593Smuzhiyun * **XDP_REDIRECT** on success, or the value of the two lower bits 1775*4882a593Smuzhiyun * of the *flags* argument on error. 1776*4882a593Smuzhiyun * 1777*4882a593Smuzhiyun * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) 1778*4882a593Smuzhiyun * Description 1779*4882a593Smuzhiyun * Redirect the packet to the socket referenced by *map* (of type 1780*4882a593Smuzhiyun * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and 1781*4882a593Smuzhiyun * egress interfaces can be used for redirection. The 1782*4882a593Smuzhiyun * **BPF_F_INGRESS** value in *flags* is used to make the 1783*4882a593Smuzhiyun * distinction (ingress path is selected if the flag is present, 1784*4882a593Smuzhiyun * egress path otherwise). This is the only flag supported for now. 1785*4882a593Smuzhiyun * Return 1786*4882a593Smuzhiyun * **SK_PASS** on success, or **SK_DROP** on error. 1787*4882a593Smuzhiyun * 1788*4882a593Smuzhiyun * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) 1789*4882a593Smuzhiyun * Description 1790*4882a593Smuzhiyun * Add an entry to, or update a *map* referencing sockets. The 1791*4882a593Smuzhiyun * *skops* is used as a new value for the entry associated to 1792*4882a593Smuzhiyun * *key*. *flags* is one of: 1793*4882a593Smuzhiyun * 1794*4882a593Smuzhiyun * **BPF_NOEXIST** 1795*4882a593Smuzhiyun * The entry for *key* must not exist in the map. 1796*4882a593Smuzhiyun * **BPF_EXIST** 1797*4882a593Smuzhiyun * The entry for *key* must already exist in the map. 1798*4882a593Smuzhiyun * **BPF_ANY** 1799*4882a593Smuzhiyun * No condition on the existence of the entry for *key*. 1800*4882a593Smuzhiyun * 1801*4882a593Smuzhiyun * If the *map* has eBPF programs (parser and verdict), those will 1802*4882a593Smuzhiyun * be inherited by the socket being added. If the socket is 1803*4882a593Smuzhiyun * already attached to eBPF programs, this results in an error. 1804*4882a593Smuzhiyun * Return 1805*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1806*4882a593Smuzhiyun * 1807*4882a593Smuzhiyun * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) 1808*4882a593Smuzhiyun * Description 1809*4882a593Smuzhiyun * Adjust the address pointed by *xdp_md*\ **->data_meta** by 1810*4882a593Smuzhiyun * *delta* (which can be positive or negative). Note that this 1811*4882a593Smuzhiyun * operation modifies the address stored in *xdp_md*\ **->data**, 1812*4882a593Smuzhiyun * so the latter must be loaded only after the helper has been 1813*4882a593Smuzhiyun * called. 1814*4882a593Smuzhiyun * 1815*4882a593Smuzhiyun * The use of *xdp_md*\ **->data_meta** is optional and programs 1816*4882a593Smuzhiyun * are not required to use it. The rationale is that when the 1817*4882a593Smuzhiyun * packet is processed with XDP (e.g. as DoS filter), it is 1818*4882a593Smuzhiyun * possible to push further meta data along with it before passing 1819*4882a593Smuzhiyun * to the stack, and to give the guarantee that an ingress eBPF 1820*4882a593Smuzhiyun * program attached as a TC classifier on the same device can pick 1821*4882a593Smuzhiyun * this up for further post-processing. Since TC works with socket 1822*4882a593Smuzhiyun * buffers, it remains possible to set from XDP the **mark** or 1823*4882a593Smuzhiyun * **priority** pointers, or other pointers for the socket buffer. 1824*4882a593Smuzhiyun * Having this scratch space generic and programmable allows for 1825*4882a593Smuzhiyun * more flexibility as the user is free to store whatever meta 1826*4882a593Smuzhiyun * data they need. 1827*4882a593Smuzhiyun * 1828*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 1829*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 1830*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 1831*4882a593Smuzhiyun * performed again, if the helper is used in combination with 1832*4882a593Smuzhiyun * direct packet access. 1833*4882a593Smuzhiyun * Return 1834*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1835*4882a593Smuzhiyun * 1836*4882a593Smuzhiyun * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) 1837*4882a593Smuzhiyun * Description 1838*4882a593Smuzhiyun * Read the value of a perf event counter, and store it into *buf* 1839*4882a593Smuzhiyun * of size *buf_size*. This helper relies on a *map* of type 1840*4882a593Smuzhiyun * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event 1841*4882a593Smuzhiyun * counter is selected when *map* is updated with perf event file 1842*4882a593Smuzhiyun * descriptors. The *map* is an array whose size is the number of 1843*4882a593Smuzhiyun * available CPUs, and each cell contains a value relative to one 1844*4882a593Smuzhiyun * CPU. The value to retrieve is indicated by *flags*, that 1845*4882a593Smuzhiyun * contains the index of the CPU to look up, masked with 1846*4882a593Smuzhiyun * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to 1847*4882a593Smuzhiyun * **BPF_F_CURRENT_CPU** to indicate that the value for the 1848*4882a593Smuzhiyun * current CPU should be retrieved. 1849*4882a593Smuzhiyun * 1850*4882a593Smuzhiyun * This helper behaves in a way close to 1851*4882a593Smuzhiyun * **bpf_perf_event_read**\ () helper, save that instead of 1852*4882a593Smuzhiyun * just returning the value observed, it fills the *buf* 1853*4882a593Smuzhiyun * structure. This allows for additional data to be retrieved: in 1854*4882a593Smuzhiyun * particular, the enabled and running times (in *buf*\ 1855*4882a593Smuzhiyun * **->enabled** and *buf*\ **->running**, respectively) are 1856*4882a593Smuzhiyun * copied. In general, **bpf_perf_event_read_value**\ () is 1857*4882a593Smuzhiyun * recommended over **bpf_perf_event_read**\ (), which has some 1858*4882a593Smuzhiyun * ABI issues and provides fewer functionalities. 1859*4882a593Smuzhiyun * 1860*4882a593Smuzhiyun * These values are interesting, because hardware PMU (Performance 1861*4882a593Smuzhiyun * Monitoring Unit) counters are limited resources. When there are 1862*4882a593Smuzhiyun * more PMU based perf events opened than available counters, 1863*4882a593Smuzhiyun * kernel will multiplex these events so each event gets certain 1864*4882a593Smuzhiyun * percentage (but not all) of the PMU time. In case that 1865*4882a593Smuzhiyun * multiplexing happens, the number of samples or counter value 1866*4882a593Smuzhiyun * will not reflect the case compared to when no multiplexing 1867*4882a593Smuzhiyun * occurs. This makes comparison between different runs difficult. 1868*4882a593Smuzhiyun * Typically, the counter value should be normalized before 1869*4882a593Smuzhiyun * comparing to other experiments. The usual normalization is done 1870*4882a593Smuzhiyun * as follows. 1871*4882a593Smuzhiyun * 1872*4882a593Smuzhiyun * :: 1873*4882a593Smuzhiyun * 1874*4882a593Smuzhiyun * normalized_counter = counter * t_enabled / t_running 1875*4882a593Smuzhiyun * 1876*4882a593Smuzhiyun * Where t_enabled is the time enabled for event and t_running is 1877*4882a593Smuzhiyun * the time running for event since last normalization. The 1878*4882a593Smuzhiyun * enabled and running times are accumulated since the perf event 1879*4882a593Smuzhiyun * open. To achieve scaling factor between two invocations of an 1880*4882a593Smuzhiyun * eBPF program, users can use CPU id as the key (which is 1881*4882a593Smuzhiyun * typical for perf array usage model) to remember the previous 1882*4882a593Smuzhiyun * value and do the calculation inside the eBPF program. 1883*4882a593Smuzhiyun * Return 1884*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1885*4882a593Smuzhiyun * 1886*4882a593Smuzhiyun * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) 1887*4882a593Smuzhiyun * Description 1888*4882a593Smuzhiyun * For en eBPF program attached to a perf event, retrieve the 1889*4882a593Smuzhiyun * value of the event counter associated to *ctx* and store it in 1890*4882a593Smuzhiyun * the structure pointed by *buf* and of size *buf_size*. Enabled 1891*4882a593Smuzhiyun * and running times are also stored in the structure (see 1892*4882a593Smuzhiyun * description of helper **bpf_perf_event_read_value**\ () for 1893*4882a593Smuzhiyun * more details). 1894*4882a593Smuzhiyun * Return 1895*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1896*4882a593Smuzhiyun * 1897*4882a593Smuzhiyun * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) 1898*4882a593Smuzhiyun * Description 1899*4882a593Smuzhiyun * Emulate a call to **getsockopt()** on the socket associated to 1900*4882a593Smuzhiyun * *bpf_socket*, which must be a full socket. The *level* at 1901*4882a593Smuzhiyun * which the option resides and the name *optname* of the option 1902*4882a593Smuzhiyun * must be specified, see **getsockopt(2)** for more information. 1903*4882a593Smuzhiyun * The retrieved value is stored in the structure pointed by 1904*4882a593Smuzhiyun * *opval* and of length *optlen*. 1905*4882a593Smuzhiyun * 1906*4882a593Smuzhiyun * *bpf_socket* should be one of the following: 1907*4882a593Smuzhiyun * 1908*4882a593Smuzhiyun * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. 1909*4882a593Smuzhiyun * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** 1910*4882a593Smuzhiyun * and **BPF_CGROUP_INET6_CONNECT**. 1911*4882a593Smuzhiyun * 1912*4882a593Smuzhiyun * This helper actually implements a subset of **getsockopt()**. 1913*4882a593Smuzhiyun * It supports the following *level*\ s: 1914*4882a593Smuzhiyun * 1915*4882a593Smuzhiyun * * **IPPROTO_TCP**, which supports *optname* 1916*4882a593Smuzhiyun * **TCP_CONGESTION**. 1917*4882a593Smuzhiyun * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. 1918*4882a593Smuzhiyun * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. 1919*4882a593Smuzhiyun * Return 1920*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 1921*4882a593Smuzhiyun * 1922*4882a593Smuzhiyun * long bpf_override_return(struct pt_regs *regs, u64 rc) 1923*4882a593Smuzhiyun * Description 1924*4882a593Smuzhiyun * Used for error injection, this helper uses kprobes to override 1925*4882a593Smuzhiyun * the return value of the probed function, and to set it to *rc*. 1926*4882a593Smuzhiyun * The first argument is the context *regs* on which the kprobe 1927*4882a593Smuzhiyun * works. 1928*4882a593Smuzhiyun * 1929*4882a593Smuzhiyun * This helper works by setting the PC (program counter) 1930*4882a593Smuzhiyun * to an override function which is run in place of the original 1931*4882a593Smuzhiyun * probed function. This means the probed function is not run at 1932*4882a593Smuzhiyun * all. The replacement function just returns with the required 1933*4882a593Smuzhiyun * value. 1934*4882a593Smuzhiyun * 1935*4882a593Smuzhiyun * This helper has security implications, and thus is subject to 1936*4882a593Smuzhiyun * restrictions. It is only available if the kernel was compiled 1937*4882a593Smuzhiyun * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration 1938*4882a593Smuzhiyun * option, and in this case it only works on functions tagged with 1939*4882a593Smuzhiyun * **ALLOW_ERROR_INJECTION** in the kernel code. 1940*4882a593Smuzhiyun * 1941*4882a593Smuzhiyun * Also, the helper is only available for the architectures having 1942*4882a593Smuzhiyun * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing, 1943*4882a593Smuzhiyun * x86 architecture is the only one to support this feature. 1944*4882a593Smuzhiyun * Return 1945*4882a593Smuzhiyun * 0 1946*4882a593Smuzhiyun * 1947*4882a593Smuzhiyun * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) 1948*4882a593Smuzhiyun * Description 1949*4882a593Smuzhiyun * Attempt to set the value of the **bpf_sock_ops_cb_flags** field 1950*4882a593Smuzhiyun * for the full TCP socket associated to *bpf_sock_ops* to 1951*4882a593Smuzhiyun * *argval*. 1952*4882a593Smuzhiyun * 1953*4882a593Smuzhiyun * The primary use of this field is to determine if there should 1954*4882a593Smuzhiyun * be calls to eBPF programs of type 1955*4882a593Smuzhiyun * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP 1956*4882a593Smuzhiyun * code. A program of the same type can change its value, per 1957*4882a593Smuzhiyun * connection and as necessary, when the connection is 1958*4882a593Smuzhiyun * established. This field is directly accessible for reading, but 1959*4882a593Smuzhiyun * this helper must be used for updates in order to return an 1960*4882a593Smuzhiyun * error if an eBPF program tries to set a callback that is not 1961*4882a593Smuzhiyun * supported in the current kernel. 1962*4882a593Smuzhiyun * 1963*4882a593Smuzhiyun * *argval* is a flag array which can combine these flags: 1964*4882a593Smuzhiyun * 1965*4882a593Smuzhiyun * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out) 1966*4882a593Smuzhiyun * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission) 1967*4882a593Smuzhiyun * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change) 1968*4882a593Smuzhiyun * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT) 1969*4882a593Smuzhiyun * 1970*4882a593Smuzhiyun * Therefore, this function can be used to clear a callback flag by 1971*4882a593Smuzhiyun * setting the appropriate bit to zero. e.g. to disable the RTO 1972*4882a593Smuzhiyun * callback: 1973*4882a593Smuzhiyun * 1974*4882a593Smuzhiyun * **bpf_sock_ops_cb_flags_set(bpf_sock,** 1975*4882a593Smuzhiyun * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)** 1976*4882a593Smuzhiyun * 1977*4882a593Smuzhiyun * Here are some examples of where one could call such eBPF 1978*4882a593Smuzhiyun * program: 1979*4882a593Smuzhiyun * 1980*4882a593Smuzhiyun * * When RTO fires. 1981*4882a593Smuzhiyun * * When a packet is retransmitted. 1982*4882a593Smuzhiyun * * When the connection terminates. 1983*4882a593Smuzhiyun * * When a packet is sent. 1984*4882a593Smuzhiyun * * When a packet is received. 1985*4882a593Smuzhiyun * Return 1986*4882a593Smuzhiyun * Code **-EINVAL** if the socket is not a full TCP socket; 1987*4882a593Smuzhiyun * otherwise, a positive number containing the bits that could not 1988*4882a593Smuzhiyun * be set is returned (which comes down to 0 if all bits were set 1989*4882a593Smuzhiyun * as required). 1990*4882a593Smuzhiyun * 1991*4882a593Smuzhiyun * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) 1992*4882a593Smuzhiyun * Description 1993*4882a593Smuzhiyun * This helper is used in programs implementing policies at the 1994*4882a593Smuzhiyun * socket level. If the message *msg* is allowed to pass (i.e. if 1995*4882a593Smuzhiyun * the verdict eBPF program returns **SK_PASS**), redirect it to 1996*4882a593Smuzhiyun * the socket referenced by *map* (of type 1997*4882a593Smuzhiyun * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and 1998*4882a593Smuzhiyun * egress interfaces can be used for redirection. The 1999*4882a593Smuzhiyun * **BPF_F_INGRESS** value in *flags* is used to make the 2000*4882a593Smuzhiyun * distinction (ingress path is selected if the flag is present, 2001*4882a593Smuzhiyun * egress path otherwise). This is the only flag supported for now. 2002*4882a593Smuzhiyun * Return 2003*4882a593Smuzhiyun * **SK_PASS** on success, or **SK_DROP** on error. 2004*4882a593Smuzhiyun * 2005*4882a593Smuzhiyun * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) 2006*4882a593Smuzhiyun * Description 2007*4882a593Smuzhiyun * For socket policies, apply the verdict of the eBPF program to 2008*4882a593Smuzhiyun * the next *bytes* (number of bytes) of message *msg*. 2009*4882a593Smuzhiyun * 2010*4882a593Smuzhiyun * For example, this helper can be used in the following cases: 2011*4882a593Smuzhiyun * 2012*4882a593Smuzhiyun * * A single **sendmsg**\ () or **sendfile**\ () system call 2013*4882a593Smuzhiyun * contains multiple logical messages that the eBPF program is 2014*4882a593Smuzhiyun * supposed to read and for which it should apply a verdict. 2015*4882a593Smuzhiyun * * An eBPF program only cares to read the first *bytes* of a 2016*4882a593Smuzhiyun * *msg*. If the message has a large payload, then setting up 2017*4882a593Smuzhiyun * and calling the eBPF program repeatedly for all bytes, even 2018*4882a593Smuzhiyun * though the verdict is already known, would create unnecessary 2019*4882a593Smuzhiyun * overhead. 2020*4882a593Smuzhiyun * 2021*4882a593Smuzhiyun * When called from within an eBPF program, the helper sets a 2022*4882a593Smuzhiyun * counter internal to the BPF infrastructure, that is used to 2023*4882a593Smuzhiyun * apply the last verdict to the next *bytes*. If *bytes* is 2024*4882a593Smuzhiyun * smaller than the current data being processed from a 2025*4882a593Smuzhiyun * **sendmsg**\ () or **sendfile**\ () system call, the first 2026*4882a593Smuzhiyun * *bytes* will be sent and the eBPF program will be re-run with 2027*4882a593Smuzhiyun * the pointer for start of data pointing to byte number *bytes* 2028*4882a593Smuzhiyun * **+ 1**. If *bytes* is larger than the current data being 2029*4882a593Smuzhiyun * processed, then the eBPF verdict will be applied to multiple 2030*4882a593Smuzhiyun * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are 2031*4882a593Smuzhiyun * consumed. 2032*4882a593Smuzhiyun * 2033*4882a593Smuzhiyun * Note that if a socket closes with the internal counter holding 2034*4882a593Smuzhiyun * a non-zero value, this is not a problem because data is not 2035*4882a593Smuzhiyun * being buffered for *bytes* and is sent as it is received. 2036*4882a593Smuzhiyun * Return 2037*4882a593Smuzhiyun * 0 2038*4882a593Smuzhiyun * 2039*4882a593Smuzhiyun * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) 2040*4882a593Smuzhiyun * Description 2041*4882a593Smuzhiyun * For socket policies, prevent the execution of the verdict eBPF 2042*4882a593Smuzhiyun * program for message *msg* until *bytes* (byte number) have been 2043*4882a593Smuzhiyun * accumulated. 2044*4882a593Smuzhiyun * 2045*4882a593Smuzhiyun * This can be used when one needs a specific number of bytes 2046*4882a593Smuzhiyun * before a verdict can be assigned, even if the data spans 2047*4882a593Smuzhiyun * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme 2048*4882a593Smuzhiyun * case would be a user calling **sendmsg**\ () repeatedly with 2049*4882a593Smuzhiyun * 1-byte long message segments. Obviously, this is bad for 2050*4882a593Smuzhiyun * performance, but it is still valid. If the eBPF program needs 2051*4882a593Smuzhiyun * *bytes* bytes to validate a header, this helper can be used to 2052*4882a593Smuzhiyun * prevent the eBPF program to be called again until *bytes* have 2053*4882a593Smuzhiyun * been accumulated. 2054*4882a593Smuzhiyun * Return 2055*4882a593Smuzhiyun * 0 2056*4882a593Smuzhiyun * 2057*4882a593Smuzhiyun * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) 2058*4882a593Smuzhiyun * Description 2059*4882a593Smuzhiyun * For socket policies, pull in non-linear data from user space 2060*4882a593Smuzhiyun * for *msg* and set pointers *msg*\ **->data** and *msg*\ 2061*4882a593Smuzhiyun * **->data_end** to *start* and *end* bytes offsets into *msg*, 2062*4882a593Smuzhiyun * respectively. 2063*4882a593Smuzhiyun * 2064*4882a593Smuzhiyun * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a 2065*4882a593Smuzhiyun * *msg* it can only parse data that the (**data**, **data_end**) 2066*4882a593Smuzhiyun * pointers have already consumed. For **sendmsg**\ () hooks this 2067*4882a593Smuzhiyun * is likely the first scatterlist element. But for calls relying 2068*4882a593Smuzhiyun * on the **sendpage** handler (e.g. **sendfile**\ ()) this will 2069*4882a593Smuzhiyun * be the range (**0**, **0**) because the data is shared with 2070*4882a593Smuzhiyun * user space and by default the objective is to avoid allowing 2071*4882a593Smuzhiyun * user space to modify data while (or after) eBPF verdict is 2072*4882a593Smuzhiyun * being decided. This helper can be used to pull in data and to 2073*4882a593Smuzhiyun * set the start and end pointer to given values. Data will be 2074*4882a593Smuzhiyun * copied if necessary (i.e. if data was not linear and if start 2075*4882a593Smuzhiyun * and end pointers do not point to the same chunk). 2076*4882a593Smuzhiyun * 2077*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 2078*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 2079*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 2080*4882a593Smuzhiyun * performed again, if the helper is used in combination with 2081*4882a593Smuzhiyun * direct packet access. 2082*4882a593Smuzhiyun * 2083*4882a593Smuzhiyun * All values for *flags* are reserved for future usage, and must 2084*4882a593Smuzhiyun * be left at zero. 2085*4882a593Smuzhiyun * Return 2086*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2087*4882a593Smuzhiyun * 2088*4882a593Smuzhiyun * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) 2089*4882a593Smuzhiyun * Description 2090*4882a593Smuzhiyun * Bind the socket associated to *ctx* to the address pointed by 2091*4882a593Smuzhiyun * *addr*, of length *addr_len*. This allows for making outgoing 2092*4882a593Smuzhiyun * connection from the desired IP address, which can be useful for 2093*4882a593Smuzhiyun * example when all processes inside a cgroup should use one 2094*4882a593Smuzhiyun * single IP address on a host that has multiple IP configured. 2095*4882a593Smuzhiyun * 2096*4882a593Smuzhiyun * This helper works for IPv4 and IPv6, TCP and UDP sockets. The 2097*4882a593Smuzhiyun * domain (*addr*\ **->sa_family**) must be **AF_INET** (or 2098*4882a593Smuzhiyun * **AF_INET6**). It's advised to pass zero port (**sin_port** 2099*4882a593Smuzhiyun * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like 2100*4882a593Smuzhiyun * behavior and lets the kernel efficiently pick up an unused 2101*4882a593Smuzhiyun * port as long as 4-tuple is unique. Passing non-zero port might 2102*4882a593Smuzhiyun * lead to degraded performance. 2103*4882a593Smuzhiyun * Return 2104*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2105*4882a593Smuzhiyun * 2106*4882a593Smuzhiyun * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) 2107*4882a593Smuzhiyun * Description 2108*4882a593Smuzhiyun * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is 2109*4882a593Smuzhiyun * possible to both shrink and grow the packet tail. 2110*4882a593Smuzhiyun * Shrink done via *delta* being a negative integer. 2111*4882a593Smuzhiyun * 2112*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 2113*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 2114*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 2115*4882a593Smuzhiyun * performed again, if the helper is used in combination with 2116*4882a593Smuzhiyun * direct packet access. 2117*4882a593Smuzhiyun * Return 2118*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2119*4882a593Smuzhiyun * 2120*4882a593Smuzhiyun * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) 2121*4882a593Smuzhiyun * Description 2122*4882a593Smuzhiyun * Retrieve the XFRM state (IP transform framework, see also 2123*4882a593Smuzhiyun * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. 2124*4882a593Smuzhiyun * 2125*4882a593Smuzhiyun * The retrieved value is stored in the **struct bpf_xfrm_state** 2126*4882a593Smuzhiyun * pointed by *xfrm_state* and of length *size*. 2127*4882a593Smuzhiyun * 2128*4882a593Smuzhiyun * All values for *flags* are reserved for future usage, and must 2129*4882a593Smuzhiyun * be left at zero. 2130*4882a593Smuzhiyun * 2131*4882a593Smuzhiyun * This helper is available only if the kernel was compiled with 2132*4882a593Smuzhiyun * **CONFIG_XFRM** configuration option. 2133*4882a593Smuzhiyun * Return 2134*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2135*4882a593Smuzhiyun * 2136*4882a593Smuzhiyun * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) 2137*4882a593Smuzhiyun * Description 2138*4882a593Smuzhiyun * Return a user or a kernel stack in bpf program provided buffer. 2139*4882a593Smuzhiyun * To achieve this, the helper needs *ctx*, which is a pointer 2140*4882a593Smuzhiyun * to the context on which the tracing program is executed. 2141*4882a593Smuzhiyun * To store the stacktrace, the bpf program provides *buf* with 2142*4882a593Smuzhiyun * a nonnegative *size*. 2143*4882a593Smuzhiyun * 2144*4882a593Smuzhiyun * The last argument, *flags*, holds the number of stack frames to 2145*4882a593Smuzhiyun * skip (from 0 to 255), masked with 2146*4882a593Smuzhiyun * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set 2147*4882a593Smuzhiyun * the following flags: 2148*4882a593Smuzhiyun * 2149*4882a593Smuzhiyun * **BPF_F_USER_STACK** 2150*4882a593Smuzhiyun * Collect a user space stack instead of a kernel stack. 2151*4882a593Smuzhiyun * **BPF_F_USER_BUILD_ID** 2152*4882a593Smuzhiyun * Collect buildid+offset instead of ips for user stack, 2153*4882a593Smuzhiyun * only valid if **BPF_F_USER_STACK** is also specified. 2154*4882a593Smuzhiyun * 2155*4882a593Smuzhiyun * **bpf_get_stack**\ () can collect up to 2156*4882a593Smuzhiyun * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject 2157*4882a593Smuzhiyun * to sufficient large buffer size. Note that 2158*4882a593Smuzhiyun * this limit can be controlled with the **sysctl** program, and 2159*4882a593Smuzhiyun * that it should be manually increased in order to profile long 2160*4882a593Smuzhiyun * user stacks (such as stacks for Java programs). To do so, use: 2161*4882a593Smuzhiyun * 2162*4882a593Smuzhiyun * :: 2163*4882a593Smuzhiyun * 2164*4882a593Smuzhiyun * # sysctl kernel.perf_event_max_stack=<new value> 2165*4882a593Smuzhiyun * Return 2166*4882a593Smuzhiyun * A non-negative value equal to or less than *size* on success, 2167*4882a593Smuzhiyun * or a negative error in case of failure. 2168*4882a593Smuzhiyun * 2169*4882a593Smuzhiyun * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) 2170*4882a593Smuzhiyun * Description 2171*4882a593Smuzhiyun * This helper is similar to **bpf_skb_load_bytes**\ () in that 2172*4882a593Smuzhiyun * it provides an easy way to load *len* bytes from *offset* 2173*4882a593Smuzhiyun * from the packet associated to *skb*, into the buffer pointed 2174*4882a593Smuzhiyun * by *to*. The difference to **bpf_skb_load_bytes**\ () is that 2175*4882a593Smuzhiyun * a fifth argument *start_header* exists in order to select a 2176*4882a593Smuzhiyun * base offset to start from. *start_header* can be one of: 2177*4882a593Smuzhiyun * 2178*4882a593Smuzhiyun * **BPF_HDR_START_MAC** 2179*4882a593Smuzhiyun * Base offset to load data from is *skb*'s mac header. 2180*4882a593Smuzhiyun * **BPF_HDR_START_NET** 2181*4882a593Smuzhiyun * Base offset to load data from is *skb*'s network header. 2182*4882a593Smuzhiyun * 2183*4882a593Smuzhiyun * In general, "direct packet access" is the preferred method to 2184*4882a593Smuzhiyun * access packet data, however, this helper is in particular useful 2185*4882a593Smuzhiyun * in socket filters where *skb*\ **->data** does not always point 2186*4882a593Smuzhiyun * to the start of the mac header and where "direct packet access" 2187*4882a593Smuzhiyun * is not available. 2188*4882a593Smuzhiyun * Return 2189*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2190*4882a593Smuzhiyun * 2191*4882a593Smuzhiyun * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) 2192*4882a593Smuzhiyun * Description 2193*4882a593Smuzhiyun * Do FIB lookup in kernel tables using parameters in *params*. 2194*4882a593Smuzhiyun * If lookup is successful and result shows packet is to be 2195*4882a593Smuzhiyun * forwarded, the neighbor tables are searched for the nexthop. 2196*4882a593Smuzhiyun * If successful (ie., FIB lookup shows forwarding and nexthop 2197*4882a593Smuzhiyun * is resolved), the nexthop address is returned in ipv4_dst 2198*4882a593Smuzhiyun * or ipv6_dst based on family, smac is set to mac address of 2199*4882a593Smuzhiyun * egress device, dmac is set to nexthop mac address, rt_metric 2200*4882a593Smuzhiyun * is set to metric from route (IPv4/IPv6 only), and ifindex 2201*4882a593Smuzhiyun * is set to the device index of the nexthop from the FIB lookup. 2202*4882a593Smuzhiyun * 2203*4882a593Smuzhiyun * *plen* argument is the size of the passed in struct. 2204*4882a593Smuzhiyun * *flags* argument can be a combination of one or more of the 2205*4882a593Smuzhiyun * following values: 2206*4882a593Smuzhiyun * 2207*4882a593Smuzhiyun * **BPF_FIB_LOOKUP_DIRECT** 2208*4882a593Smuzhiyun * Do a direct table lookup vs full lookup using FIB 2209*4882a593Smuzhiyun * rules. 2210*4882a593Smuzhiyun * **BPF_FIB_LOOKUP_OUTPUT** 2211*4882a593Smuzhiyun * Perform lookup from an egress perspective (default is 2212*4882a593Smuzhiyun * ingress). 2213*4882a593Smuzhiyun * 2214*4882a593Smuzhiyun * *ctx* is either **struct xdp_md** for XDP programs or 2215*4882a593Smuzhiyun * **struct sk_buff** tc cls_act programs. 2216*4882a593Smuzhiyun * Return 2217*4882a593Smuzhiyun * * < 0 if any input argument is invalid 2218*4882a593Smuzhiyun * * 0 on success (packet is forwarded, nexthop neighbor exists) 2219*4882a593Smuzhiyun * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the 2220*4882a593Smuzhiyun * packet is not forwarded or needs assist from full stack 2221*4882a593Smuzhiyun * 2222*4882a593Smuzhiyun * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) 2223*4882a593Smuzhiyun * Description 2224*4882a593Smuzhiyun * Add an entry to, or update a sockhash *map* referencing sockets. 2225*4882a593Smuzhiyun * The *skops* is used as a new value for the entry associated to 2226*4882a593Smuzhiyun * *key*. *flags* is one of: 2227*4882a593Smuzhiyun * 2228*4882a593Smuzhiyun * **BPF_NOEXIST** 2229*4882a593Smuzhiyun * The entry for *key* must not exist in the map. 2230*4882a593Smuzhiyun * **BPF_EXIST** 2231*4882a593Smuzhiyun * The entry for *key* must already exist in the map. 2232*4882a593Smuzhiyun * **BPF_ANY** 2233*4882a593Smuzhiyun * No condition on the existence of the entry for *key*. 2234*4882a593Smuzhiyun * 2235*4882a593Smuzhiyun * If the *map* has eBPF programs (parser and verdict), those will 2236*4882a593Smuzhiyun * be inherited by the socket being added. If the socket is 2237*4882a593Smuzhiyun * already attached to eBPF programs, this results in an error. 2238*4882a593Smuzhiyun * Return 2239*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2240*4882a593Smuzhiyun * 2241*4882a593Smuzhiyun * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) 2242*4882a593Smuzhiyun * Description 2243*4882a593Smuzhiyun * This helper is used in programs implementing policies at the 2244*4882a593Smuzhiyun * socket level. If the message *msg* is allowed to pass (i.e. if 2245*4882a593Smuzhiyun * the verdict eBPF program returns **SK_PASS**), redirect it to 2246*4882a593Smuzhiyun * the socket referenced by *map* (of type 2247*4882a593Smuzhiyun * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and 2248*4882a593Smuzhiyun * egress interfaces can be used for redirection. The 2249*4882a593Smuzhiyun * **BPF_F_INGRESS** value in *flags* is used to make the 2250*4882a593Smuzhiyun * distinction (ingress path is selected if the flag is present, 2251*4882a593Smuzhiyun * egress path otherwise). This is the only flag supported for now. 2252*4882a593Smuzhiyun * Return 2253*4882a593Smuzhiyun * **SK_PASS** on success, or **SK_DROP** on error. 2254*4882a593Smuzhiyun * 2255*4882a593Smuzhiyun * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) 2256*4882a593Smuzhiyun * Description 2257*4882a593Smuzhiyun * This helper is used in programs implementing policies at the 2258*4882a593Smuzhiyun * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. 2259*4882a593Smuzhiyun * if the verdict eBPF program returns **SK_PASS**), redirect it 2260*4882a593Smuzhiyun * to the socket referenced by *map* (of type 2261*4882a593Smuzhiyun * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and 2262*4882a593Smuzhiyun * egress interfaces can be used for redirection. The 2263*4882a593Smuzhiyun * **BPF_F_INGRESS** value in *flags* is used to make the 2264*4882a593Smuzhiyun * distinction (ingress path is selected if the flag is present, 2265*4882a593Smuzhiyun * egress otherwise). This is the only flag supported for now. 2266*4882a593Smuzhiyun * Return 2267*4882a593Smuzhiyun * **SK_PASS** on success, or **SK_DROP** on error. 2268*4882a593Smuzhiyun * 2269*4882a593Smuzhiyun * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) 2270*4882a593Smuzhiyun * Description 2271*4882a593Smuzhiyun * Encapsulate the packet associated to *skb* within a Layer 3 2272*4882a593Smuzhiyun * protocol header. This header is provided in the buffer at 2273*4882a593Smuzhiyun * address *hdr*, with *len* its size in bytes. *type* indicates 2274*4882a593Smuzhiyun * the protocol of the header and can be one of: 2275*4882a593Smuzhiyun * 2276*4882a593Smuzhiyun * **BPF_LWT_ENCAP_SEG6** 2277*4882a593Smuzhiyun * IPv6 encapsulation with Segment Routing Header 2278*4882a593Smuzhiyun * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH, 2279*4882a593Smuzhiyun * the IPv6 header is computed by the kernel. 2280*4882a593Smuzhiyun * **BPF_LWT_ENCAP_SEG6_INLINE** 2281*4882a593Smuzhiyun * Only works if *skb* contains an IPv6 packet. Insert a 2282*4882a593Smuzhiyun * Segment Routing Header (**struct ipv6_sr_hdr**) inside 2283*4882a593Smuzhiyun * the IPv6 header. 2284*4882a593Smuzhiyun * **BPF_LWT_ENCAP_IP** 2285*4882a593Smuzhiyun * IP encapsulation (GRE/GUE/IPIP/etc). The outer header 2286*4882a593Smuzhiyun * must be IPv4 or IPv6, followed by zero or more 2287*4882a593Smuzhiyun * additional headers, up to **LWT_BPF_MAX_HEADROOM** 2288*4882a593Smuzhiyun * total bytes in all prepended headers. Please note that 2289*4882a593Smuzhiyun * if **skb_is_gso**\ (*skb*) is true, no more than two 2290*4882a593Smuzhiyun * headers can be prepended, and the inner header, if 2291*4882a593Smuzhiyun * present, should be either GRE or UDP/GUE. 2292*4882a593Smuzhiyun * 2293*4882a593Smuzhiyun * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs 2294*4882a593Smuzhiyun * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can 2295*4882a593Smuzhiyun * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and 2296*4882a593Smuzhiyun * **BPF_PROG_TYPE_LWT_XMIT**. 2297*4882a593Smuzhiyun * 2298*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 2299*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 2300*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 2301*4882a593Smuzhiyun * performed again, if the helper is used in combination with 2302*4882a593Smuzhiyun * direct packet access. 2303*4882a593Smuzhiyun * Return 2304*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2305*4882a593Smuzhiyun * 2306*4882a593Smuzhiyun * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) 2307*4882a593Smuzhiyun * Description 2308*4882a593Smuzhiyun * Store *len* bytes from address *from* into the packet 2309*4882a593Smuzhiyun * associated to *skb*, at *offset*. Only the flags, tag and TLVs 2310*4882a593Smuzhiyun * inside the outermost IPv6 Segment Routing Header can be 2311*4882a593Smuzhiyun * modified through this helper. 2312*4882a593Smuzhiyun * 2313*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 2314*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 2315*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 2316*4882a593Smuzhiyun * performed again, if the helper is used in combination with 2317*4882a593Smuzhiyun * direct packet access. 2318*4882a593Smuzhiyun * Return 2319*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2320*4882a593Smuzhiyun * 2321*4882a593Smuzhiyun * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) 2322*4882a593Smuzhiyun * Description 2323*4882a593Smuzhiyun * Adjust the size allocated to TLVs in the outermost IPv6 2324*4882a593Smuzhiyun * Segment Routing Header contained in the packet associated to 2325*4882a593Smuzhiyun * *skb*, at position *offset* by *delta* bytes. Only offsets 2326*4882a593Smuzhiyun * after the segments are accepted. *delta* can be as well 2327*4882a593Smuzhiyun * positive (growing) as negative (shrinking). 2328*4882a593Smuzhiyun * 2329*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 2330*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 2331*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 2332*4882a593Smuzhiyun * performed again, if the helper is used in combination with 2333*4882a593Smuzhiyun * direct packet access. 2334*4882a593Smuzhiyun * Return 2335*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2336*4882a593Smuzhiyun * 2337*4882a593Smuzhiyun * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) 2338*4882a593Smuzhiyun * Description 2339*4882a593Smuzhiyun * Apply an IPv6 Segment Routing action of type *action* to the 2340*4882a593Smuzhiyun * packet associated to *skb*. Each action takes a parameter 2341*4882a593Smuzhiyun * contained at address *param*, and of length *param_len* bytes. 2342*4882a593Smuzhiyun * *action* can be one of: 2343*4882a593Smuzhiyun * 2344*4882a593Smuzhiyun * **SEG6_LOCAL_ACTION_END_X** 2345*4882a593Smuzhiyun * End.X action: Endpoint with Layer-3 cross-connect. 2346*4882a593Smuzhiyun * Type of *param*: **struct in6_addr**. 2347*4882a593Smuzhiyun * **SEG6_LOCAL_ACTION_END_T** 2348*4882a593Smuzhiyun * End.T action: Endpoint with specific IPv6 table lookup. 2349*4882a593Smuzhiyun * Type of *param*: **int**. 2350*4882a593Smuzhiyun * **SEG6_LOCAL_ACTION_END_B6** 2351*4882a593Smuzhiyun * End.B6 action: Endpoint bound to an SRv6 policy. 2352*4882a593Smuzhiyun * Type of *param*: **struct ipv6_sr_hdr**. 2353*4882a593Smuzhiyun * **SEG6_LOCAL_ACTION_END_B6_ENCAP** 2354*4882a593Smuzhiyun * End.B6.Encap action: Endpoint bound to an SRv6 2355*4882a593Smuzhiyun * encapsulation policy. 2356*4882a593Smuzhiyun * Type of *param*: **struct ipv6_sr_hdr**. 2357*4882a593Smuzhiyun * 2358*4882a593Smuzhiyun * A call to this helper is susceptible to change the underlying 2359*4882a593Smuzhiyun * packet buffer. Therefore, at load time, all checks on pointers 2360*4882a593Smuzhiyun * previously done by the verifier are invalidated and must be 2361*4882a593Smuzhiyun * performed again, if the helper is used in combination with 2362*4882a593Smuzhiyun * direct packet access. 2363*4882a593Smuzhiyun * Return 2364*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2365*4882a593Smuzhiyun * 2366*4882a593Smuzhiyun * long bpf_rc_repeat(void *ctx) 2367*4882a593Smuzhiyun * Description 2368*4882a593Smuzhiyun * This helper is used in programs implementing IR decoding, to 2369*4882a593Smuzhiyun * report a successfully decoded repeat key message. This delays 2370*4882a593Smuzhiyun * the generation of a key up event for previously generated 2371*4882a593Smuzhiyun * key down event. 2372*4882a593Smuzhiyun * 2373*4882a593Smuzhiyun * Some IR protocols like NEC have a special IR message for 2374*4882a593Smuzhiyun * repeating last button, for when a button is held down. 2375*4882a593Smuzhiyun * 2376*4882a593Smuzhiyun * The *ctx* should point to the lirc sample as passed into 2377*4882a593Smuzhiyun * the program. 2378*4882a593Smuzhiyun * 2379*4882a593Smuzhiyun * This helper is only available is the kernel was compiled with 2380*4882a593Smuzhiyun * the **CONFIG_BPF_LIRC_MODE2** configuration option set to 2381*4882a593Smuzhiyun * "**y**". 2382*4882a593Smuzhiyun * Return 2383*4882a593Smuzhiyun * 0 2384*4882a593Smuzhiyun * 2385*4882a593Smuzhiyun * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) 2386*4882a593Smuzhiyun * Description 2387*4882a593Smuzhiyun * This helper is used in programs implementing IR decoding, to 2388*4882a593Smuzhiyun * report a successfully decoded key press with *scancode*, 2389*4882a593Smuzhiyun * *toggle* value in the given *protocol*. The scancode will be 2390*4882a593Smuzhiyun * translated to a keycode using the rc keymap, and reported as 2391*4882a593Smuzhiyun * an input key down event. After a period a key up event is 2392*4882a593Smuzhiyun * generated. This period can be extended by calling either 2393*4882a593Smuzhiyun * **bpf_rc_keydown**\ () again with the same values, or calling 2394*4882a593Smuzhiyun * **bpf_rc_repeat**\ (). 2395*4882a593Smuzhiyun * 2396*4882a593Smuzhiyun * Some protocols include a toggle bit, in case the button was 2397*4882a593Smuzhiyun * released and pressed again between consecutive scancodes. 2398*4882a593Smuzhiyun * 2399*4882a593Smuzhiyun * The *ctx* should point to the lirc sample as passed into 2400*4882a593Smuzhiyun * the program. 2401*4882a593Smuzhiyun * 2402*4882a593Smuzhiyun * The *protocol* is the decoded protocol number (see 2403*4882a593Smuzhiyun * **enum rc_proto** for some predefined values). 2404*4882a593Smuzhiyun * 2405*4882a593Smuzhiyun * This helper is only available is the kernel was compiled with 2406*4882a593Smuzhiyun * the **CONFIG_BPF_LIRC_MODE2** configuration option set to 2407*4882a593Smuzhiyun * "**y**". 2408*4882a593Smuzhiyun * Return 2409*4882a593Smuzhiyun * 0 2410*4882a593Smuzhiyun * 2411*4882a593Smuzhiyun * u64 bpf_skb_cgroup_id(struct sk_buff *skb) 2412*4882a593Smuzhiyun * Description 2413*4882a593Smuzhiyun * Return the cgroup v2 id of the socket associated with the *skb*. 2414*4882a593Smuzhiyun * This is roughly similar to the **bpf_get_cgroup_classid**\ () 2415*4882a593Smuzhiyun * helper for cgroup v1 by providing a tag resp. identifier that 2416*4882a593Smuzhiyun * can be matched on or used for map lookups e.g. to implement 2417*4882a593Smuzhiyun * policy. The cgroup v2 id of a given path in the hierarchy is 2418*4882a593Smuzhiyun * exposed in user space through the f_handle API in order to get 2419*4882a593Smuzhiyun * to the same 64-bit id. 2420*4882a593Smuzhiyun * 2421*4882a593Smuzhiyun * This helper can be used on TC egress path, but not on ingress, 2422*4882a593Smuzhiyun * and is available only if the kernel was compiled with the 2423*4882a593Smuzhiyun * **CONFIG_SOCK_CGROUP_DATA** configuration option. 2424*4882a593Smuzhiyun * Return 2425*4882a593Smuzhiyun * The id is returned or 0 in case the id could not be retrieved. 2426*4882a593Smuzhiyun * 2427*4882a593Smuzhiyun * u64 bpf_get_current_cgroup_id(void) 2428*4882a593Smuzhiyun * Return 2429*4882a593Smuzhiyun * A 64-bit integer containing the current cgroup id based 2430*4882a593Smuzhiyun * on the cgroup within which the current task is running. 2431*4882a593Smuzhiyun * 2432*4882a593Smuzhiyun * void *bpf_get_local_storage(void *map, u64 flags) 2433*4882a593Smuzhiyun * Description 2434*4882a593Smuzhiyun * Get the pointer to the local storage area. 2435*4882a593Smuzhiyun * The type and the size of the local storage is defined 2436*4882a593Smuzhiyun * by the *map* argument. 2437*4882a593Smuzhiyun * The *flags* meaning is specific for each map type, 2438*4882a593Smuzhiyun * and has to be 0 for cgroup local storage. 2439*4882a593Smuzhiyun * 2440*4882a593Smuzhiyun * Depending on the BPF program type, a local storage area 2441*4882a593Smuzhiyun * can be shared between multiple instances of the BPF program, 2442*4882a593Smuzhiyun * running simultaneously. 2443*4882a593Smuzhiyun * 2444*4882a593Smuzhiyun * A user should care about the synchronization by himself. 2445*4882a593Smuzhiyun * For example, by using the **BPF_STX_XADD** instruction to alter 2446*4882a593Smuzhiyun * the shared data. 2447*4882a593Smuzhiyun * Return 2448*4882a593Smuzhiyun * A pointer to the local storage area. 2449*4882a593Smuzhiyun * 2450*4882a593Smuzhiyun * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) 2451*4882a593Smuzhiyun * Description 2452*4882a593Smuzhiyun * Select a **SO_REUSEPORT** socket from a 2453*4882a593Smuzhiyun * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*. 2454*4882a593Smuzhiyun * It checks the selected socket is matching the incoming 2455*4882a593Smuzhiyun * request in the socket buffer. 2456*4882a593Smuzhiyun * Return 2457*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2458*4882a593Smuzhiyun * 2459*4882a593Smuzhiyun * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level) 2460*4882a593Smuzhiyun * Description 2461*4882a593Smuzhiyun * Return id of cgroup v2 that is ancestor of cgroup associated 2462*4882a593Smuzhiyun * with the *skb* at the *ancestor_level*. The root cgroup is at 2463*4882a593Smuzhiyun * *ancestor_level* zero and each step down the hierarchy 2464*4882a593Smuzhiyun * increments the level. If *ancestor_level* == level of cgroup 2465*4882a593Smuzhiyun * associated with *skb*, then return value will be same as that 2466*4882a593Smuzhiyun * of **bpf_skb_cgroup_id**\ (). 2467*4882a593Smuzhiyun * 2468*4882a593Smuzhiyun * The helper is useful to implement policies based on cgroups 2469*4882a593Smuzhiyun * that are upper in hierarchy than immediate cgroup associated 2470*4882a593Smuzhiyun * with *skb*. 2471*4882a593Smuzhiyun * 2472*4882a593Smuzhiyun * The format of returned id and helper limitations are same as in 2473*4882a593Smuzhiyun * **bpf_skb_cgroup_id**\ (). 2474*4882a593Smuzhiyun * Return 2475*4882a593Smuzhiyun * The id is returned or 0 in case the id could not be retrieved. 2476*4882a593Smuzhiyun * 2477*4882a593Smuzhiyun * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) 2478*4882a593Smuzhiyun * Description 2479*4882a593Smuzhiyun * Look for TCP socket matching *tuple*, optionally in a child 2480*4882a593Smuzhiyun * network namespace *netns*. The return value must be checked, 2481*4882a593Smuzhiyun * and if non-**NULL**, released via **bpf_sk_release**\ (). 2482*4882a593Smuzhiyun * 2483*4882a593Smuzhiyun * The *ctx* should point to the context of the program, such as 2484*4882a593Smuzhiyun * the skb or socket (depending on the hook in use). This is used 2485*4882a593Smuzhiyun * to determine the base network namespace for the lookup. 2486*4882a593Smuzhiyun * 2487*4882a593Smuzhiyun * *tuple_size* must be one of: 2488*4882a593Smuzhiyun * 2489*4882a593Smuzhiyun * **sizeof**\ (*tuple*\ **->ipv4**) 2490*4882a593Smuzhiyun * Look for an IPv4 socket. 2491*4882a593Smuzhiyun * **sizeof**\ (*tuple*\ **->ipv6**) 2492*4882a593Smuzhiyun * Look for an IPv6 socket. 2493*4882a593Smuzhiyun * 2494*4882a593Smuzhiyun * If the *netns* is a negative signed 32-bit integer, then the 2495*4882a593Smuzhiyun * socket lookup table in the netns associated with the *ctx* 2496*4882a593Smuzhiyun * will be used. For the TC hooks, this is the netns of the device 2497*4882a593Smuzhiyun * in the skb. For socket hooks, this is the netns of the socket. 2498*4882a593Smuzhiyun * If *netns* is any other signed 32-bit value greater than or 2499*4882a593Smuzhiyun * equal to zero then it specifies the ID of the netns relative to 2500*4882a593Smuzhiyun * the netns associated with the *ctx*. *netns* values beyond the 2501*4882a593Smuzhiyun * range of 32-bit integers are reserved for future use. 2502*4882a593Smuzhiyun * 2503*4882a593Smuzhiyun * All values for *flags* are reserved for future usage, and must 2504*4882a593Smuzhiyun * be left at zero. 2505*4882a593Smuzhiyun * 2506*4882a593Smuzhiyun * This helper is available only if the kernel was compiled with 2507*4882a593Smuzhiyun * **CONFIG_NET** configuration option. 2508*4882a593Smuzhiyun * Return 2509*4882a593Smuzhiyun * Pointer to **struct bpf_sock**, or **NULL** in case of failure. 2510*4882a593Smuzhiyun * For sockets with reuseport option, the **struct bpf_sock** 2511*4882a593Smuzhiyun * result is from *reuse*\ **->socks**\ [] using the hash of the 2512*4882a593Smuzhiyun * tuple. 2513*4882a593Smuzhiyun * 2514*4882a593Smuzhiyun * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) 2515*4882a593Smuzhiyun * Description 2516*4882a593Smuzhiyun * Look for UDP socket matching *tuple*, optionally in a child 2517*4882a593Smuzhiyun * network namespace *netns*. The return value must be checked, 2518*4882a593Smuzhiyun * and if non-**NULL**, released via **bpf_sk_release**\ (). 2519*4882a593Smuzhiyun * 2520*4882a593Smuzhiyun * The *ctx* should point to the context of the program, such as 2521*4882a593Smuzhiyun * the skb or socket (depending on the hook in use). This is used 2522*4882a593Smuzhiyun * to determine the base network namespace for the lookup. 2523*4882a593Smuzhiyun * 2524*4882a593Smuzhiyun * *tuple_size* must be one of: 2525*4882a593Smuzhiyun * 2526*4882a593Smuzhiyun * **sizeof**\ (*tuple*\ **->ipv4**) 2527*4882a593Smuzhiyun * Look for an IPv4 socket. 2528*4882a593Smuzhiyun * **sizeof**\ (*tuple*\ **->ipv6**) 2529*4882a593Smuzhiyun * Look for an IPv6 socket. 2530*4882a593Smuzhiyun * 2531*4882a593Smuzhiyun * If the *netns* is a negative signed 32-bit integer, then the 2532*4882a593Smuzhiyun * socket lookup table in the netns associated with the *ctx* 2533*4882a593Smuzhiyun * will be used. For the TC hooks, this is the netns of the device 2534*4882a593Smuzhiyun * in the skb. For socket hooks, this is the netns of the socket. 2535*4882a593Smuzhiyun * If *netns* is any other signed 32-bit value greater than or 2536*4882a593Smuzhiyun * equal to zero then it specifies the ID of the netns relative to 2537*4882a593Smuzhiyun * the netns associated with the *ctx*. *netns* values beyond the 2538*4882a593Smuzhiyun * range of 32-bit integers are reserved for future use. 2539*4882a593Smuzhiyun * 2540*4882a593Smuzhiyun * All values for *flags* are reserved for future usage, and must 2541*4882a593Smuzhiyun * be left at zero. 2542*4882a593Smuzhiyun * 2543*4882a593Smuzhiyun * This helper is available only if the kernel was compiled with 2544*4882a593Smuzhiyun * **CONFIG_NET** configuration option. 2545*4882a593Smuzhiyun * Return 2546*4882a593Smuzhiyun * Pointer to **struct bpf_sock**, or **NULL** in case of failure. 2547*4882a593Smuzhiyun * For sockets with reuseport option, the **struct bpf_sock** 2548*4882a593Smuzhiyun * result is from *reuse*\ **->socks**\ [] using the hash of the 2549*4882a593Smuzhiyun * tuple. 2550*4882a593Smuzhiyun * 2551*4882a593Smuzhiyun * long bpf_sk_release(void *sock) 2552*4882a593Smuzhiyun * Description 2553*4882a593Smuzhiyun * Release the reference held by *sock*. *sock* must be a 2554*4882a593Smuzhiyun * non-**NULL** pointer that was returned from 2555*4882a593Smuzhiyun * **bpf_sk_lookup_xxx**\ (). 2556*4882a593Smuzhiyun * Return 2557*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2558*4882a593Smuzhiyun * 2559*4882a593Smuzhiyun * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) 2560*4882a593Smuzhiyun * Description 2561*4882a593Smuzhiyun * Push an element *value* in *map*. *flags* is one of: 2562*4882a593Smuzhiyun * 2563*4882a593Smuzhiyun * **BPF_EXIST** 2564*4882a593Smuzhiyun * If the queue/stack is full, the oldest element is 2565*4882a593Smuzhiyun * removed to make room for this. 2566*4882a593Smuzhiyun * Return 2567*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2568*4882a593Smuzhiyun * 2569*4882a593Smuzhiyun * long bpf_map_pop_elem(struct bpf_map *map, void *value) 2570*4882a593Smuzhiyun * Description 2571*4882a593Smuzhiyun * Pop an element from *map*. 2572*4882a593Smuzhiyun * Return 2573*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2574*4882a593Smuzhiyun * 2575*4882a593Smuzhiyun * long bpf_map_peek_elem(struct bpf_map *map, void *value) 2576*4882a593Smuzhiyun * Description 2577*4882a593Smuzhiyun * Get an element from *map* without removing it. 2578*4882a593Smuzhiyun * Return 2579*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2580*4882a593Smuzhiyun * 2581*4882a593Smuzhiyun * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) 2582*4882a593Smuzhiyun * Description 2583*4882a593Smuzhiyun * For socket policies, insert *len* bytes into *msg* at offset 2584*4882a593Smuzhiyun * *start*. 2585*4882a593Smuzhiyun * 2586*4882a593Smuzhiyun * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a 2587*4882a593Smuzhiyun * *msg* it may want to insert metadata or options into the *msg*. 2588*4882a593Smuzhiyun * This can later be read and used by any of the lower layer BPF 2589*4882a593Smuzhiyun * hooks. 2590*4882a593Smuzhiyun * 2591*4882a593Smuzhiyun * This helper may fail if under memory pressure (a malloc 2592*4882a593Smuzhiyun * fails) in these cases BPF programs will get an appropriate 2593*4882a593Smuzhiyun * error and BPF programs will need to handle them. 2594*4882a593Smuzhiyun * Return 2595*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2596*4882a593Smuzhiyun * 2597*4882a593Smuzhiyun * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) 2598*4882a593Smuzhiyun * Description 2599*4882a593Smuzhiyun * Will remove *len* bytes from a *msg* starting at byte *start*. 2600*4882a593Smuzhiyun * This may result in **ENOMEM** errors under certain situations if 2601*4882a593Smuzhiyun * an allocation and copy are required due to a full ring buffer. 2602*4882a593Smuzhiyun * However, the helper will try to avoid doing the allocation 2603*4882a593Smuzhiyun * if possible. Other errors can occur if input parameters are 2604*4882a593Smuzhiyun * invalid either due to *start* byte not being valid part of *msg* 2605*4882a593Smuzhiyun * payload and/or *pop* value being to large. 2606*4882a593Smuzhiyun * Return 2607*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2608*4882a593Smuzhiyun * 2609*4882a593Smuzhiyun * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) 2610*4882a593Smuzhiyun * Description 2611*4882a593Smuzhiyun * This helper is used in programs implementing IR decoding, to 2612*4882a593Smuzhiyun * report a successfully decoded pointer movement. 2613*4882a593Smuzhiyun * 2614*4882a593Smuzhiyun * The *ctx* should point to the lirc sample as passed into 2615*4882a593Smuzhiyun * the program. 2616*4882a593Smuzhiyun * 2617*4882a593Smuzhiyun * This helper is only available is the kernel was compiled with 2618*4882a593Smuzhiyun * the **CONFIG_BPF_LIRC_MODE2** configuration option set to 2619*4882a593Smuzhiyun * "**y**". 2620*4882a593Smuzhiyun * Return 2621*4882a593Smuzhiyun * 0 2622*4882a593Smuzhiyun * 2623*4882a593Smuzhiyun * long bpf_spin_lock(struct bpf_spin_lock *lock) 2624*4882a593Smuzhiyun * Description 2625*4882a593Smuzhiyun * Acquire a spinlock represented by the pointer *lock*, which is 2626*4882a593Smuzhiyun * stored as part of a value of a map. Taking the lock allows to 2627*4882a593Smuzhiyun * safely update the rest of the fields in that value. The 2628*4882a593Smuzhiyun * spinlock can (and must) later be released with a call to 2629*4882a593Smuzhiyun * **bpf_spin_unlock**\ (\ *lock*\ ). 2630*4882a593Smuzhiyun * 2631*4882a593Smuzhiyun * Spinlocks in BPF programs come with a number of restrictions 2632*4882a593Smuzhiyun * and constraints: 2633*4882a593Smuzhiyun * 2634*4882a593Smuzhiyun * * **bpf_spin_lock** objects are only allowed inside maps of 2635*4882a593Smuzhiyun * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this 2636*4882a593Smuzhiyun * list could be extended in the future). 2637*4882a593Smuzhiyun * * BTF description of the map is mandatory. 2638*4882a593Smuzhiyun * * The BPF program can take ONE lock at a time, since taking two 2639*4882a593Smuzhiyun * or more could cause dead locks. 2640*4882a593Smuzhiyun * * Only one **struct bpf_spin_lock** is allowed per map element. 2641*4882a593Smuzhiyun * * When the lock is taken, calls (either BPF to BPF or helpers) 2642*4882a593Smuzhiyun * are not allowed. 2643*4882a593Smuzhiyun * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not 2644*4882a593Smuzhiyun * allowed inside a spinlock-ed region. 2645*4882a593Smuzhiyun * * The BPF program MUST call **bpf_spin_unlock**\ () to release 2646*4882a593Smuzhiyun * the lock, on all execution paths, before it returns. 2647*4882a593Smuzhiyun * * The BPF program can access **struct bpf_spin_lock** only via 2648*4882a593Smuzhiyun * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ () 2649*4882a593Smuzhiyun * helpers. Loading or storing data into the **struct 2650*4882a593Smuzhiyun * bpf_spin_lock** *lock*\ **;** field of a map is not allowed. 2651*4882a593Smuzhiyun * * To use the **bpf_spin_lock**\ () helper, the BTF description 2652*4882a593Smuzhiyun * of the map value must be a struct and have **struct 2653*4882a593Smuzhiyun * bpf_spin_lock** *anyname*\ **;** field at the top level. 2654*4882a593Smuzhiyun * Nested lock inside another struct is not allowed. 2655*4882a593Smuzhiyun * * The **struct bpf_spin_lock** *lock* field in a map value must 2656*4882a593Smuzhiyun * be aligned on a multiple of 4 bytes in that value. 2657*4882a593Smuzhiyun * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy 2658*4882a593Smuzhiyun * the **bpf_spin_lock** field to user space. 2659*4882a593Smuzhiyun * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from 2660*4882a593Smuzhiyun * a BPF program, do not update the **bpf_spin_lock** field. 2661*4882a593Smuzhiyun * * **bpf_spin_lock** cannot be on the stack or inside a 2662*4882a593Smuzhiyun * networking packet (it can only be inside of a map values). 2663*4882a593Smuzhiyun * * **bpf_spin_lock** is available to root only. 2664*4882a593Smuzhiyun * * Tracing programs and socket filter programs cannot use 2665*4882a593Smuzhiyun * **bpf_spin_lock**\ () due to insufficient preemption checks 2666*4882a593Smuzhiyun * (but this may change in the future). 2667*4882a593Smuzhiyun * * **bpf_spin_lock** is not allowed in inner maps of map-in-map. 2668*4882a593Smuzhiyun * Return 2669*4882a593Smuzhiyun * 0 2670*4882a593Smuzhiyun * 2671*4882a593Smuzhiyun * long bpf_spin_unlock(struct bpf_spin_lock *lock) 2672*4882a593Smuzhiyun * Description 2673*4882a593Smuzhiyun * Release the *lock* previously locked by a call to 2674*4882a593Smuzhiyun * **bpf_spin_lock**\ (\ *lock*\ ). 2675*4882a593Smuzhiyun * Return 2676*4882a593Smuzhiyun * 0 2677*4882a593Smuzhiyun * 2678*4882a593Smuzhiyun * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk) 2679*4882a593Smuzhiyun * Description 2680*4882a593Smuzhiyun * This helper gets a **struct bpf_sock** pointer such 2681*4882a593Smuzhiyun * that all the fields in this **bpf_sock** can be accessed. 2682*4882a593Smuzhiyun * Return 2683*4882a593Smuzhiyun * A **struct bpf_sock** pointer on success, or **NULL** in 2684*4882a593Smuzhiyun * case of failure. 2685*4882a593Smuzhiyun * 2686*4882a593Smuzhiyun * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk) 2687*4882a593Smuzhiyun * Description 2688*4882a593Smuzhiyun * This helper gets a **struct bpf_tcp_sock** pointer from a 2689*4882a593Smuzhiyun * **struct bpf_sock** pointer. 2690*4882a593Smuzhiyun * Return 2691*4882a593Smuzhiyun * A **struct bpf_tcp_sock** pointer on success, or **NULL** in 2692*4882a593Smuzhiyun * case of failure. 2693*4882a593Smuzhiyun * 2694*4882a593Smuzhiyun * long bpf_skb_ecn_set_ce(struct sk_buff *skb) 2695*4882a593Smuzhiyun * Description 2696*4882a593Smuzhiyun * Set ECN (Explicit Congestion Notification) field of IP header 2697*4882a593Smuzhiyun * to **CE** (Congestion Encountered) if current value is **ECT** 2698*4882a593Smuzhiyun * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6 2699*4882a593Smuzhiyun * and IPv4. 2700*4882a593Smuzhiyun * Return 2701*4882a593Smuzhiyun * 1 if the **CE** flag is set (either by the current helper call 2702*4882a593Smuzhiyun * or because it was already present), 0 if it is not set. 2703*4882a593Smuzhiyun * 2704*4882a593Smuzhiyun * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk) 2705*4882a593Smuzhiyun * Description 2706*4882a593Smuzhiyun * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state. 2707*4882a593Smuzhiyun * **bpf_sk_release**\ () is unnecessary and not allowed. 2708*4882a593Smuzhiyun * Return 2709*4882a593Smuzhiyun * A **struct bpf_sock** pointer on success, or **NULL** in 2710*4882a593Smuzhiyun * case of failure. 2711*4882a593Smuzhiyun * 2712*4882a593Smuzhiyun * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) 2713*4882a593Smuzhiyun * Description 2714*4882a593Smuzhiyun * Look for TCP socket matching *tuple*, optionally in a child 2715*4882a593Smuzhiyun * network namespace *netns*. The return value must be checked, 2716*4882a593Smuzhiyun * and if non-**NULL**, released via **bpf_sk_release**\ (). 2717*4882a593Smuzhiyun * 2718*4882a593Smuzhiyun * This function is identical to **bpf_sk_lookup_tcp**\ (), except 2719*4882a593Smuzhiyun * that it also returns timewait or request sockets. Use 2720*4882a593Smuzhiyun * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the 2721*4882a593Smuzhiyun * full structure. 2722*4882a593Smuzhiyun * 2723*4882a593Smuzhiyun * This helper is available only if the kernel was compiled with 2724*4882a593Smuzhiyun * **CONFIG_NET** configuration option. 2725*4882a593Smuzhiyun * Return 2726*4882a593Smuzhiyun * Pointer to **struct bpf_sock**, or **NULL** in case of failure. 2727*4882a593Smuzhiyun * For sockets with reuseport option, the **struct bpf_sock** 2728*4882a593Smuzhiyun * result is from *reuse*\ **->socks**\ [] using the hash of the 2729*4882a593Smuzhiyun * tuple. 2730*4882a593Smuzhiyun * 2731*4882a593Smuzhiyun * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) 2732*4882a593Smuzhiyun * Description 2733*4882a593Smuzhiyun * Check whether *iph* and *th* contain a valid SYN cookie ACK for 2734*4882a593Smuzhiyun * the listening socket in *sk*. 2735*4882a593Smuzhiyun * 2736*4882a593Smuzhiyun * *iph* points to the start of the IPv4 or IPv6 header, while 2737*4882a593Smuzhiyun * *iph_len* contains **sizeof**\ (**struct iphdr**) or 2738*4882a593Smuzhiyun * **sizeof**\ (**struct ip6hdr**). 2739*4882a593Smuzhiyun * 2740*4882a593Smuzhiyun * *th* points to the start of the TCP header, while *th_len* 2741*4882a593Smuzhiyun * contains **sizeof**\ (**struct tcphdr**). 2742*4882a593Smuzhiyun * Return 2743*4882a593Smuzhiyun * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative 2744*4882a593Smuzhiyun * error otherwise. 2745*4882a593Smuzhiyun * 2746*4882a593Smuzhiyun * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) 2747*4882a593Smuzhiyun * Description 2748*4882a593Smuzhiyun * Get name of sysctl in /proc/sys/ and copy it into provided by 2749*4882a593Smuzhiyun * program buffer *buf* of size *buf_len*. 2750*4882a593Smuzhiyun * 2751*4882a593Smuzhiyun * The buffer is always NUL terminated, unless it's zero-sized. 2752*4882a593Smuzhiyun * 2753*4882a593Smuzhiyun * If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is 2754*4882a593Smuzhiyun * copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name 2755*4882a593Smuzhiyun * only (e.g. "tcp_mem"). 2756*4882a593Smuzhiyun * Return 2757*4882a593Smuzhiyun * Number of character copied (not including the trailing NUL). 2758*4882a593Smuzhiyun * 2759*4882a593Smuzhiyun * **-E2BIG** if the buffer wasn't big enough (*buf* will contain 2760*4882a593Smuzhiyun * truncated name in this case). 2761*4882a593Smuzhiyun * 2762*4882a593Smuzhiyun * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) 2763*4882a593Smuzhiyun * Description 2764*4882a593Smuzhiyun * Get current value of sysctl as it is presented in /proc/sys 2765*4882a593Smuzhiyun * (incl. newline, etc), and copy it as a string into provided 2766*4882a593Smuzhiyun * by program buffer *buf* of size *buf_len*. 2767*4882a593Smuzhiyun * 2768*4882a593Smuzhiyun * The whole value is copied, no matter what file position user 2769*4882a593Smuzhiyun * space issued e.g. sys_read at. 2770*4882a593Smuzhiyun * 2771*4882a593Smuzhiyun * The buffer is always NUL terminated, unless it's zero-sized. 2772*4882a593Smuzhiyun * Return 2773*4882a593Smuzhiyun * Number of character copied (not including the trailing NUL). 2774*4882a593Smuzhiyun * 2775*4882a593Smuzhiyun * **-E2BIG** if the buffer wasn't big enough (*buf* will contain 2776*4882a593Smuzhiyun * truncated name in this case). 2777*4882a593Smuzhiyun * 2778*4882a593Smuzhiyun * **-EINVAL** if current value was unavailable, e.g. because 2779*4882a593Smuzhiyun * sysctl is uninitialized and read returns -EIO for it. 2780*4882a593Smuzhiyun * 2781*4882a593Smuzhiyun * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) 2782*4882a593Smuzhiyun * Description 2783*4882a593Smuzhiyun * Get new value being written by user space to sysctl (before 2784*4882a593Smuzhiyun * the actual write happens) and copy it as a string into 2785*4882a593Smuzhiyun * provided by program buffer *buf* of size *buf_len*. 2786*4882a593Smuzhiyun * 2787*4882a593Smuzhiyun * User space may write new value at file position > 0. 2788*4882a593Smuzhiyun * 2789*4882a593Smuzhiyun * The buffer is always NUL terminated, unless it's zero-sized. 2790*4882a593Smuzhiyun * Return 2791*4882a593Smuzhiyun * Number of character copied (not including the trailing NUL). 2792*4882a593Smuzhiyun * 2793*4882a593Smuzhiyun * **-E2BIG** if the buffer wasn't big enough (*buf* will contain 2794*4882a593Smuzhiyun * truncated name in this case). 2795*4882a593Smuzhiyun * 2796*4882a593Smuzhiyun * **-EINVAL** if sysctl is being read. 2797*4882a593Smuzhiyun * 2798*4882a593Smuzhiyun * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) 2799*4882a593Smuzhiyun * Description 2800*4882a593Smuzhiyun * Override new value being written by user space to sysctl with 2801*4882a593Smuzhiyun * value provided by program in buffer *buf* of size *buf_len*. 2802*4882a593Smuzhiyun * 2803*4882a593Smuzhiyun * *buf* should contain a string in same form as provided by user 2804*4882a593Smuzhiyun * space on sysctl write. 2805*4882a593Smuzhiyun * 2806*4882a593Smuzhiyun * User space may write new value at file position > 0. To override 2807*4882a593Smuzhiyun * the whole sysctl value file position should be set to zero. 2808*4882a593Smuzhiyun * Return 2809*4882a593Smuzhiyun * 0 on success. 2810*4882a593Smuzhiyun * 2811*4882a593Smuzhiyun * **-E2BIG** if the *buf_len* is too big. 2812*4882a593Smuzhiyun * 2813*4882a593Smuzhiyun * **-EINVAL** if sysctl is being read. 2814*4882a593Smuzhiyun * 2815*4882a593Smuzhiyun * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) 2816*4882a593Smuzhiyun * Description 2817*4882a593Smuzhiyun * Convert the initial part of the string from buffer *buf* of 2818*4882a593Smuzhiyun * size *buf_len* to a long integer according to the given base 2819*4882a593Smuzhiyun * and save the result in *res*. 2820*4882a593Smuzhiyun * 2821*4882a593Smuzhiyun * The string may begin with an arbitrary amount of white space 2822*4882a593Smuzhiyun * (as determined by **isspace**\ (3)) followed by a single 2823*4882a593Smuzhiyun * optional '**-**' sign. 2824*4882a593Smuzhiyun * 2825*4882a593Smuzhiyun * Five least significant bits of *flags* encode base, other bits 2826*4882a593Smuzhiyun * are currently unused. 2827*4882a593Smuzhiyun * 2828*4882a593Smuzhiyun * Base must be either 8, 10, 16 or 0 to detect it automatically 2829*4882a593Smuzhiyun * similar to user space **strtol**\ (3). 2830*4882a593Smuzhiyun * Return 2831*4882a593Smuzhiyun * Number of characters consumed on success. Must be positive but 2832*4882a593Smuzhiyun * no more than *buf_len*. 2833*4882a593Smuzhiyun * 2834*4882a593Smuzhiyun * **-EINVAL** if no valid digits were found or unsupported base 2835*4882a593Smuzhiyun * was provided. 2836*4882a593Smuzhiyun * 2837*4882a593Smuzhiyun * **-ERANGE** if resulting value was out of range. 2838*4882a593Smuzhiyun * 2839*4882a593Smuzhiyun * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) 2840*4882a593Smuzhiyun * Description 2841*4882a593Smuzhiyun * Convert the initial part of the string from buffer *buf* of 2842*4882a593Smuzhiyun * size *buf_len* to an unsigned long integer according to the 2843*4882a593Smuzhiyun * given base and save the result in *res*. 2844*4882a593Smuzhiyun * 2845*4882a593Smuzhiyun * The string may begin with an arbitrary amount of white space 2846*4882a593Smuzhiyun * (as determined by **isspace**\ (3)). 2847*4882a593Smuzhiyun * 2848*4882a593Smuzhiyun * Five least significant bits of *flags* encode base, other bits 2849*4882a593Smuzhiyun * are currently unused. 2850*4882a593Smuzhiyun * 2851*4882a593Smuzhiyun * Base must be either 8, 10, 16 or 0 to detect it automatically 2852*4882a593Smuzhiyun * similar to user space **strtoul**\ (3). 2853*4882a593Smuzhiyun * Return 2854*4882a593Smuzhiyun * Number of characters consumed on success. Must be positive but 2855*4882a593Smuzhiyun * no more than *buf_len*. 2856*4882a593Smuzhiyun * 2857*4882a593Smuzhiyun * **-EINVAL** if no valid digits were found or unsupported base 2858*4882a593Smuzhiyun * was provided. 2859*4882a593Smuzhiyun * 2860*4882a593Smuzhiyun * **-ERANGE** if resulting value was out of range. 2861*4882a593Smuzhiyun * 2862*4882a593Smuzhiyun * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags) 2863*4882a593Smuzhiyun * Description 2864*4882a593Smuzhiyun * Get a bpf-local-storage from a *sk*. 2865*4882a593Smuzhiyun * 2866*4882a593Smuzhiyun * Logically, it could be thought of getting the value from 2867*4882a593Smuzhiyun * a *map* with *sk* as the **key**. From this 2868*4882a593Smuzhiyun * perspective, the usage is not much different from 2869*4882a593Smuzhiyun * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this 2870*4882a593Smuzhiyun * helper enforces the key must be a full socket and the map must 2871*4882a593Smuzhiyun * be a **BPF_MAP_TYPE_SK_STORAGE** also. 2872*4882a593Smuzhiyun * 2873*4882a593Smuzhiyun * Underneath, the value is stored locally at *sk* instead of 2874*4882a593Smuzhiyun * the *map*. The *map* is used as the bpf-local-storage 2875*4882a593Smuzhiyun * "type". The bpf-local-storage "type" (i.e. the *map*) is 2876*4882a593Smuzhiyun * searched against all bpf-local-storages residing at *sk*. 2877*4882a593Smuzhiyun * 2878*4882a593Smuzhiyun * *sk* is a kernel **struct sock** pointer for LSM program. 2879*4882a593Smuzhiyun * *sk* is a **struct bpf_sock** pointer for other program types. 2880*4882a593Smuzhiyun * 2881*4882a593Smuzhiyun * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be 2882*4882a593Smuzhiyun * used such that a new bpf-local-storage will be 2883*4882a593Smuzhiyun * created if one does not exist. *value* can be used 2884*4882a593Smuzhiyun * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify 2885*4882a593Smuzhiyun * the initial value of a bpf-local-storage. If *value* is 2886*4882a593Smuzhiyun * **NULL**, the new bpf-local-storage will be zero initialized. 2887*4882a593Smuzhiyun * Return 2888*4882a593Smuzhiyun * A bpf-local-storage pointer is returned on success. 2889*4882a593Smuzhiyun * 2890*4882a593Smuzhiyun * **NULL** if not found or there was an error in adding 2891*4882a593Smuzhiyun * a new bpf-local-storage. 2892*4882a593Smuzhiyun * 2893*4882a593Smuzhiyun * long bpf_sk_storage_delete(struct bpf_map *map, void *sk) 2894*4882a593Smuzhiyun * Description 2895*4882a593Smuzhiyun * Delete a bpf-local-storage from a *sk*. 2896*4882a593Smuzhiyun * Return 2897*4882a593Smuzhiyun * 0 on success. 2898*4882a593Smuzhiyun * 2899*4882a593Smuzhiyun * **-ENOENT** if the bpf-local-storage cannot be found. 2900*4882a593Smuzhiyun * **-EINVAL** if sk is not a fullsock (e.g. a request_sock). 2901*4882a593Smuzhiyun * 2902*4882a593Smuzhiyun * long bpf_send_signal(u32 sig) 2903*4882a593Smuzhiyun * Description 2904*4882a593Smuzhiyun * Send signal *sig* to the process of the current task. 2905*4882a593Smuzhiyun * The signal may be delivered to any of this process's threads. 2906*4882a593Smuzhiyun * Return 2907*4882a593Smuzhiyun * 0 on success or successfully queued. 2908*4882a593Smuzhiyun * 2909*4882a593Smuzhiyun * **-EBUSY** if work queue under nmi is full. 2910*4882a593Smuzhiyun * 2911*4882a593Smuzhiyun * **-EINVAL** if *sig* is invalid. 2912*4882a593Smuzhiyun * 2913*4882a593Smuzhiyun * **-EPERM** if no permission to send the *sig*. 2914*4882a593Smuzhiyun * 2915*4882a593Smuzhiyun * **-EAGAIN** if bpf program can try again. 2916*4882a593Smuzhiyun * 2917*4882a593Smuzhiyun * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) 2918*4882a593Smuzhiyun * Description 2919*4882a593Smuzhiyun * Try to issue a SYN cookie for the packet with corresponding 2920*4882a593Smuzhiyun * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*. 2921*4882a593Smuzhiyun * 2922*4882a593Smuzhiyun * *iph* points to the start of the IPv4 or IPv6 header, while 2923*4882a593Smuzhiyun * *iph_len* contains **sizeof**\ (**struct iphdr**) or 2924*4882a593Smuzhiyun * **sizeof**\ (**struct ip6hdr**). 2925*4882a593Smuzhiyun * 2926*4882a593Smuzhiyun * *th* points to the start of the TCP header, while *th_len* 2927*4882a593Smuzhiyun * contains the length of the TCP header. 2928*4882a593Smuzhiyun * Return 2929*4882a593Smuzhiyun * On success, lower 32 bits hold the generated SYN cookie in 2930*4882a593Smuzhiyun * followed by 16 bits which hold the MSS value for that cookie, 2931*4882a593Smuzhiyun * and the top 16 bits are unused. 2932*4882a593Smuzhiyun * 2933*4882a593Smuzhiyun * On failure, the returned value is one of the following: 2934*4882a593Smuzhiyun * 2935*4882a593Smuzhiyun * **-EINVAL** SYN cookie cannot be issued due to error 2936*4882a593Smuzhiyun * 2937*4882a593Smuzhiyun * **-ENOENT** SYN cookie should not be issued (no SYN flood) 2938*4882a593Smuzhiyun * 2939*4882a593Smuzhiyun * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies 2940*4882a593Smuzhiyun * 2941*4882a593Smuzhiyun * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 2942*4882a593Smuzhiyun * 2943*4882a593Smuzhiyun * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) 2944*4882a593Smuzhiyun * Description 2945*4882a593Smuzhiyun * Write raw *data* blob into a special BPF perf event held by 2946*4882a593Smuzhiyun * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf 2947*4882a593Smuzhiyun * event must have the following attributes: **PERF_SAMPLE_RAW** 2948*4882a593Smuzhiyun * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and 2949*4882a593Smuzhiyun * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. 2950*4882a593Smuzhiyun * 2951*4882a593Smuzhiyun * The *flags* are used to indicate the index in *map* for which 2952*4882a593Smuzhiyun * the value must be put, masked with **BPF_F_INDEX_MASK**. 2953*4882a593Smuzhiyun * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** 2954*4882a593Smuzhiyun * to indicate that the index of the current CPU core should be 2955*4882a593Smuzhiyun * used. 2956*4882a593Smuzhiyun * 2957*4882a593Smuzhiyun * The value to write, of *size*, is passed through eBPF stack and 2958*4882a593Smuzhiyun * pointed by *data*. 2959*4882a593Smuzhiyun * 2960*4882a593Smuzhiyun * *ctx* is a pointer to in-kernel struct sk_buff. 2961*4882a593Smuzhiyun * 2962*4882a593Smuzhiyun * This helper is similar to **bpf_perf_event_output**\ () but 2963*4882a593Smuzhiyun * restricted to raw_tracepoint bpf programs. 2964*4882a593Smuzhiyun * Return 2965*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2966*4882a593Smuzhiyun * 2967*4882a593Smuzhiyun * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) 2968*4882a593Smuzhiyun * Description 2969*4882a593Smuzhiyun * Safely attempt to read *size* bytes from user space address 2970*4882a593Smuzhiyun * *unsafe_ptr* and store the data in *dst*. 2971*4882a593Smuzhiyun * Return 2972*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2973*4882a593Smuzhiyun * 2974*4882a593Smuzhiyun * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) 2975*4882a593Smuzhiyun * Description 2976*4882a593Smuzhiyun * Safely attempt to read *size* bytes from kernel space address 2977*4882a593Smuzhiyun * *unsafe_ptr* and store the data in *dst*. 2978*4882a593Smuzhiyun * Return 2979*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 2980*4882a593Smuzhiyun * 2981*4882a593Smuzhiyun * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) 2982*4882a593Smuzhiyun * Description 2983*4882a593Smuzhiyun * Copy a NUL terminated string from an unsafe user address 2984*4882a593Smuzhiyun * *unsafe_ptr* to *dst*. The *size* should include the 2985*4882a593Smuzhiyun * terminating NUL byte. In case the string length is smaller than 2986*4882a593Smuzhiyun * *size*, the target is not padded with further NUL bytes. If the 2987*4882a593Smuzhiyun * string length is larger than *size*, just *size*-1 bytes are 2988*4882a593Smuzhiyun * copied and the last byte is set to NUL. 2989*4882a593Smuzhiyun * 2990*4882a593Smuzhiyun * On success, the length of the copied string is returned. This 2991*4882a593Smuzhiyun * makes this helper useful in tracing programs for reading 2992*4882a593Smuzhiyun * strings, and more importantly to get its length at runtime. See 2993*4882a593Smuzhiyun * the following snippet: 2994*4882a593Smuzhiyun * 2995*4882a593Smuzhiyun * :: 2996*4882a593Smuzhiyun * 2997*4882a593Smuzhiyun * SEC("kprobe/sys_open") 2998*4882a593Smuzhiyun * void bpf_sys_open(struct pt_regs *ctx) 2999*4882a593Smuzhiyun * { 3000*4882a593Smuzhiyun * char buf[PATHLEN]; // PATHLEN is defined to 256 3001*4882a593Smuzhiyun * int res = bpf_probe_read_user_str(buf, sizeof(buf), 3002*4882a593Smuzhiyun * ctx->di); 3003*4882a593Smuzhiyun * 3004*4882a593Smuzhiyun * // Consume buf, for example push it to 3005*4882a593Smuzhiyun * // userspace via bpf_perf_event_output(); we 3006*4882a593Smuzhiyun * // can use res (the string length) as event 3007*4882a593Smuzhiyun * // size, after checking its boundaries. 3008*4882a593Smuzhiyun * } 3009*4882a593Smuzhiyun * 3010*4882a593Smuzhiyun * In comparison, using **bpf_probe_read_user**\ () helper here 3011*4882a593Smuzhiyun * instead to read the string would require to estimate the length 3012*4882a593Smuzhiyun * at compile time, and would often result in copying more memory 3013*4882a593Smuzhiyun * than necessary. 3014*4882a593Smuzhiyun * 3015*4882a593Smuzhiyun * Another useful use case is when parsing individual process 3016*4882a593Smuzhiyun * arguments or individual environment variables navigating 3017*4882a593Smuzhiyun * *current*\ **->mm->arg_start** and *current*\ 3018*4882a593Smuzhiyun * **->mm->env_start**: using this helper and the return value, 3019*4882a593Smuzhiyun * one can quickly iterate at the right offset of the memory area. 3020*4882a593Smuzhiyun * Return 3021*4882a593Smuzhiyun * On success, the strictly positive length of the string, 3022*4882a593Smuzhiyun * including the trailing NUL character. On error, a negative 3023*4882a593Smuzhiyun * value. 3024*4882a593Smuzhiyun * 3025*4882a593Smuzhiyun * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) 3026*4882a593Smuzhiyun * Description 3027*4882a593Smuzhiyun * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* 3028*4882a593Smuzhiyun * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply. 3029*4882a593Smuzhiyun * Return 3030*4882a593Smuzhiyun * On success, the strictly positive length of the string, including 3031*4882a593Smuzhiyun * the trailing NUL character. On error, a negative value. 3032*4882a593Smuzhiyun * 3033*4882a593Smuzhiyun * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt) 3034*4882a593Smuzhiyun * Description 3035*4882a593Smuzhiyun * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**. 3036*4882a593Smuzhiyun * *rcv_nxt* is the ack_seq to be sent out. 3037*4882a593Smuzhiyun * Return 3038*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 3039*4882a593Smuzhiyun * 3040*4882a593Smuzhiyun * long bpf_send_signal_thread(u32 sig) 3041*4882a593Smuzhiyun * Description 3042*4882a593Smuzhiyun * Send signal *sig* to the thread corresponding to the current task. 3043*4882a593Smuzhiyun * Return 3044*4882a593Smuzhiyun * 0 on success or successfully queued. 3045*4882a593Smuzhiyun * 3046*4882a593Smuzhiyun * **-EBUSY** if work queue under nmi is full. 3047*4882a593Smuzhiyun * 3048*4882a593Smuzhiyun * **-EINVAL** if *sig* is invalid. 3049*4882a593Smuzhiyun * 3050*4882a593Smuzhiyun * **-EPERM** if no permission to send the *sig*. 3051*4882a593Smuzhiyun * 3052*4882a593Smuzhiyun * **-EAGAIN** if bpf program can try again. 3053*4882a593Smuzhiyun * 3054*4882a593Smuzhiyun * u64 bpf_jiffies64(void) 3055*4882a593Smuzhiyun * Description 3056*4882a593Smuzhiyun * Obtain the 64bit jiffies 3057*4882a593Smuzhiyun * Return 3058*4882a593Smuzhiyun * The 64 bit jiffies 3059*4882a593Smuzhiyun * 3060*4882a593Smuzhiyun * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) 3061*4882a593Smuzhiyun * Description 3062*4882a593Smuzhiyun * For an eBPF program attached to a perf event, retrieve the 3063*4882a593Smuzhiyun * branch records (**struct perf_branch_entry**) associated to *ctx* 3064*4882a593Smuzhiyun * and store it in the buffer pointed by *buf* up to size 3065*4882a593Smuzhiyun * *size* bytes. 3066*4882a593Smuzhiyun * Return 3067*4882a593Smuzhiyun * On success, number of bytes written to *buf*. On error, a 3068*4882a593Smuzhiyun * negative value. 3069*4882a593Smuzhiyun * 3070*4882a593Smuzhiyun * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to 3071*4882a593Smuzhiyun * instead return the number of bytes required to store all the 3072*4882a593Smuzhiyun * branch entries. If this flag is set, *buf* may be NULL. 3073*4882a593Smuzhiyun * 3074*4882a593Smuzhiyun * **-EINVAL** if arguments invalid or **size** not a multiple 3075*4882a593Smuzhiyun * of **sizeof**\ (**struct perf_branch_entry**\ ). 3076*4882a593Smuzhiyun * 3077*4882a593Smuzhiyun * **-ENOENT** if architecture does not support branch records. 3078*4882a593Smuzhiyun * 3079*4882a593Smuzhiyun * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) 3080*4882a593Smuzhiyun * Description 3081*4882a593Smuzhiyun * Returns 0 on success, values for *pid* and *tgid* as seen from the current 3082*4882a593Smuzhiyun * *namespace* will be returned in *nsdata*. 3083*4882a593Smuzhiyun * Return 3084*4882a593Smuzhiyun * 0 on success, or one of the following in case of failure: 3085*4882a593Smuzhiyun * 3086*4882a593Smuzhiyun * **-EINVAL** if dev and inum supplied don't match dev_t and inode number 3087*4882a593Smuzhiyun * with nsfs of current task, or if dev conversion to dev_t lost high bits. 3088*4882a593Smuzhiyun * 3089*4882a593Smuzhiyun * **-ENOENT** if pidns does not exists for the current task. 3090*4882a593Smuzhiyun * 3091*4882a593Smuzhiyun * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) 3092*4882a593Smuzhiyun * Description 3093*4882a593Smuzhiyun * Write raw *data* blob into a special BPF perf event held by 3094*4882a593Smuzhiyun * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf 3095*4882a593Smuzhiyun * event must have the following attributes: **PERF_SAMPLE_RAW** 3096*4882a593Smuzhiyun * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and 3097*4882a593Smuzhiyun * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. 3098*4882a593Smuzhiyun * 3099*4882a593Smuzhiyun * The *flags* are used to indicate the index in *map* for which 3100*4882a593Smuzhiyun * the value must be put, masked with **BPF_F_INDEX_MASK**. 3101*4882a593Smuzhiyun * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** 3102*4882a593Smuzhiyun * to indicate that the index of the current CPU core should be 3103*4882a593Smuzhiyun * used. 3104*4882a593Smuzhiyun * 3105*4882a593Smuzhiyun * The value to write, of *size*, is passed through eBPF stack and 3106*4882a593Smuzhiyun * pointed by *data*. 3107*4882a593Smuzhiyun * 3108*4882a593Smuzhiyun * *ctx* is a pointer to in-kernel struct xdp_buff. 3109*4882a593Smuzhiyun * 3110*4882a593Smuzhiyun * This helper is similar to **bpf_perf_eventoutput**\ () but 3111*4882a593Smuzhiyun * restricted to raw_tracepoint bpf programs. 3112*4882a593Smuzhiyun * Return 3113*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 3114*4882a593Smuzhiyun * 3115*4882a593Smuzhiyun * u64 bpf_get_netns_cookie(void *ctx) 3116*4882a593Smuzhiyun * Description 3117*4882a593Smuzhiyun * Retrieve the cookie (generated by the kernel) of the network 3118*4882a593Smuzhiyun * namespace the input *ctx* is associated with. The network 3119*4882a593Smuzhiyun * namespace cookie remains stable for its lifetime and provides 3120*4882a593Smuzhiyun * a global identifier that can be assumed unique. If *ctx* is 3121*4882a593Smuzhiyun * NULL, then the helper returns the cookie for the initial 3122*4882a593Smuzhiyun * network namespace. The cookie itself is very similar to that 3123*4882a593Smuzhiyun * of **bpf_get_socket_cookie**\ () helper, but for network 3124*4882a593Smuzhiyun * namespaces instead of sockets. 3125*4882a593Smuzhiyun * Return 3126*4882a593Smuzhiyun * A 8-byte long opaque number. 3127*4882a593Smuzhiyun * 3128*4882a593Smuzhiyun * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level) 3129*4882a593Smuzhiyun * Description 3130*4882a593Smuzhiyun * Return id of cgroup v2 that is ancestor of the cgroup associated 3131*4882a593Smuzhiyun * with the current task at the *ancestor_level*. The root cgroup 3132*4882a593Smuzhiyun * is at *ancestor_level* zero and each step down the hierarchy 3133*4882a593Smuzhiyun * increments the level. If *ancestor_level* == level of cgroup 3134*4882a593Smuzhiyun * associated with the current task, then return value will be the 3135*4882a593Smuzhiyun * same as that of **bpf_get_current_cgroup_id**\ (). 3136*4882a593Smuzhiyun * 3137*4882a593Smuzhiyun * The helper is useful to implement policies based on cgroups 3138*4882a593Smuzhiyun * that are upper in hierarchy than immediate cgroup associated 3139*4882a593Smuzhiyun * with the current task. 3140*4882a593Smuzhiyun * 3141*4882a593Smuzhiyun * The format of returned id and helper limitations are same as in 3142*4882a593Smuzhiyun * **bpf_get_current_cgroup_id**\ (). 3143*4882a593Smuzhiyun * Return 3144*4882a593Smuzhiyun * The id is returned or 0 in case the id could not be retrieved. 3145*4882a593Smuzhiyun * 3146*4882a593Smuzhiyun * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags) 3147*4882a593Smuzhiyun * Description 3148*4882a593Smuzhiyun * Helper is overloaded depending on BPF program type. This 3149*4882a593Smuzhiyun * description applies to **BPF_PROG_TYPE_SCHED_CLS** and 3150*4882a593Smuzhiyun * **BPF_PROG_TYPE_SCHED_ACT** programs. 3151*4882a593Smuzhiyun * 3152*4882a593Smuzhiyun * Assign the *sk* to the *skb*. When combined with appropriate 3153*4882a593Smuzhiyun * routing configuration to receive the packet towards the socket, 3154*4882a593Smuzhiyun * will cause *skb* to be delivered to the specified socket. 3155*4882a593Smuzhiyun * Subsequent redirection of *skb* via **bpf_redirect**\ (), 3156*4882a593Smuzhiyun * **bpf_clone_redirect**\ () or other methods outside of BPF may 3157*4882a593Smuzhiyun * interfere with successful delivery to the socket. 3158*4882a593Smuzhiyun * 3159*4882a593Smuzhiyun * This operation is only valid from TC ingress path. 3160*4882a593Smuzhiyun * 3161*4882a593Smuzhiyun * The *flags* argument must be zero. 3162*4882a593Smuzhiyun * Return 3163*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure: 3164*4882a593Smuzhiyun * 3165*4882a593Smuzhiyun * **-EINVAL** if specified *flags* are not supported. 3166*4882a593Smuzhiyun * 3167*4882a593Smuzhiyun * **-ENOENT** if the socket is unavailable for assignment. 3168*4882a593Smuzhiyun * 3169*4882a593Smuzhiyun * **-ENETUNREACH** if the socket is unreachable (wrong netns). 3170*4882a593Smuzhiyun * 3171*4882a593Smuzhiyun * **-EOPNOTSUPP** if the operation is not supported, for example 3172*4882a593Smuzhiyun * a call from outside of TC ingress. 3173*4882a593Smuzhiyun * 3174*4882a593Smuzhiyun * **-ESOCKTNOSUPPORT** if the socket type is not supported 3175*4882a593Smuzhiyun * (reuseport). 3176*4882a593Smuzhiyun * 3177*4882a593Smuzhiyun * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags) 3178*4882a593Smuzhiyun * Description 3179*4882a593Smuzhiyun * Helper is overloaded depending on BPF program type. This 3180*4882a593Smuzhiyun * description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs. 3181*4882a593Smuzhiyun * 3182*4882a593Smuzhiyun * Select the *sk* as a result of a socket lookup. 3183*4882a593Smuzhiyun * 3184*4882a593Smuzhiyun * For the operation to succeed passed socket must be compatible 3185*4882a593Smuzhiyun * with the packet description provided by the *ctx* object. 3186*4882a593Smuzhiyun * 3187*4882a593Smuzhiyun * L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must 3188*4882a593Smuzhiyun * be an exact match. While IP family (**AF_INET** or 3189*4882a593Smuzhiyun * **AF_INET6**) must be compatible, that is IPv6 sockets 3190*4882a593Smuzhiyun * that are not v6-only can be selected for IPv4 packets. 3191*4882a593Smuzhiyun * 3192*4882a593Smuzhiyun * Only TCP listeners and UDP unconnected sockets can be 3193*4882a593Smuzhiyun * selected. *sk* can also be NULL to reset any previous 3194*4882a593Smuzhiyun * selection. 3195*4882a593Smuzhiyun * 3196*4882a593Smuzhiyun * *flags* argument can combination of following values: 3197*4882a593Smuzhiyun * 3198*4882a593Smuzhiyun * * **BPF_SK_LOOKUP_F_REPLACE** to override the previous 3199*4882a593Smuzhiyun * socket selection, potentially done by a BPF program 3200*4882a593Smuzhiyun * that ran before us. 3201*4882a593Smuzhiyun * 3202*4882a593Smuzhiyun * * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip 3203*4882a593Smuzhiyun * load-balancing within reuseport group for the socket 3204*4882a593Smuzhiyun * being selected. 3205*4882a593Smuzhiyun * 3206*4882a593Smuzhiyun * On success *ctx->sk* will point to the selected socket. 3207*4882a593Smuzhiyun * 3208*4882a593Smuzhiyun * Return 3209*4882a593Smuzhiyun * 0 on success, or a negative errno in case of failure. 3210*4882a593Smuzhiyun * 3211*4882a593Smuzhiyun * * **-EAFNOSUPPORT** if socket family (*sk->family*) is 3212*4882a593Smuzhiyun * not compatible with packet family (*ctx->family*). 3213*4882a593Smuzhiyun * 3214*4882a593Smuzhiyun * * **-EEXIST** if socket has been already selected, 3215*4882a593Smuzhiyun * potentially by another program, and 3216*4882a593Smuzhiyun * **BPF_SK_LOOKUP_F_REPLACE** flag was not specified. 3217*4882a593Smuzhiyun * 3218*4882a593Smuzhiyun * * **-EINVAL** if unsupported flags were specified. 3219*4882a593Smuzhiyun * 3220*4882a593Smuzhiyun * * **-EPROTOTYPE** if socket L4 protocol 3221*4882a593Smuzhiyun * (*sk->protocol*) doesn't match packet protocol 3222*4882a593Smuzhiyun * (*ctx->protocol*). 3223*4882a593Smuzhiyun * 3224*4882a593Smuzhiyun * * **-ESOCKTNOSUPPORT** if socket is not in allowed 3225*4882a593Smuzhiyun * state (TCP listening or UDP unconnected). 3226*4882a593Smuzhiyun * 3227*4882a593Smuzhiyun * u64 bpf_ktime_get_boot_ns(void) 3228*4882a593Smuzhiyun * Description 3229*4882a593Smuzhiyun * Return the time elapsed since system boot, in nanoseconds. 3230*4882a593Smuzhiyun * Does include the time the system was suspended. 3231*4882a593Smuzhiyun * See: **clock_gettime**\ (**CLOCK_BOOTTIME**) 3232*4882a593Smuzhiyun * Return 3233*4882a593Smuzhiyun * Current *ktime*. 3234*4882a593Smuzhiyun * 3235*4882a593Smuzhiyun * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) 3236*4882a593Smuzhiyun * Description 3237*4882a593Smuzhiyun * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print 3238*4882a593Smuzhiyun * out the format string. 3239*4882a593Smuzhiyun * The *m* represents the seq_file. The *fmt* and *fmt_size* are for 3240*4882a593Smuzhiyun * the format string itself. The *data* and *data_len* are format string 3241*4882a593Smuzhiyun * arguments. The *data* are a **u64** array and corresponding format string 3242*4882a593Smuzhiyun * values are stored in the array. For strings and pointers where pointees 3243*4882a593Smuzhiyun * are accessed, only the pointer values are stored in the *data* array. 3244*4882a593Smuzhiyun * The *data_len* is the size of *data* in bytes. 3245*4882a593Smuzhiyun * 3246*4882a593Smuzhiyun * Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory. 3247*4882a593Smuzhiyun * Reading kernel memory may fail due to either invalid address or 3248*4882a593Smuzhiyun * valid address but requiring a major memory fault. If reading kernel memory 3249*4882a593Smuzhiyun * fails, the string for **%s** will be an empty string, and the ip 3250*4882a593Smuzhiyun * address for **%p{i,I}{4,6}** will be 0. Not returning error to 3251*4882a593Smuzhiyun * bpf program is consistent with what **bpf_trace_printk**\ () does for now. 3252*4882a593Smuzhiyun * Return 3253*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure: 3254*4882a593Smuzhiyun * 3255*4882a593Smuzhiyun * **-EBUSY** if per-CPU memory copy buffer is busy, can try again 3256*4882a593Smuzhiyun * by returning 1 from bpf program. 3257*4882a593Smuzhiyun * 3258*4882a593Smuzhiyun * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported. 3259*4882a593Smuzhiyun * 3260*4882a593Smuzhiyun * **-E2BIG** if *fmt* contains too many format specifiers. 3261*4882a593Smuzhiyun * 3262*4882a593Smuzhiyun * **-EOVERFLOW** if an overflow happened: The same object will be tried again. 3263*4882a593Smuzhiyun * 3264*4882a593Smuzhiyun * long bpf_seq_write(struct seq_file *m, const void *data, u32 len) 3265*4882a593Smuzhiyun * Description 3266*4882a593Smuzhiyun * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. 3267*4882a593Smuzhiyun * The *m* represents the seq_file. The *data* and *len* represent the 3268*4882a593Smuzhiyun * data to write in bytes. 3269*4882a593Smuzhiyun * Return 3270*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure: 3271*4882a593Smuzhiyun * 3272*4882a593Smuzhiyun * **-EOVERFLOW** if an overflow happened: The same object will be tried again. 3273*4882a593Smuzhiyun * 3274*4882a593Smuzhiyun * u64 bpf_sk_cgroup_id(void *sk) 3275*4882a593Smuzhiyun * Description 3276*4882a593Smuzhiyun * Return the cgroup v2 id of the socket *sk*. 3277*4882a593Smuzhiyun * 3278*4882a593Smuzhiyun * *sk* must be a non-**NULL** pointer to a socket, e.g. one 3279*4882a593Smuzhiyun * returned from **bpf_sk_lookup_xxx**\ (), 3280*4882a593Smuzhiyun * **bpf_sk_fullsock**\ (), etc. The format of returned id is 3281*4882a593Smuzhiyun * same as in **bpf_skb_cgroup_id**\ (). 3282*4882a593Smuzhiyun * 3283*4882a593Smuzhiyun * This helper is available only if the kernel was compiled with 3284*4882a593Smuzhiyun * the **CONFIG_SOCK_CGROUP_DATA** configuration option. 3285*4882a593Smuzhiyun * Return 3286*4882a593Smuzhiyun * The id is returned or 0 in case the id could not be retrieved. 3287*4882a593Smuzhiyun * 3288*4882a593Smuzhiyun * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level) 3289*4882a593Smuzhiyun * Description 3290*4882a593Smuzhiyun * Return id of cgroup v2 that is ancestor of cgroup associated 3291*4882a593Smuzhiyun * with the *sk* at the *ancestor_level*. The root cgroup is at 3292*4882a593Smuzhiyun * *ancestor_level* zero and each step down the hierarchy 3293*4882a593Smuzhiyun * increments the level. If *ancestor_level* == level of cgroup 3294*4882a593Smuzhiyun * associated with *sk*, then return value will be same as that 3295*4882a593Smuzhiyun * of **bpf_sk_cgroup_id**\ (). 3296*4882a593Smuzhiyun * 3297*4882a593Smuzhiyun * The helper is useful to implement policies based on cgroups 3298*4882a593Smuzhiyun * that are upper in hierarchy than immediate cgroup associated 3299*4882a593Smuzhiyun * with *sk*. 3300*4882a593Smuzhiyun * 3301*4882a593Smuzhiyun * The format of returned id and helper limitations are same as in 3302*4882a593Smuzhiyun * **bpf_sk_cgroup_id**\ (). 3303*4882a593Smuzhiyun * Return 3304*4882a593Smuzhiyun * The id is returned or 0 in case the id could not be retrieved. 3305*4882a593Smuzhiyun * 3306*4882a593Smuzhiyun * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) 3307*4882a593Smuzhiyun * Description 3308*4882a593Smuzhiyun * Copy *size* bytes from *data* into a ring buffer *ringbuf*. 3309*4882a593Smuzhiyun * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 3310*4882a593Smuzhiyun * of new data availability is sent. 3311*4882a593Smuzhiyun * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 3312*4882a593Smuzhiyun * of new data availability is sent unconditionally. 3313*4882a593Smuzhiyun * Return 3314*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 3315*4882a593Smuzhiyun * 3316*4882a593Smuzhiyun * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags) 3317*4882a593Smuzhiyun * Description 3318*4882a593Smuzhiyun * Reserve *size* bytes of payload in a ring buffer *ringbuf*. 3319*4882a593Smuzhiyun * Return 3320*4882a593Smuzhiyun * Valid pointer with *size* bytes of memory available; NULL, 3321*4882a593Smuzhiyun * otherwise. 3322*4882a593Smuzhiyun * 3323*4882a593Smuzhiyun * void bpf_ringbuf_submit(void *data, u64 flags) 3324*4882a593Smuzhiyun * Description 3325*4882a593Smuzhiyun * Submit reserved ring buffer sample, pointed to by *data*. 3326*4882a593Smuzhiyun * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 3327*4882a593Smuzhiyun * of new data availability is sent. 3328*4882a593Smuzhiyun * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 3329*4882a593Smuzhiyun * of new data availability is sent unconditionally. 3330*4882a593Smuzhiyun * Return 3331*4882a593Smuzhiyun * Nothing. Always succeeds. 3332*4882a593Smuzhiyun * 3333*4882a593Smuzhiyun * void bpf_ringbuf_discard(void *data, u64 flags) 3334*4882a593Smuzhiyun * Description 3335*4882a593Smuzhiyun * Discard reserved ring buffer sample, pointed to by *data*. 3336*4882a593Smuzhiyun * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification 3337*4882a593Smuzhiyun * of new data availability is sent. 3338*4882a593Smuzhiyun * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification 3339*4882a593Smuzhiyun * of new data availability is sent unconditionally. 3340*4882a593Smuzhiyun * Return 3341*4882a593Smuzhiyun * Nothing. Always succeeds. 3342*4882a593Smuzhiyun * 3343*4882a593Smuzhiyun * u64 bpf_ringbuf_query(void *ringbuf, u64 flags) 3344*4882a593Smuzhiyun * Description 3345*4882a593Smuzhiyun * Query various characteristics of provided ring buffer. What 3346*4882a593Smuzhiyun * exactly is queries is determined by *flags*: 3347*4882a593Smuzhiyun * 3348*4882a593Smuzhiyun * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed. 3349*4882a593Smuzhiyun * * **BPF_RB_RING_SIZE**: The size of ring buffer. 3350*4882a593Smuzhiyun * * **BPF_RB_CONS_POS**: Consumer position (can wrap around). 3351*4882a593Smuzhiyun * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around). 3352*4882a593Smuzhiyun * 3353*4882a593Smuzhiyun * Data returned is just a momentary snapshot of actual values 3354*4882a593Smuzhiyun * and could be inaccurate, so this facility should be used to 3355*4882a593Smuzhiyun * power heuristics and for reporting, not to make 100% correct 3356*4882a593Smuzhiyun * calculation. 3357*4882a593Smuzhiyun * Return 3358*4882a593Smuzhiyun * Requested value, or 0, if *flags* are not recognized. 3359*4882a593Smuzhiyun * 3360*4882a593Smuzhiyun * long bpf_csum_level(struct sk_buff *skb, u64 level) 3361*4882a593Smuzhiyun * Description 3362*4882a593Smuzhiyun * Change the skbs checksum level by one layer up or down, or 3363*4882a593Smuzhiyun * reset it entirely to none in order to have the stack perform 3364*4882a593Smuzhiyun * checksum validation. The level is applicable to the following 3365*4882a593Smuzhiyun * protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of 3366*4882a593Smuzhiyun * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP | 3367*4882a593Smuzhiyun * through **bpf_skb_adjust_room**\ () helper with passing in 3368*4882a593Smuzhiyun * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call 3369*4882a593Smuzhiyun * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since 3370*4882a593Smuzhiyun * the UDP header is removed. Similarly, an encap of the latter 3371*4882a593Smuzhiyun * into the former could be accompanied by a helper call to 3372*4882a593Smuzhiyun * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the 3373*4882a593Smuzhiyun * skb is still intended to be processed in higher layers of the 3374*4882a593Smuzhiyun * stack instead of just egressing at tc. 3375*4882a593Smuzhiyun * 3376*4882a593Smuzhiyun * There are three supported level settings at this time: 3377*4882a593Smuzhiyun * 3378*4882a593Smuzhiyun * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs 3379*4882a593Smuzhiyun * with CHECKSUM_UNNECESSARY. 3380*4882a593Smuzhiyun * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs 3381*4882a593Smuzhiyun * with CHECKSUM_UNNECESSARY. 3382*4882a593Smuzhiyun * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and 3383*4882a593Smuzhiyun * sets CHECKSUM_NONE to force checksum validation by the stack. 3384*4882a593Smuzhiyun * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current 3385*4882a593Smuzhiyun * skb->csum_level. 3386*4882a593Smuzhiyun * Return 3387*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. In the 3388*4882a593Smuzhiyun * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level 3389*4882a593Smuzhiyun * is returned or the error code -EACCES in case the skb is not 3390*4882a593Smuzhiyun * subject to CHECKSUM_UNNECESSARY. 3391*4882a593Smuzhiyun * 3392*4882a593Smuzhiyun * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk) 3393*4882a593Smuzhiyun * Description 3394*4882a593Smuzhiyun * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. 3395*4882a593Smuzhiyun * Return 3396*4882a593Smuzhiyun * *sk* if casting is valid, or **NULL** otherwise. 3397*4882a593Smuzhiyun * 3398*4882a593Smuzhiyun * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk) 3399*4882a593Smuzhiyun * Description 3400*4882a593Smuzhiyun * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. 3401*4882a593Smuzhiyun * Return 3402*4882a593Smuzhiyun * *sk* if casting is valid, or **NULL** otherwise. 3403*4882a593Smuzhiyun * 3404*4882a593Smuzhiyun * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk) 3405*4882a593Smuzhiyun * Description 3406*4882a593Smuzhiyun * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. 3407*4882a593Smuzhiyun * Return 3408*4882a593Smuzhiyun * *sk* if casting is valid, or **NULL** otherwise. 3409*4882a593Smuzhiyun * 3410*4882a593Smuzhiyun * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk) 3411*4882a593Smuzhiyun * Description 3412*4882a593Smuzhiyun * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. 3413*4882a593Smuzhiyun * Return 3414*4882a593Smuzhiyun * *sk* if casting is valid, or **NULL** otherwise. 3415*4882a593Smuzhiyun * 3416*4882a593Smuzhiyun * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk) 3417*4882a593Smuzhiyun * Description 3418*4882a593Smuzhiyun * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. 3419*4882a593Smuzhiyun * Return 3420*4882a593Smuzhiyun * *sk* if casting is valid, or **NULL** otherwise. 3421*4882a593Smuzhiyun * 3422*4882a593Smuzhiyun * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags) 3423*4882a593Smuzhiyun * Description 3424*4882a593Smuzhiyun * Return a user or a kernel stack in bpf program provided buffer. 3425*4882a593Smuzhiyun * To achieve this, the helper needs *task*, which is a valid 3426*4882a593Smuzhiyun * pointer to **struct task_struct**. To store the stacktrace, the 3427*4882a593Smuzhiyun * bpf program provides *buf* with a nonnegative *size*. 3428*4882a593Smuzhiyun * 3429*4882a593Smuzhiyun * The last argument, *flags*, holds the number of stack frames to 3430*4882a593Smuzhiyun * skip (from 0 to 255), masked with 3431*4882a593Smuzhiyun * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set 3432*4882a593Smuzhiyun * the following flags: 3433*4882a593Smuzhiyun * 3434*4882a593Smuzhiyun * **BPF_F_USER_STACK** 3435*4882a593Smuzhiyun * Collect a user space stack instead of a kernel stack. 3436*4882a593Smuzhiyun * **BPF_F_USER_BUILD_ID** 3437*4882a593Smuzhiyun * Collect buildid+offset instead of ips for user stack, 3438*4882a593Smuzhiyun * only valid if **BPF_F_USER_STACK** is also specified. 3439*4882a593Smuzhiyun * 3440*4882a593Smuzhiyun * **bpf_get_task_stack**\ () can collect up to 3441*4882a593Smuzhiyun * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject 3442*4882a593Smuzhiyun * to sufficient large buffer size. Note that 3443*4882a593Smuzhiyun * this limit can be controlled with the **sysctl** program, and 3444*4882a593Smuzhiyun * that it should be manually increased in order to profile long 3445*4882a593Smuzhiyun * user stacks (such as stacks for Java programs). To do so, use: 3446*4882a593Smuzhiyun * 3447*4882a593Smuzhiyun * :: 3448*4882a593Smuzhiyun * 3449*4882a593Smuzhiyun * # sysctl kernel.perf_event_max_stack=<new value> 3450*4882a593Smuzhiyun * Return 3451*4882a593Smuzhiyun * A non-negative value equal to or less than *size* on success, 3452*4882a593Smuzhiyun * or a negative error in case of failure. 3453*4882a593Smuzhiyun * 3454*4882a593Smuzhiyun * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags) 3455*4882a593Smuzhiyun * Description 3456*4882a593Smuzhiyun * Load header option. Support reading a particular TCP header 3457*4882a593Smuzhiyun * option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**). 3458*4882a593Smuzhiyun * 3459*4882a593Smuzhiyun * If *flags* is 0, it will search the option from the 3460*4882a593Smuzhiyun * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops** 3461*4882a593Smuzhiyun * has details on what skb_data contains under different 3462*4882a593Smuzhiyun * *skops*\ **->op**. 3463*4882a593Smuzhiyun * 3464*4882a593Smuzhiyun * The first byte of the *searchby_res* specifies the 3465*4882a593Smuzhiyun * kind that it wants to search. 3466*4882a593Smuzhiyun * 3467*4882a593Smuzhiyun * If the searching kind is an experimental kind 3468*4882a593Smuzhiyun * (i.e. 253 or 254 according to RFC6994). It also 3469*4882a593Smuzhiyun * needs to specify the "magic" which is either 3470*4882a593Smuzhiyun * 2 bytes or 4 bytes. It then also needs to 3471*4882a593Smuzhiyun * specify the size of the magic by using 3472*4882a593Smuzhiyun * the 2nd byte which is "kind-length" of a TCP 3473*4882a593Smuzhiyun * header option and the "kind-length" also 3474*4882a593Smuzhiyun * includes the first 2 bytes "kind" and "kind-length" 3475*4882a593Smuzhiyun * itself as a normal TCP header option also does. 3476*4882a593Smuzhiyun * 3477*4882a593Smuzhiyun * For example, to search experimental kind 254 with 3478*4882a593Smuzhiyun * 2 byte magic 0xeB9F, the searchby_res should be 3479*4882a593Smuzhiyun * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ]. 3480*4882a593Smuzhiyun * 3481*4882a593Smuzhiyun * To search for the standard window scale option (3), 3482*4882a593Smuzhiyun * the *searchby_res* should be [ 3, 0, 0, .... 0 ]. 3483*4882a593Smuzhiyun * Note, kind-length must be 0 for regular option. 3484*4882a593Smuzhiyun * 3485*4882a593Smuzhiyun * Searching for No-Op (0) and End-of-Option-List (1) are 3486*4882a593Smuzhiyun * not supported. 3487*4882a593Smuzhiyun * 3488*4882a593Smuzhiyun * *len* must be at least 2 bytes which is the minimal size 3489*4882a593Smuzhiyun * of a header option. 3490*4882a593Smuzhiyun * 3491*4882a593Smuzhiyun * Supported flags: 3492*4882a593Smuzhiyun * 3493*4882a593Smuzhiyun * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the 3494*4882a593Smuzhiyun * saved_syn packet or the just-received syn packet. 3495*4882a593Smuzhiyun * 3496*4882a593Smuzhiyun * Return 3497*4882a593Smuzhiyun * > 0 when found, the header option is copied to *searchby_res*. 3498*4882a593Smuzhiyun * The return value is the total length copied. On failure, a 3499*4882a593Smuzhiyun * negative error code is returned: 3500*4882a593Smuzhiyun * 3501*4882a593Smuzhiyun * **-EINVAL** if a parameter is invalid. 3502*4882a593Smuzhiyun * 3503*4882a593Smuzhiyun * **-ENOMSG** if the option is not found. 3504*4882a593Smuzhiyun * 3505*4882a593Smuzhiyun * **-ENOENT** if no syn packet is available when 3506*4882a593Smuzhiyun * **BPF_LOAD_HDR_OPT_TCP_SYN** is used. 3507*4882a593Smuzhiyun * 3508*4882a593Smuzhiyun * **-ENOSPC** if there is not enough space. Only *len* number of 3509*4882a593Smuzhiyun * bytes are copied. 3510*4882a593Smuzhiyun * 3511*4882a593Smuzhiyun * **-EFAULT** on failure to parse the header options in the 3512*4882a593Smuzhiyun * packet. 3513*4882a593Smuzhiyun * 3514*4882a593Smuzhiyun * **-EPERM** if the helper cannot be used under the current 3515*4882a593Smuzhiyun * *skops*\ **->op**. 3516*4882a593Smuzhiyun * 3517*4882a593Smuzhiyun * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags) 3518*4882a593Smuzhiyun * Description 3519*4882a593Smuzhiyun * Store header option. The data will be copied 3520*4882a593Smuzhiyun * from buffer *from* with length *len* to the TCP header. 3521*4882a593Smuzhiyun * 3522*4882a593Smuzhiyun * The buffer *from* should have the whole option that 3523*4882a593Smuzhiyun * includes the kind, kind-length, and the actual 3524*4882a593Smuzhiyun * option data. The *len* must be at least kind-length 3525*4882a593Smuzhiyun * long. The kind-length does not have to be 4 byte 3526*4882a593Smuzhiyun * aligned. The kernel will take care of the padding 3527*4882a593Smuzhiyun * and setting the 4 bytes aligned value to th->doff. 3528*4882a593Smuzhiyun * 3529*4882a593Smuzhiyun * This helper will check for duplicated option 3530*4882a593Smuzhiyun * by searching the same option in the outgoing skb. 3531*4882a593Smuzhiyun * 3532*4882a593Smuzhiyun * This helper can only be called during 3533*4882a593Smuzhiyun * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. 3534*4882a593Smuzhiyun * 3535*4882a593Smuzhiyun * Return 3536*4882a593Smuzhiyun * 0 on success, or negative error in case of failure: 3537*4882a593Smuzhiyun * 3538*4882a593Smuzhiyun * **-EINVAL** If param is invalid. 3539*4882a593Smuzhiyun * 3540*4882a593Smuzhiyun * **-ENOSPC** if there is not enough space in the header. 3541*4882a593Smuzhiyun * Nothing has been written 3542*4882a593Smuzhiyun * 3543*4882a593Smuzhiyun * **-EEXIST** if the option already exists. 3544*4882a593Smuzhiyun * 3545*4882a593Smuzhiyun * **-EFAULT** on failrue to parse the existing header options. 3546*4882a593Smuzhiyun * 3547*4882a593Smuzhiyun * **-EPERM** if the helper cannot be used under the current 3548*4882a593Smuzhiyun * *skops*\ **->op**. 3549*4882a593Smuzhiyun * 3550*4882a593Smuzhiyun * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags) 3551*4882a593Smuzhiyun * Description 3552*4882a593Smuzhiyun * Reserve *len* bytes for the bpf header option. The 3553*4882a593Smuzhiyun * space will be used by **bpf_store_hdr_opt**\ () later in 3554*4882a593Smuzhiyun * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. 3555*4882a593Smuzhiyun * 3556*4882a593Smuzhiyun * If **bpf_reserve_hdr_opt**\ () is called multiple times, 3557*4882a593Smuzhiyun * the total number of bytes will be reserved. 3558*4882a593Smuzhiyun * 3559*4882a593Smuzhiyun * This helper can only be called during 3560*4882a593Smuzhiyun * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**. 3561*4882a593Smuzhiyun * 3562*4882a593Smuzhiyun * Return 3563*4882a593Smuzhiyun * 0 on success, or negative error in case of failure: 3564*4882a593Smuzhiyun * 3565*4882a593Smuzhiyun * **-EINVAL** if a parameter is invalid. 3566*4882a593Smuzhiyun * 3567*4882a593Smuzhiyun * **-ENOSPC** if there is not enough space in the header. 3568*4882a593Smuzhiyun * 3569*4882a593Smuzhiyun * **-EPERM** if the helper cannot be used under the current 3570*4882a593Smuzhiyun * *skops*\ **->op**. 3571*4882a593Smuzhiyun * 3572*4882a593Smuzhiyun * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags) 3573*4882a593Smuzhiyun * Description 3574*4882a593Smuzhiyun * Get a bpf_local_storage from an *inode*. 3575*4882a593Smuzhiyun * 3576*4882a593Smuzhiyun * Logically, it could be thought of as getting the value from 3577*4882a593Smuzhiyun * a *map* with *inode* as the **key**. From this 3578*4882a593Smuzhiyun * perspective, the usage is not much different from 3579*4882a593Smuzhiyun * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this 3580*4882a593Smuzhiyun * helper enforces the key must be an inode and the map must also 3581*4882a593Smuzhiyun * be a **BPF_MAP_TYPE_INODE_STORAGE**. 3582*4882a593Smuzhiyun * 3583*4882a593Smuzhiyun * Underneath, the value is stored locally at *inode* instead of 3584*4882a593Smuzhiyun * the *map*. The *map* is used as the bpf-local-storage 3585*4882a593Smuzhiyun * "type". The bpf-local-storage "type" (i.e. the *map*) is 3586*4882a593Smuzhiyun * searched against all bpf_local_storage residing at *inode*. 3587*4882a593Smuzhiyun * 3588*4882a593Smuzhiyun * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be 3589*4882a593Smuzhiyun * used such that a new bpf_local_storage will be 3590*4882a593Smuzhiyun * created if one does not exist. *value* can be used 3591*4882a593Smuzhiyun * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify 3592*4882a593Smuzhiyun * the initial value of a bpf_local_storage. If *value* is 3593*4882a593Smuzhiyun * **NULL**, the new bpf_local_storage will be zero initialized. 3594*4882a593Smuzhiyun * Return 3595*4882a593Smuzhiyun * A bpf_local_storage pointer is returned on success. 3596*4882a593Smuzhiyun * 3597*4882a593Smuzhiyun * **NULL** if not found or there was an error in adding 3598*4882a593Smuzhiyun * a new bpf_local_storage. 3599*4882a593Smuzhiyun * 3600*4882a593Smuzhiyun * int bpf_inode_storage_delete(struct bpf_map *map, void *inode) 3601*4882a593Smuzhiyun * Description 3602*4882a593Smuzhiyun * Delete a bpf_local_storage from an *inode*. 3603*4882a593Smuzhiyun * Return 3604*4882a593Smuzhiyun * 0 on success. 3605*4882a593Smuzhiyun * 3606*4882a593Smuzhiyun * **-ENOENT** if the bpf_local_storage cannot be found. 3607*4882a593Smuzhiyun * 3608*4882a593Smuzhiyun * long bpf_d_path(struct path *path, char *buf, u32 sz) 3609*4882a593Smuzhiyun * Description 3610*4882a593Smuzhiyun * Return full path for given **struct path** object, which 3611*4882a593Smuzhiyun * needs to be the kernel BTF *path* object. The path is 3612*4882a593Smuzhiyun * returned in the provided buffer *buf* of size *sz* and 3613*4882a593Smuzhiyun * is zero terminated. 3614*4882a593Smuzhiyun * 3615*4882a593Smuzhiyun * Return 3616*4882a593Smuzhiyun * On success, the strictly positive length of the string, 3617*4882a593Smuzhiyun * including the trailing NUL character. On error, a negative 3618*4882a593Smuzhiyun * value. 3619*4882a593Smuzhiyun * 3620*4882a593Smuzhiyun * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr) 3621*4882a593Smuzhiyun * Description 3622*4882a593Smuzhiyun * Read *size* bytes from user space address *user_ptr* and store 3623*4882a593Smuzhiyun * the data in *dst*. This is a wrapper of **copy_from_user**\ (). 3624*4882a593Smuzhiyun * Return 3625*4882a593Smuzhiyun * 0 on success, or a negative error in case of failure. 3626*4882a593Smuzhiyun * 3627*4882a593Smuzhiyun * long bpf_snprintf_btf(char *str, u32 str_size, struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags) 3628*4882a593Smuzhiyun * Description 3629*4882a593Smuzhiyun * Use BTF to store a string representation of *ptr*->ptr in *str*, 3630*4882a593Smuzhiyun * using *ptr*->type_id. This value should specify the type 3631*4882a593Smuzhiyun * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1) 3632*4882a593Smuzhiyun * can be used to look up vmlinux BTF type ids. Traversing the 3633*4882a593Smuzhiyun * data structure using BTF, the type information and values are 3634*4882a593Smuzhiyun * stored in the first *str_size* - 1 bytes of *str*. Safe copy of 3635*4882a593Smuzhiyun * the pointer data is carried out to avoid kernel crashes during 3636*4882a593Smuzhiyun * operation. Smaller types can use string space on the stack; 3637*4882a593Smuzhiyun * larger programs can use map data to store the string 3638*4882a593Smuzhiyun * representation. 3639*4882a593Smuzhiyun * 3640*4882a593Smuzhiyun * The string can be subsequently shared with userspace via 3641*4882a593Smuzhiyun * bpf_perf_event_output() or ring buffer interfaces. 3642*4882a593Smuzhiyun * bpf_trace_printk() is to be avoided as it places too small 3643*4882a593Smuzhiyun * a limit on string size to be useful. 3644*4882a593Smuzhiyun * 3645*4882a593Smuzhiyun * *flags* is a combination of 3646*4882a593Smuzhiyun * 3647*4882a593Smuzhiyun * **BTF_F_COMPACT** 3648*4882a593Smuzhiyun * no formatting around type information 3649*4882a593Smuzhiyun * **BTF_F_NONAME** 3650*4882a593Smuzhiyun * no struct/union member names/types 3651*4882a593Smuzhiyun * **BTF_F_PTR_RAW** 3652*4882a593Smuzhiyun * show raw (unobfuscated) pointer values; 3653*4882a593Smuzhiyun * equivalent to printk specifier %px. 3654*4882a593Smuzhiyun * **BTF_F_ZERO** 3655*4882a593Smuzhiyun * show zero-valued struct/union members; they 3656*4882a593Smuzhiyun * are not displayed by default 3657*4882a593Smuzhiyun * 3658*4882a593Smuzhiyun * Return 3659*4882a593Smuzhiyun * The number of bytes that were written (or would have been 3660*4882a593Smuzhiyun * written if output had to be truncated due to string size), 3661*4882a593Smuzhiyun * or a negative error in cases of failure. 3662*4882a593Smuzhiyun * 3663*4882a593Smuzhiyun * long bpf_seq_printf_btf(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags) 3664*4882a593Smuzhiyun * Description 3665*4882a593Smuzhiyun * Use BTF to write to seq_write a string representation of 3666*4882a593Smuzhiyun * *ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf(). 3667*4882a593Smuzhiyun * *flags* are identical to those used for bpf_snprintf_btf. 3668*4882a593Smuzhiyun * Return 3669*4882a593Smuzhiyun * 0 on success or a negative error in case of failure. 3670*4882a593Smuzhiyun * 3671*4882a593Smuzhiyun * u64 bpf_skb_cgroup_classid(struct sk_buff *skb) 3672*4882a593Smuzhiyun * Description 3673*4882a593Smuzhiyun * See **bpf_get_cgroup_classid**\ () for the main description. 3674*4882a593Smuzhiyun * This helper differs from **bpf_get_cgroup_classid**\ () in that 3675*4882a593Smuzhiyun * the cgroup v1 net_cls class is retrieved only from the *skb*'s 3676*4882a593Smuzhiyun * associated socket instead of the current process. 3677*4882a593Smuzhiyun * Return 3678*4882a593Smuzhiyun * The id is returned or 0 in case the id could not be retrieved. 3679*4882a593Smuzhiyun * 3680*4882a593Smuzhiyun * long bpf_redirect_neigh(u32 ifindex, struct bpf_redir_neigh *params, int plen, u64 flags) 3681*4882a593Smuzhiyun * Description 3682*4882a593Smuzhiyun * Redirect the packet to another net device of index *ifindex* 3683*4882a593Smuzhiyun * and fill in L2 addresses from neighboring subsystem. This helper 3684*4882a593Smuzhiyun * is somewhat similar to **bpf_redirect**\ (), except that it 3685*4882a593Smuzhiyun * populates L2 addresses as well, meaning, internally, the helper 3686*4882a593Smuzhiyun * relies on the neighbor lookup for the L2 address of the nexthop. 3687*4882a593Smuzhiyun * 3688*4882a593Smuzhiyun * The helper will perform a FIB lookup based on the skb's 3689*4882a593Smuzhiyun * networking header to get the address of the next hop, unless 3690*4882a593Smuzhiyun * this is supplied by the caller in the *params* argument. The 3691*4882a593Smuzhiyun * *plen* argument indicates the len of *params* and should be set 3692*4882a593Smuzhiyun * to 0 if *params* is NULL. 3693*4882a593Smuzhiyun * 3694*4882a593Smuzhiyun * The *flags* argument is reserved and must be 0. The helper is 3695*4882a593Smuzhiyun * currently only supported for tc BPF program types, and enabled 3696*4882a593Smuzhiyun * for IPv4 and IPv6 protocols. 3697*4882a593Smuzhiyun * Return 3698*4882a593Smuzhiyun * The helper returns **TC_ACT_REDIRECT** on success or 3699*4882a593Smuzhiyun * **TC_ACT_SHOT** on error. 3700*4882a593Smuzhiyun * 3701*4882a593Smuzhiyun * void *bpf_per_cpu_ptr(const void *percpu_ptr, u32 cpu) 3702*4882a593Smuzhiyun * Description 3703*4882a593Smuzhiyun * Take a pointer to a percpu ksym, *percpu_ptr*, and return a 3704*4882a593Smuzhiyun * pointer to the percpu kernel variable on *cpu*. A ksym is an 3705*4882a593Smuzhiyun * extern variable decorated with '__ksym'. For ksym, there is a 3706*4882a593Smuzhiyun * global var (either static or global) defined of the same name 3707*4882a593Smuzhiyun * in the kernel. The ksym is percpu if the global var is percpu. 3708*4882a593Smuzhiyun * The returned pointer points to the global percpu var on *cpu*. 3709*4882a593Smuzhiyun * 3710*4882a593Smuzhiyun * bpf_per_cpu_ptr() has the same semantic as per_cpu_ptr() in the 3711*4882a593Smuzhiyun * kernel, except that bpf_per_cpu_ptr() may return NULL. This 3712*4882a593Smuzhiyun * happens if *cpu* is larger than nr_cpu_ids. The caller of 3713*4882a593Smuzhiyun * bpf_per_cpu_ptr() must check the returned value. 3714*4882a593Smuzhiyun * Return 3715*4882a593Smuzhiyun * A pointer pointing to the kernel percpu variable on *cpu*, or 3716*4882a593Smuzhiyun * NULL, if *cpu* is invalid. 3717*4882a593Smuzhiyun * 3718*4882a593Smuzhiyun * void *bpf_this_cpu_ptr(const void *percpu_ptr) 3719*4882a593Smuzhiyun * Description 3720*4882a593Smuzhiyun * Take a pointer to a percpu ksym, *percpu_ptr*, and return a 3721*4882a593Smuzhiyun * pointer to the percpu kernel variable on this cpu. See the 3722*4882a593Smuzhiyun * description of 'ksym' in **bpf_per_cpu_ptr**\ (). 3723*4882a593Smuzhiyun * 3724*4882a593Smuzhiyun * bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in 3725*4882a593Smuzhiyun * the kernel. Different from **bpf_per_cpu_ptr**\ (), it would 3726*4882a593Smuzhiyun * never return NULL. 3727*4882a593Smuzhiyun * Return 3728*4882a593Smuzhiyun * A pointer pointing to the kernel percpu variable on this cpu. 3729*4882a593Smuzhiyun * 3730*4882a593Smuzhiyun * long bpf_redirect_peer(u32 ifindex, u64 flags) 3731*4882a593Smuzhiyun * Description 3732*4882a593Smuzhiyun * Redirect the packet to another net device of index *ifindex*. 3733*4882a593Smuzhiyun * This helper is somewhat similar to **bpf_redirect**\ (), except 3734*4882a593Smuzhiyun * that the redirection happens to the *ifindex*' peer device and 3735*4882a593Smuzhiyun * the netns switch takes place from ingress to ingress without 3736*4882a593Smuzhiyun * going through the CPU's backlog queue. 3737*4882a593Smuzhiyun * 3738*4882a593Smuzhiyun * The *flags* argument is reserved and must be 0. The helper is 3739*4882a593Smuzhiyun * currently only supported for tc BPF program types at the ingress 3740*4882a593Smuzhiyun * hook and for veth device types. The peer device must reside in a 3741*4882a593Smuzhiyun * different network namespace. 3742*4882a593Smuzhiyun * Return 3743*4882a593Smuzhiyun * The helper returns **TC_ACT_REDIRECT** on success or 3744*4882a593Smuzhiyun * **TC_ACT_SHOT** on error. 3745*4882a593Smuzhiyun */ 3746*4882a593Smuzhiyun #define __BPF_FUNC_MAPPER(FN) \ 3747*4882a593Smuzhiyun FN(unspec), \ 3748*4882a593Smuzhiyun FN(map_lookup_elem), \ 3749*4882a593Smuzhiyun FN(map_update_elem), \ 3750*4882a593Smuzhiyun FN(map_delete_elem), \ 3751*4882a593Smuzhiyun FN(probe_read), \ 3752*4882a593Smuzhiyun FN(ktime_get_ns), \ 3753*4882a593Smuzhiyun FN(trace_printk), \ 3754*4882a593Smuzhiyun FN(get_prandom_u32), \ 3755*4882a593Smuzhiyun FN(get_smp_processor_id), \ 3756*4882a593Smuzhiyun FN(skb_store_bytes), \ 3757*4882a593Smuzhiyun FN(l3_csum_replace), \ 3758*4882a593Smuzhiyun FN(l4_csum_replace), \ 3759*4882a593Smuzhiyun FN(tail_call), \ 3760*4882a593Smuzhiyun FN(clone_redirect), \ 3761*4882a593Smuzhiyun FN(get_current_pid_tgid), \ 3762*4882a593Smuzhiyun FN(get_current_uid_gid), \ 3763*4882a593Smuzhiyun FN(get_current_comm), \ 3764*4882a593Smuzhiyun FN(get_cgroup_classid), \ 3765*4882a593Smuzhiyun FN(skb_vlan_push), \ 3766*4882a593Smuzhiyun FN(skb_vlan_pop), \ 3767*4882a593Smuzhiyun FN(skb_get_tunnel_key), \ 3768*4882a593Smuzhiyun FN(skb_set_tunnel_key), \ 3769*4882a593Smuzhiyun FN(perf_event_read), \ 3770*4882a593Smuzhiyun FN(redirect), \ 3771*4882a593Smuzhiyun FN(get_route_realm), \ 3772*4882a593Smuzhiyun FN(perf_event_output), \ 3773*4882a593Smuzhiyun FN(skb_load_bytes), \ 3774*4882a593Smuzhiyun FN(get_stackid), \ 3775*4882a593Smuzhiyun FN(csum_diff), \ 3776*4882a593Smuzhiyun FN(skb_get_tunnel_opt), \ 3777*4882a593Smuzhiyun FN(skb_set_tunnel_opt), \ 3778*4882a593Smuzhiyun FN(skb_change_proto), \ 3779*4882a593Smuzhiyun FN(skb_change_type), \ 3780*4882a593Smuzhiyun FN(skb_under_cgroup), \ 3781*4882a593Smuzhiyun FN(get_hash_recalc), \ 3782*4882a593Smuzhiyun FN(get_current_task), \ 3783*4882a593Smuzhiyun FN(probe_write_user), \ 3784*4882a593Smuzhiyun FN(current_task_under_cgroup), \ 3785*4882a593Smuzhiyun FN(skb_change_tail), \ 3786*4882a593Smuzhiyun FN(skb_pull_data), \ 3787*4882a593Smuzhiyun FN(csum_update), \ 3788*4882a593Smuzhiyun FN(set_hash_invalid), \ 3789*4882a593Smuzhiyun FN(get_numa_node_id), \ 3790*4882a593Smuzhiyun FN(skb_change_head), \ 3791*4882a593Smuzhiyun FN(xdp_adjust_head), \ 3792*4882a593Smuzhiyun FN(probe_read_str), \ 3793*4882a593Smuzhiyun FN(get_socket_cookie), \ 3794*4882a593Smuzhiyun FN(get_socket_uid), \ 3795*4882a593Smuzhiyun FN(set_hash), \ 3796*4882a593Smuzhiyun FN(setsockopt), \ 3797*4882a593Smuzhiyun FN(skb_adjust_room), \ 3798*4882a593Smuzhiyun FN(redirect_map), \ 3799*4882a593Smuzhiyun FN(sk_redirect_map), \ 3800*4882a593Smuzhiyun FN(sock_map_update), \ 3801*4882a593Smuzhiyun FN(xdp_adjust_meta), \ 3802*4882a593Smuzhiyun FN(perf_event_read_value), \ 3803*4882a593Smuzhiyun FN(perf_prog_read_value), \ 3804*4882a593Smuzhiyun FN(getsockopt), \ 3805*4882a593Smuzhiyun FN(override_return), \ 3806*4882a593Smuzhiyun FN(sock_ops_cb_flags_set), \ 3807*4882a593Smuzhiyun FN(msg_redirect_map), \ 3808*4882a593Smuzhiyun FN(msg_apply_bytes), \ 3809*4882a593Smuzhiyun FN(msg_cork_bytes), \ 3810*4882a593Smuzhiyun FN(msg_pull_data), \ 3811*4882a593Smuzhiyun FN(bind), \ 3812*4882a593Smuzhiyun FN(xdp_adjust_tail), \ 3813*4882a593Smuzhiyun FN(skb_get_xfrm_state), \ 3814*4882a593Smuzhiyun FN(get_stack), \ 3815*4882a593Smuzhiyun FN(skb_load_bytes_relative), \ 3816*4882a593Smuzhiyun FN(fib_lookup), \ 3817*4882a593Smuzhiyun FN(sock_hash_update), \ 3818*4882a593Smuzhiyun FN(msg_redirect_hash), \ 3819*4882a593Smuzhiyun FN(sk_redirect_hash), \ 3820*4882a593Smuzhiyun FN(lwt_push_encap), \ 3821*4882a593Smuzhiyun FN(lwt_seg6_store_bytes), \ 3822*4882a593Smuzhiyun FN(lwt_seg6_adjust_srh), \ 3823*4882a593Smuzhiyun FN(lwt_seg6_action), \ 3824*4882a593Smuzhiyun FN(rc_repeat), \ 3825*4882a593Smuzhiyun FN(rc_keydown), \ 3826*4882a593Smuzhiyun FN(skb_cgroup_id), \ 3827*4882a593Smuzhiyun FN(get_current_cgroup_id), \ 3828*4882a593Smuzhiyun FN(get_local_storage), \ 3829*4882a593Smuzhiyun FN(sk_select_reuseport), \ 3830*4882a593Smuzhiyun FN(skb_ancestor_cgroup_id), \ 3831*4882a593Smuzhiyun FN(sk_lookup_tcp), \ 3832*4882a593Smuzhiyun FN(sk_lookup_udp), \ 3833*4882a593Smuzhiyun FN(sk_release), \ 3834*4882a593Smuzhiyun FN(map_push_elem), \ 3835*4882a593Smuzhiyun FN(map_pop_elem), \ 3836*4882a593Smuzhiyun FN(map_peek_elem), \ 3837*4882a593Smuzhiyun FN(msg_push_data), \ 3838*4882a593Smuzhiyun FN(msg_pop_data), \ 3839*4882a593Smuzhiyun FN(rc_pointer_rel), \ 3840*4882a593Smuzhiyun FN(spin_lock), \ 3841*4882a593Smuzhiyun FN(spin_unlock), \ 3842*4882a593Smuzhiyun FN(sk_fullsock), \ 3843*4882a593Smuzhiyun FN(tcp_sock), \ 3844*4882a593Smuzhiyun FN(skb_ecn_set_ce), \ 3845*4882a593Smuzhiyun FN(get_listener_sock), \ 3846*4882a593Smuzhiyun FN(skc_lookup_tcp), \ 3847*4882a593Smuzhiyun FN(tcp_check_syncookie), \ 3848*4882a593Smuzhiyun FN(sysctl_get_name), \ 3849*4882a593Smuzhiyun FN(sysctl_get_current_value), \ 3850*4882a593Smuzhiyun FN(sysctl_get_new_value), \ 3851*4882a593Smuzhiyun FN(sysctl_set_new_value), \ 3852*4882a593Smuzhiyun FN(strtol), \ 3853*4882a593Smuzhiyun FN(strtoul), \ 3854*4882a593Smuzhiyun FN(sk_storage_get), \ 3855*4882a593Smuzhiyun FN(sk_storage_delete), \ 3856*4882a593Smuzhiyun FN(send_signal), \ 3857*4882a593Smuzhiyun FN(tcp_gen_syncookie), \ 3858*4882a593Smuzhiyun FN(skb_output), \ 3859*4882a593Smuzhiyun FN(probe_read_user), \ 3860*4882a593Smuzhiyun FN(probe_read_kernel), \ 3861*4882a593Smuzhiyun FN(probe_read_user_str), \ 3862*4882a593Smuzhiyun FN(probe_read_kernel_str), \ 3863*4882a593Smuzhiyun FN(tcp_send_ack), \ 3864*4882a593Smuzhiyun FN(send_signal_thread), \ 3865*4882a593Smuzhiyun FN(jiffies64), \ 3866*4882a593Smuzhiyun FN(read_branch_records), \ 3867*4882a593Smuzhiyun FN(get_ns_current_pid_tgid), \ 3868*4882a593Smuzhiyun FN(xdp_output), \ 3869*4882a593Smuzhiyun FN(get_netns_cookie), \ 3870*4882a593Smuzhiyun FN(get_current_ancestor_cgroup_id), \ 3871*4882a593Smuzhiyun FN(sk_assign), \ 3872*4882a593Smuzhiyun FN(ktime_get_boot_ns), \ 3873*4882a593Smuzhiyun FN(seq_printf), \ 3874*4882a593Smuzhiyun FN(seq_write), \ 3875*4882a593Smuzhiyun FN(sk_cgroup_id), \ 3876*4882a593Smuzhiyun FN(sk_ancestor_cgroup_id), \ 3877*4882a593Smuzhiyun FN(ringbuf_output), \ 3878*4882a593Smuzhiyun FN(ringbuf_reserve), \ 3879*4882a593Smuzhiyun FN(ringbuf_submit), \ 3880*4882a593Smuzhiyun FN(ringbuf_discard), \ 3881*4882a593Smuzhiyun FN(ringbuf_query), \ 3882*4882a593Smuzhiyun FN(csum_level), \ 3883*4882a593Smuzhiyun FN(skc_to_tcp6_sock), \ 3884*4882a593Smuzhiyun FN(skc_to_tcp_sock), \ 3885*4882a593Smuzhiyun FN(skc_to_tcp_timewait_sock), \ 3886*4882a593Smuzhiyun FN(skc_to_tcp_request_sock), \ 3887*4882a593Smuzhiyun FN(skc_to_udp6_sock), \ 3888*4882a593Smuzhiyun FN(get_task_stack), \ 3889*4882a593Smuzhiyun FN(load_hdr_opt), \ 3890*4882a593Smuzhiyun FN(store_hdr_opt), \ 3891*4882a593Smuzhiyun FN(reserve_hdr_opt), \ 3892*4882a593Smuzhiyun FN(inode_storage_get), \ 3893*4882a593Smuzhiyun FN(inode_storage_delete), \ 3894*4882a593Smuzhiyun FN(d_path), \ 3895*4882a593Smuzhiyun FN(copy_from_user), \ 3896*4882a593Smuzhiyun FN(snprintf_btf), \ 3897*4882a593Smuzhiyun FN(seq_printf_btf), \ 3898*4882a593Smuzhiyun FN(skb_cgroup_classid), \ 3899*4882a593Smuzhiyun FN(redirect_neigh), \ 3900*4882a593Smuzhiyun FN(per_cpu_ptr), \ 3901*4882a593Smuzhiyun FN(this_cpu_ptr), \ 3902*4882a593Smuzhiyun FN(redirect_peer), \ 3903*4882a593Smuzhiyun /* */ 3904*4882a593Smuzhiyun 3905*4882a593Smuzhiyun /* integer value in 'imm' field of BPF_CALL instruction selects which helper 3906*4882a593Smuzhiyun * function eBPF program intends to call 3907*4882a593Smuzhiyun */ 3908*4882a593Smuzhiyun #define __BPF_ENUM_FN(x) BPF_FUNC_ ## x 3909*4882a593Smuzhiyun enum bpf_func_id { 3910*4882a593Smuzhiyun __BPF_FUNC_MAPPER(__BPF_ENUM_FN) 3911*4882a593Smuzhiyun __BPF_FUNC_MAX_ID, 3912*4882a593Smuzhiyun }; 3913*4882a593Smuzhiyun #undef __BPF_ENUM_FN 3914*4882a593Smuzhiyun 3915*4882a593Smuzhiyun /* All flags used by eBPF helper functions, placed here. */ 3916*4882a593Smuzhiyun 3917*4882a593Smuzhiyun /* BPF_FUNC_skb_store_bytes flags. */ 3918*4882a593Smuzhiyun enum { 3919*4882a593Smuzhiyun BPF_F_RECOMPUTE_CSUM = (1ULL << 0), 3920*4882a593Smuzhiyun BPF_F_INVALIDATE_HASH = (1ULL << 1), 3921*4882a593Smuzhiyun }; 3922*4882a593Smuzhiyun 3923*4882a593Smuzhiyun /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. 3924*4882a593Smuzhiyun * First 4 bits are for passing the header field size. 3925*4882a593Smuzhiyun */ 3926*4882a593Smuzhiyun enum { 3927*4882a593Smuzhiyun BPF_F_HDR_FIELD_MASK = 0xfULL, 3928*4882a593Smuzhiyun }; 3929*4882a593Smuzhiyun 3930*4882a593Smuzhiyun /* BPF_FUNC_l4_csum_replace flags. */ 3931*4882a593Smuzhiyun enum { 3932*4882a593Smuzhiyun BPF_F_PSEUDO_HDR = (1ULL << 4), 3933*4882a593Smuzhiyun BPF_F_MARK_MANGLED_0 = (1ULL << 5), 3934*4882a593Smuzhiyun BPF_F_MARK_ENFORCE = (1ULL << 6), 3935*4882a593Smuzhiyun }; 3936*4882a593Smuzhiyun 3937*4882a593Smuzhiyun /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ 3938*4882a593Smuzhiyun enum { 3939*4882a593Smuzhiyun BPF_F_INGRESS = (1ULL << 0), 3940*4882a593Smuzhiyun }; 3941*4882a593Smuzhiyun 3942*4882a593Smuzhiyun /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ 3943*4882a593Smuzhiyun enum { 3944*4882a593Smuzhiyun BPF_F_TUNINFO_IPV6 = (1ULL << 0), 3945*4882a593Smuzhiyun }; 3946*4882a593Smuzhiyun 3947*4882a593Smuzhiyun /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */ 3948*4882a593Smuzhiyun enum { 3949*4882a593Smuzhiyun BPF_F_SKIP_FIELD_MASK = 0xffULL, 3950*4882a593Smuzhiyun BPF_F_USER_STACK = (1ULL << 8), 3951*4882a593Smuzhiyun /* flags used by BPF_FUNC_get_stackid only. */ 3952*4882a593Smuzhiyun BPF_F_FAST_STACK_CMP = (1ULL << 9), 3953*4882a593Smuzhiyun BPF_F_REUSE_STACKID = (1ULL << 10), 3954*4882a593Smuzhiyun /* flags used by BPF_FUNC_get_stack only. */ 3955*4882a593Smuzhiyun BPF_F_USER_BUILD_ID = (1ULL << 11), 3956*4882a593Smuzhiyun }; 3957*4882a593Smuzhiyun 3958*4882a593Smuzhiyun /* BPF_FUNC_skb_set_tunnel_key flags. */ 3959*4882a593Smuzhiyun enum { 3960*4882a593Smuzhiyun BPF_F_ZERO_CSUM_TX = (1ULL << 1), 3961*4882a593Smuzhiyun BPF_F_DONT_FRAGMENT = (1ULL << 2), 3962*4882a593Smuzhiyun BPF_F_SEQ_NUMBER = (1ULL << 3), 3963*4882a593Smuzhiyun }; 3964*4882a593Smuzhiyun 3965*4882a593Smuzhiyun /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and 3966*4882a593Smuzhiyun * BPF_FUNC_perf_event_read_value flags. 3967*4882a593Smuzhiyun */ 3968*4882a593Smuzhiyun enum { 3969*4882a593Smuzhiyun BPF_F_INDEX_MASK = 0xffffffffULL, 3970*4882a593Smuzhiyun BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK, 3971*4882a593Smuzhiyun /* BPF_FUNC_perf_event_output for sk_buff input context. */ 3972*4882a593Smuzhiyun BPF_F_CTXLEN_MASK = (0xfffffULL << 32), 3973*4882a593Smuzhiyun }; 3974*4882a593Smuzhiyun 3975*4882a593Smuzhiyun /* Current network namespace */ 3976*4882a593Smuzhiyun enum { 3977*4882a593Smuzhiyun BPF_F_CURRENT_NETNS = (-1L), 3978*4882a593Smuzhiyun }; 3979*4882a593Smuzhiyun 3980*4882a593Smuzhiyun /* BPF_FUNC_csum_level level values. */ 3981*4882a593Smuzhiyun enum { 3982*4882a593Smuzhiyun BPF_CSUM_LEVEL_QUERY, 3983*4882a593Smuzhiyun BPF_CSUM_LEVEL_INC, 3984*4882a593Smuzhiyun BPF_CSUM_LEVEL_DEC, 3985*4882a593Smuzhiyun BPF_CSUM_LEVEL_RESET, 3986*4882a593Smuzhiyun }; 3987*4882a593Smuzhiyun 3988*4882a593Smuzhiyun /* BPF_FUNC_skb_adjust_room flags. */ 3989*4882a593Smuzhiyun enum { 3990*4882a593Smuzhiyun BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0), 3991*4882a593Smuzhiyun BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1), 3992*4882a593Smuzhiyun BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2), 3993*4882a593Smuzhiyun BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3), 3994*4882a593Smuzhiyun BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4), 3995*4882a593Smuzhiyun BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5), 3996*4882a593Smuzhiyun }; 3997*4882a593Smuzhiyun 3998*4882a593Smuzhiyun enum { 3999*4882a593Smuzhiyun BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff, 4000*4882a593Smuzhiyun BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, 4001*4882a593Smuzhiyun }; 4002*4882a593Smuzhiyun 4003*4882a593Smuzhiyun #define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \ 4004*4882a593Smuzhiyun BPF_ADJ_ROOM_ENCAP_L2_MASK) \ 4005*4882a593Smuzhiyun << BPF_ADJ_ROOM_ENCAP_L2_SHIFT) 4006*4882a593Smuzhiyun 4007*4882a593Smuzhiyun /* BPF_FUNC_sysctl_get_name flags. */ 4008*4882a593Smuzhiyun enum { 4009*4882a593Smuzhiyun BPF_F_SYSCTL_BASE_NAME = (1ULL << 0), 4010*4882a593Smuzhiyun }; 4011*4882a593Smuzhiyun 4012*4882a593Smuzhiyun /* BPF_FUNC_<kernel_obj>_storage_get flags */ 4013*4882a593Smuzhiyun enum { 4014*4882a593Smuzhiyun BPF_LOCAL_STORAGE_GET_F_CREATE = (1ULL << 0), 4015*4882a593Smuzhiyun /* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility 4016*4882a593Smuzhiyun * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead. 4017*4882a593Smuzhiyun */ 4018*4882a593Smuzhiyun BPF_SK_STORAGE_GET_F_CREATE = BPF_LOCAL_STORAGE_GET_F_CREATE, 4019*4882a593Smuzhiyun }; 4020*4882a593Smuzhiyun 4021*4882a593Smuzhiyun /* BPF_FUNC_read_branch_records flags. */ 4022*4882a593Smuzhiyun enum { 4023*4882a593Smuzhiyun BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0), 4024*4882a593Smuzhiyun }; 4025*4882a593Smuzhiyun 4026*4882a593Smuzhiyun /* BPF_FUNC_bpf_ringbuf_commit, BPF_FUNC_bpf_ringbuf_discard, and 4027*4882a593Smuzhiyun * BPF_FUNC_bpf_ringbuf_output flags. 4028*4882a593Smuzhiyun */ 4029*4882a593Smuzhiyun enum { 4030*4882a593Smuzhiyun BPF_RB_NO_WAKEUP = (1ULL << 0), 4031*4882a593Smuzhiyun BPF_RB_FORCE_WAKEUP = (1ULL << 1), 4032*4882a593Smuzhiyun }; 4033*4882a593Smuzhiyun 4034*4882a593Smuzhiyun /* BPF_FUNC_bpf_ringbuf_query flags */ 4035*4882a593Smuzhiyun enum { 4036*4882a593Smuzhiyun BPF_RB_AVAIL_DATA = 0, 4037*4882a593Smuzhiyun BPF_RB_RING_SIZE = 1, 4038*4882a593Smuzhiyun BPF_RB_CONS_POS = 2, 4039*4882a593Smuzhiyun BPF_RB_PROD_POS = 3, 4040*4882a593Smuzhiyun }; 4041*4882a593Smuzhiyun 4042*4882a593Smuzhiyun /* BPF ring buffer constants */ 4043*4882a593Smuzhiyun enum { 4044*4882a593Smuzhiyun BPF_RINGBUF_BUSY_BIT = (1U << 31), 4045*4882a593Smuzhiyun BPF_RINGBUF_DISCARD_BIT = (1U << 30), 4046*4882a593Smuzhiyun BPF_RINGBUF_HDR_SZ = 8, 4047*4882a593Smuzhiyun }; 4048*4882a593Smuzhiyun 4049*4882a593Smuzhiyun /* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */ 4050*4882a593Smuzhiyun enum { 4051*4882a593Smuzhiyun BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0), 4052*4882a593Smuzhiyun BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1), 4053*4882a593Smuzhiyun }; 4054*4882a593Smuzhiyun 4055*4882a593Smuzhiyun /* Mode for BPF_FUNC_skb_adjust_room helper. */ 4056*4882a593Smuzhiyun enum bpf_adj_room_mode { 4057*4882a593Smuzhiyun BPF_ADJ_ROOM_NET, 4058*4882a593Smuzhiyun BPF_ADJ_ROOM_MAC, 4059*4882a593Smuzhiyun }; 4060*4882a593Smuzhiyun 4061*4882a593Smuzhiyun /* Mode for BPF_FUNC_skb_load_bytes_relative helper. */ 4062*4882a593Smuzhiyun enum bpf_hdr_start_off { 4063*4882a593Smuzhiyun BPF_HDR_START_MAC, 4064*4882a593Smuzhiyun BPF_HDR_START_NET, 4065*4882a593Smuzhiyun }; 4066*4882a593Smuzhiyun 4067*4882a593Smuzhiyun /* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */ 4068*4882a593Smuzhiyun enum bpf_lwt_encap_mode { 4069*4882a593Smuzhiyun BPF_LWT_ENCAP_SEG6, 4070*4882a593Smuzhiyun BPF_LWT_ENCAP_SEG6_INLINE, 4071*4882a593Smuzhiyun BPF_LWT_ENCAP_IP, 4072*4882a593Smuzhiyun }; 4073*4882a593Smuzhiyun 4074*4882a593Smuzhiyun #define __bpf_md_ptr(type, name) \ 4075*4882a593Smuzhiyun union { \ 4076*4882a593Smuzhiyun type name; \ 4077*4882a593Smuzhiyun __u64 :64; \ 4078*4882a593Smuzhiyun } __attribute__((aligned(8))) 4079*4882a593Smuzhiyun 4080*4882a593Smuzhiyun /* user accessible mirror of in-kernel sk_buff. 4081*4882a593Smuzhiyun * new fields can only be added to the end of this structure 4082*4882a593Smuzhiyun */ 4083*4882a593Smuzhiyun struct __sk_buff { 4084*4882a593Smuzhiyun __u32 len; 4085*4882a593Smuzhiyun __u32 pkt_type; 4086*4882a593Smuzhiyun __u32 mark; 4087*4882a593Smuzhiyun __u32 queue_mapping; 4088*4882a593Smuzhiyun __u32 protocol; 4089*4882a593Smuzhiyun __u32 vlan_present; 4090*4882a593Smuzhiyun __u32 vlan_tci; 4091*4882a593Smuzhiyun __u32 vlan_proto; 4092*4882a593Smuzhiyun __u32 priority; 4093*4882a593Smuzhiyun __u32 ingress_ifindex; 4094*4882a593Smuzhiyun __u32 ifindex; 4095*4882a593Smuzhiyun __u32 tc_index; 4096*4882a593Smuzhiyun __u32 cb[5]; 4097*4882a593Smuzhiyun __u32 hash; 4098*4882a593Smuzhiyun __u32 tc_classid; 4099*4882a593Smuzhiyun __u32 data; 4100*4882a593Smuzhiyun __u32 data_end; 4101*4882a593Smuzhiyun __u32 napi_id; 4102*4882a593Smuzhiyun 4103*4882a593Smuzhiyun /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */ 4104*4882a593Smuzhiyun __u32 family; 4105*4882a593Smuzhiyun __u32 remote_ip4; /* Stored in network byte order */ 4106*4882a593Smuzhiyun __u32 local_ip4; /* Stored in network byte order */ 4107*4882a593Smuzhiyun __u32 remote_ip6[4]; /* Stored in network byte order */ 4108*4882a593Smuzhiyun __u32 local_ip6[4]; /* Stored in network byte order */ 4109*4882a593Smuzhiyun __u32 remote_port; /* Stored in network byte order */ 4110*4882a593Smuzhiyun __u32 local_port; /* stored in host byte order */ 4111*4882a593Smuzhiyun /* ... here. */ 4112*4882a593Smuzhiyun 4113*4882a593Smuzhiyun __u32 data_meta; 4114*4882a593Smuzhiyun __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); 4115*4882a593Smuzhiyun __u64 tstamp; 4116*4882a593Smuzhiyun __u32 wire_len; 4117*4882a593Smuzhiyun __u32 gso_segs; 4118*4882a593Smuzhiyun __bpf_md_ptr(struct bpf_sock *, sk); 4119*4882a593Smuzhiyun __u32 gso_size; 4120*4882a593Smuzhiyun }; 4121*4882a593Smuzhiyun 4122*4882a593Smuzhiyun struct bpf_tunnel_key { 4123*4882a593Smuzhiyun __u32 tunnel_id; 4124*4882a593Smuzhiyun union { 4125*4882a593Smuzhiyun __u32 remote_ipv4; 4126*4882a593Smuzhiyun __u32 remote_ipv6[4]; 4127*4882a593Smuzhiyun }; 4128*4882a593Smuzhiyun __u8 tunnel_tos; 4129*4882a593Smuzhiyun __u8 tunnel_ttl; 4130*4882a593Smuzhiyun __u16 tunnel_ext; /* Padding, future use. */ 4131*4882a593Smuzhiyun __u32 tunnel_label; 4132*4882a593Smuzhiyun }; 4133*4882a593Smuzhiyun 4134*4882a593Smuzhiyun /* user accessible mirror of in-kernel xfrm_state. 4135*4882a593Smuzhiyun * new fields can only be added to the end of this structure 4136*4882a593Smuzhiyun */ 4137*4882a593Smuzhiyun struct bpf_xfrm_state { 4138*4882a593Smuzhiyun __u32 reqid; 4139*4882a593Smuzhiyun __u32 spi; /* Stored in network byte order */ 4140*4882a593Smuzhiyun __u16 family; 4141*4882a593Smuzhiyun __u16 ext; /* Padding, future use. */ 4142*4882a593Smuzhiyun union { 4143*4882a593Smuzhiyun __u32 remote_ipv4; /* Stored in network byte order */ 4144*4882a593Smuzhiyun __u32 remote_ipv6[4]; /* Stored in network byte order */ 4145*4882a593Smuzhiyun }; 4146*4882a593Smuzhiyun }; 4147*4882a593Smuzhiyun 4148*4882a593Smuzhiyun /* Generic BPF return codes which all BPF program types may support. 4149*4882a593Smuzhiyun * The values are binary compatible with their TC_ACT_* counter-part to 4150*4882a593Smuzhiyun * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT 4151*4882a593Smuzhiyun * programs. 4152*4882a593Smuzhiyun * 4153*4882a593Smuzhiyun * XDP is handled seprately, see XDP_*. 4154*4882a593Smuzhiyun */ 4155*4882a593Smuzhiyun enum bpf_ret_code { 4156*4882a593Smuzhiyun BPF_OK = 0, 4157*4882a593Smuzhiyun /* 1 reserved */ 4158*4882a593Smuzhiyun BPF_DROP = 2, 4159*4882a593Smuzhiyun /* 3-6 reserved */ 4160*4882a593Smuzhiyun BPF_REDIRECT = 7, 4161*4882a593Smuzhiyun /* >127 are reserved for prog type specific return codes. 4162*4882a593Smuzhiyun * 4163*4882a593Smuzhiyun * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and 4164*4882a593Smuzhiyun * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been 4165*4882a593Smuzhiyun * changed and should be routed based on its new L3 header. 4166*4882a593Smuzhiyun * (This is an L3 redirect, as opposed to L2 redirect 4167*4882a593Smuzhiyun * represented by BPF_REDIRECT above). 4168*4882a593Smuzhiyun */ 4169*4882a593Smuzhiyun BPF_LWT_REROUTE = 128, 4170*4882a593Smuzhiyun }; 4171*4882a593Smuzhiyun 4172*4882a593Smuzhiyun struct bpf_sock { 4173*4882a593Smuzhiyun __u32 bound_dev_if; 4174*4882a593Smuzhiyun __u32 family; 4175*4882a593Smuzhiyun __u32 type; 4176*4882a593Smuzhiyun __u32 protocol; 4177*4882a593Smuzhiyun __u32 mark; 4178*4882a593Smuzhiyun __u32 priority; 4179*4882a593Smuzhiyun /* IP address also allows 1 and 2 bytes access */ 4180*4882a593Smuzhiyun __u32 src_ip4; 4181*4882a593Smuzhiyun __u32 src_ip6[4]; 4182*4882a593Smuzhiyun __u32 src_port; /* host byte order */ 4183*4882a593Smuzhiyun __be16 dst_port; /* network byte order */ 4184*4882a593Smuzhiyun __u16 :16; /* zero padding */ 4185*4882a593Smuzhiyun __u32 dst_ip4; 4186*4882a593Smuzhiyun __u32 dst_ip6[4]; 4187*4882a593Smuzhiyun __u32 state; 4188*4882a593Smuzhiyun __s32 rx_queue_mapping; 4189*4882a593Smuzhiyun }; 4190*4882a593Smuzhiyun 4191*4882a593Smuzhiyun struct bpf_tcp_sock { 4192*4882a593Smuzhiyun __u32 snd_cwnd; /* Sending congestion window */ 4193*4882a593Smuzhiyun __u32 srtt_us; /* smoothed round trip time << 3 in usecs */ 4194*4882a593Smuzhiyun __u32 rtt_min; 4195*4882a593Smuzhiyun __u32 snd_ssthresh; /* Slow start size threshold */ 4196*4882a593Smuzhiyun __u32 rcv_nxt; /* What we want to receive next */ 4197*4882a593Smuzhiyun __u32 snd_nxt; /* Next sequence we send */ 4198*4882a593Smuzhiyun __u32 snd_una; /* First byte we want an ack for */ 4199*4882a593Smuzhiyun __u32 mss_cache; /* Cached effective mss, not including SACKS */ 4200*4882a593Smuzhiyun __u32 ecn_flags; /* ECN status bits. */ 4201*4882a593Smuzhiyun __u32 rate_delivered; /* saved rate sample: packets delivered */ 4202*4882a593Smuzhiyun __u32 rate_interval_us; /* saved rate sample: time elapsed */ 4203*4882a593Smuzhiyun __u32 packets_out; /* Packets which are "in flight" */ 4204*4882a593Smuzhiyun __u32 retrans_out; /* Retransmitted packets out */ 4205*4882a593Smuzhiyun __u32 total_retrans; /* Total retransmits for entire connection */ 4206*4882a593Smuzhiyun __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn 4207*4882a593Smuzhiyun * total number of segments in. 4208*4882a593Smuzhiyun */ 4209*4882a593Smuzhiyun __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn 4210*4882a593Smuzhiyun * total number of data segments in. 4211*4882a593Smuzhiyun */ 4212*4882a593Smuzhiyun __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut 4213*4882a593Smuzhiyun * The total number of segments sent. 4214*4882a593Smuzhiyun */ 4215*4882a593Smuzhiyun __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut 4216*4882a593Smuzhiyun * total number of data segments sent. 4217*4882a593Smuzhiyun */ 4218*4882a593Smuzhiyun __u32 lost_out; /* Lost packets */ 4219*4882a593Smuzhiyun __u32 sacked_out; /* SACK'd packets */ 4220*4882a593Smuzhiyun __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived 4221*4882a593Smuzhiyun * sum(delta(rcv_nxt)), or how many bytes 4222*4882a593Smuzhiyun * were acked. 4223*4882a593Smuzhiyun */ 4224*4882a593Smuzhiyun __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked 4225*4882a593Smuzhiyun * sum(delta(snd_una)), or how many bytes 4226*4882a593Smuzhiyun * were acked. 4227*4882a593Smuzhiyun */ 4228*4882a593Smuzhiyun __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups 4229*4882a593Smuzhiyun * total number of DSACK blocks received 4230*4882a593Smuzhiyun */ 4231*4882a593Smuzhiyun __u32 delivered; /* Total data packets delivered incl. rexmits */ 4232*4882a593Smuzhiyun __u32 delivered_ce; /* Like the above but only ECE marked packets */ 4233*4882a593Smuzhiyun __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */ 4234*4882a593Smuzhiyun }; 4235*4882a593Smuzhiyun 4236*4882a593Smuzhiyun struct bpf_sock_tuple { 4237*4882a593Smuzhiyun union { 4238*4882a593Smuzhiyun struct { 4239*4882a593Smuzhiyun __be32 saddr; 4240*4882a593Smuzhiyun __be32 daddr; 4241*4882a593Smuzhiyun __be16 sport; 4242*4882a593Smuzhiyun __be16 dport; 4243*4882a593Smuzhiyun } ipv4; 4244*4882a593Smuzhiyun struct { 4245*4882a593Smuzhiyun __be32 saddr[4]; 4246*4882a593Smuzhiyun __be32 daddr[4]; 4247*4882a593Smuzhiyun __be16 sport; 4248*4882a593Smuzhiyun __be16 dport; 4249*4882a593Smuzhiyun } ipv6; 4250*4882a593Smuzhiyun }; 4251*4882a593Smuzhiyun }; 4252*4882a593Smuzhiyun 4253*4882a593Smuzhiyun struct bpf_xdp_sock { 4254*4882a593Smuzhiyun __u32 queue_id; 4255*4882a593Smuzhiyun }; 4256*4882a593Smuzhiyun 4257*4882a593Smuzhiyun #define XDP_PACKET_HEADROOM 256 4258*4882a593Smuzhiyun 4259*4882a593Smuzhiyun /* User return codes for XDP prog type. 4260*4882a593Smuzhiyun * A valid XDP program must return one of these defined values. All other 4261*4882a593Smuzhiyun * return codes are reserved for future use. Unknown return codes will 4262*4882a593Smuzhiyun * result in packet drops and a warning via bpf_warn_invalid_xdp_action(). 4263*4882a593Smuzhiyun */ 4264*4882a593Smuzhiyun enum xdp_action { 4265*4882a593Smuzhiyun XDP_ABORTED = 0, 4266*4882a593Smuzhiyun XDP_DROP, 4267*4882a593Smuzhiyun XDP_PASS, 4268*4882a593Smuzhiyun XDP_TX, 4269*4882a593Smuzhiyun XDP_REDIRECT, 4270*4882a593Smuzhiyun }; 4271*4882a593Smuzhiyun 4272*4882a593Smuzhiyun /* user accessible metadata for XDP packet hook 4273*4882a593Smuzhiyun * new fields must be added to the end of this structure 4274*4882a593Smuzhiyun */ 4275*4882a593Smuzhiyun struct xdp_md { 4276*4882a593Smuzhiyun __u32 data; 4277*4882a593Smuzhiyun __u32 data_end; 4278*4882a593Smuzhiyun __u32 data_meta; 4279*4882a593Smuzhiyun /* Below access go through struct xdp_rxq_info */ 4280*4882a593Smuzhiyun __u32 ingress_ifindex; /* rxq->dev->ifindex */ 4281*4882a593Smuzhiyun __u32 rx_queue_index; /* rxq->queue_index */ 4282*4882a593Smuzhiyun 4283*4882a593Smuzhiyun __u32 egress_ifindex; /* txq->dev->ifindex */ 4284*4882a593Smuzhiyun }; 4285*4882a593Smuzhiyun 4286*4882a593Smuzhiyun /* DEVMAP map-value layout 4287*4882a593Smuzhiyun * 4288*4882a593Smuzhiyun * The struct data-layout of map-value is a configuration interface. 4289*4882a593Smuzhiyun * New members can only be added to the end of this structure. 4290*4882a593Smuzhiyun */ 4291*4882a593Smuzhiyun struct bpf_devmap_val { 4292*4882a593Smuzhiyun __u32 ifindex; /* device index */ 4293*4882a593Smuzhiyun union { 4294*4882a593Smuzhiyun int fd; /* prog fd on map write */ 4295*4882a593Smuzhiyun __u32 id; /* prog id on map read */ 4296*4882a593Smuzhiyun } bpf_prog; 4297*4882a593Smuzhiyun }; 4298*4882a593Smuzhiyun 4299*4882a593Smuzhiyun /* CPUMAP map-value layout 4300*4882a593Smuzhiyun * 4301*4882a593Smuzhiyun * The struct data-layout of map-value is a configuration interface. 4302*4882a593Smuzhiyun * New members can only be added to the end of this structure. 4303*4882a593Smuzhiyun */ 4304*4882a593Smuzhiyun struct bpf_cpumap_val { 4305*4882a593Smuzhiyun __u32 qsize; /* queue size to remote target CPU */ 4306*4882a593Smuzhiyun union { 4307*4882a593Smuzhiyun int fd; /* prog fd on map write */ 4308*4882a593Smuzhiyun __u32 id; /* prog id on map read */ 4309*4882a593Smuzhiyun } bpf_prog; 4310*4882a593Smuzhiyun }; 4311*4882a593Smuzhiyun 4312*4882a593Smuzhiyun enum sk_action { 4313*4882a593Smuzhiyun SK_DROP = 0, 4314*4882a593Smuzhiyun SK_PASS, 4315*4882a593Smuzhiyun }; 4316*4882a593Smuzhiyun 4317*4882a593Smuzhiyun /* user accessible metadata for SK_MSG packet hook, new fields must 4318*4882a593Smuzhiyun * be added to the end of this structure 4319*4882a593Smuzhiyun */ 4320*4882a593Smuzhiyun struct sk_msg_md { 4321*4882a593Smuzhiyun __bpf_md_ptr(void *, data); 4322*4882a593Smuzhiyun __bpf_md_ptr(void *, data_end); 4323*4882a593Smuzhiyun 4324*4882a593Smuzhiyun __u32 family; 4325*4882a593Smuzhiyun __u32 remote_ip4; /* Stored in network byte order */ 4326*4882a593Smuzhiyun __u32 local_ip4; /* Stored in network byte order */ 4327*4882a593Smuzhiyun __u32 remote_ip6[4]; /* Stored in network byte order */ 4328*4882a593Smuzhiyun __u32 local_ip6[4]; /* Stored in network byte order */ 4329*4882a593Smuzhiyun __u32 remote_port; /* Stored in network byte order */ 4330*4882a593Smuzhiyun __u32 local_port; /* stored in host byte order */ 4331*4882a593Smuzhiyun __u32 size; /* Total size of sk_msg */ 4332*4882a593Smuzhiyun 4333*4882a593Smuzhiyun __bpf_md_ptr(struct bpf_sock *, sk); /* current socket */ 4334*4882a593Smuzhiyun }; 4335*4882a593Smuzhiyun 4336*4882a593Smuzhiyun struct sk_reuseport_md { 4337*4882a593Smuzhiyun /* 4338*4882a593Smuzhiyun * Start of directly accessible data. It begins from 4339*4882a593Smuzhiyun * the tcp/udp header. 4340*4882a593Smuzhiyun */ 4341*4882a593Smuzhiyun __bpf_md_ptr(void *, data); 4342*4882a593Smuzhiyun /* End of directly accessible data */ 4343*4882a593Smuzhiyun __bpf_md_ptr(void *, data_end); 4344*4882a593Smuzhiyun /* 4345*4882a593Smuzhiyun * Total length of packet (starting from the tcp/udp header). 4346*4882a593Smuzhiyun * Note that the directly accessible bytes (data_end - data) 4347*4882a593Smuzhiyun * could be less than this "len". Those bytes could be 4348*4882a593Smuzhiyun * indirectly read by a helper "bpf_skb_load_bytes()". 4349*4882a593Smuzhiyun */ 4350*4882a593Smuzhiyun __u32 len; 4351*4882a593Smuzhiyun /* 4352*4882a593Smuzhiyun * Eth protocol in the mac header (network byte order). e.g. 4353*4882a593Smuzhiyun * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD) 4354*4882a593Smuzhiyun */ 4355*4882a593Smuzhiyun __u32 eth_protocol; 4356*4882a593Smuzhiyun __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ 4357*4882a593Smuzhiyun __u32 bind_inany; /* Is sock bound to an INANY address? */ 4358*4882a593Smuzhiyun __u32 hash; /* A hash of the packet 4 tuples */ 4359*4882a593Smuzhiyun }; 4360*4882a593Smuzhiyun 4361*4882a593Smuzhiyun #define BPF_TAG_SIZE 8 4362*4882a593Smuzhiyun 4363*4882a593Smuzhiyun struct bpf_prog_info { 4364*4882a593Smuzhiyun __u32 type; 4365*4882a593Smuzhiyun __u32 id; 4366*4882a593Smuzhiyun __u8 tag[BPF_TAG_SIZE]; 4367*4882a593Smuzhiyun __u32 jited_prog_len; 4368*4882a593Smuzhiyun __u32 xlated_prog_len; 4369*4882a593Smuzhiyun __aligned_u64 jited_prog_insns; 4370*4882a593Smuzhiyun __aligned_u64 xlated_prog_insns; 4371*4882a593Smuzhiyun __u64 load_time; /* ns since boottime */ 4372*4882a593Smuzhiyun __u32 created_by_uid; 4373*4882a593Smuzhiyun __u32 nr_map_ids; 4374*4882a593Smuzhiyun __aligned_u64 map_ids; 4375*4882a593Smuzhiyun char name[BPF_OBJ_NAME_LEN]; 4376*4882a593Smuzhiyun __u32 ifindex; 4377*4882a593Smuzhiyun __u32 gpl_compatible:1; 4378*4882a593Smuzhiyun __u32 :31; /* alignment pad */ 4379*4882a593Smuzhiyun __u64 netns_dev; 4380*4882a593Smuzhiyun __u64 netns_ino; 4381*4882a593Smuzhiyun __u32 nr_jited_ksyms; 4382*4882a593Smuzhiyun __u32 nr_jited_func_lens; 4383*4882a593Smuzhiyun __aligned_u64 jited_ksyms; 4384*4882a593Smuzhiyun __aligned_u64 jited_func_lens; 4385*4882a593Smuzhiyun __u32 btf_id; 4386*4882a593Smuzhiyun __u32 func_info_rec_size; 4387*4882a593Smuzhiyun __aligned_u64 func_info; 4388*4882a593Smuzhiyun __u32 nr_func_info; 4389*4882a593Smuzhiyun __u32 nr_line_info; 4390*4882a593Smuzhiyun __aligned_u64 line_info; 4391*4882a593Smuzhiyun __aligned_u64 jited_line_info; 4392*4882a593Smuzhiyun __u32 nr_jited_line_info; 4393*4882a593Smuzhiyun __u32 line_info_rec_size; 4394*4882a593Smuzhiyun __u32 jited_line_info_rec_size; 4395*4882a593Smuzhiyun __u32 nr_prog_tags; 4396*4882a593Smuzhiyun __aligned_u64 prog_tags; 4397*4882a593Smuzhiyun __u64 run_time_ns; 4398*4882a593Smuzhiyun __u64 run_cnt; 4399*4882a593Smuzhiyun } __attribute__((aligned(8))); 4400*4882a593Smuzhiyun 4401*4882a593Smuzhiyun struct bpf_map_info { 4402*4882a593Smuzhiyun __u32 type; 4403*4882a593Smuzhiyun __u32 id; 4404*4882a593Smuzhiyun __u32 key_size; 4405*4882a593Smuzhiyun __u32 value_size; 4406*4882a593Smuzhiyun __u32 max_entries; 4407*4882a593Smuzhiyun __u32 map_flags; 4408*4882a593Smuzhiyun char name[BPF_OBJ_NAME_LEN]; 4409*4882a593Smuzhiyun __u32 ifindex; 4410*4882a593Smuzhiyun __u32 btf_vmlinux_value_type_id; 4411*4882a593Smuzhiyun __u64 netns_dev; 4412*4882a593Smuzhiyun __u64 netns_ino; 4413*4882a593Smuzhiyun __u32 btf_id; 4414*4882a593Smuzhiyun __u32 btf_key_type_id; 4415*4882a593Smuzhiyun __u32 btf_value_type_id; 4416*4882a593Smuzhiyun } __attribute__((aligned(8))); 4417*4882a593Smuzhiyun 4418*4882a593Smuzhiyun struct bpf_btf_info { 4419*4882a593Smuzhiyun __aligned_u64 btf; 4420*4882a593Smuzhiyun __u32 btf_size; 4421*4882a593Smuzhiyun __u32 id; 4422*4882a593Smuzhiyun } __attribute__((aligned(8))); 4423*4882a593Smuzhiyun 4424*4882a593Smuzhiyun struct bpf_link_info { 4425*4882a593Smuzhiyun __u32 type; 4426*4882a593Smuzhiyun __u32 id; 4427*4882a593Smuzhiyun __u32 prog_id; 4428*4882a593Smuzhiyun union { 4429*4882a593Smuzhiyun struct { 4430*4882a593Smuzhiyun __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */ 4431*4882a593Smuzhiyun __u32 tp_name_len; /* in/out: tp_name buffer len */ 4432*4882a593Smuzhiyun } raw_tracepoint; 4433*4882a593Smuzhiyun struct { 4434*4882a593Smuzhiyun __u32 attach_type; 4435*4882a593Smuzhiyun } tracing; 4436*4882a593Smuzhiyun struct { 4437*4882a593Smuzhiyun __u64 cgroup_id; 4438*4882a593Smuzhiyun __u32 attach_type; 4439*4882a593Smuzhiyun } cgroup; 4440*4882a593Smuzhiyun struct { 4441*4882a593Smuzhiyun __aligned_u64 target_name; /* in/out: target_name buffer ptr */ 4442*4882a593Smuzhiyun __u32 target_name_len; /* in/out: target_name buffer len */ 4443*4882a593Smuzhiyun union { 4444*4882a593Smuzhiyun struct { 4445*4882a593Smuzhiyun __u32 map_id; 4446*4882a593Smuzhiyun } map; 4447*4882a593Smuzhiyun }; 4448*4882a593Smuzhiyun } iter; 4449*4882a593Smuzhiyun struct { 4450*4882a593Smuzhiyun __u32 netns_ino; 4451*4882a593Smuzhiyun __u32 attach_type; 4452*4882a593Smuzhiyun } netns; 4453*4882a593Smuzhiyun struct { 4454*4882a593Smuzhiyun __u32 ifindex; 4455*4882a593Smuzhiyun } xdp; 4456*4882a593Smuzhiyun }; 4457*4882a593Smuzhiyun } __attribute__((aligned(8))); 4458*4882a593Smuzhiyun 4459*4882a593Smuzhiyun /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed 4460*4882a593Smuzhiyun * by user and intended to be used by socket (e.g. to bind to, depends on 4461*4882a593Smuzhiyun * attach type). 4462*4882a593Smuzhiyun */ 4463*4882a593Smuzhiyun struct bpf_sock_addr { 4464*4882a593Smuzhiyun __u32 user_family; /* Allows 4-byte read, but no write. */ 4465*4882a593Smuzhiyun __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write. 4466*4882a593Smuzhiyun * Stored in network byte order. 4467*4882a593Smuzhiyun */ 4468*4882a593Smuzhiyun __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. 4469*4882a593Smuzhiyun * Stored in network byte order. 4470*4882a593Smuzhiyun */ 4471*4882a593Smuzhiyun __u32 user_port; /* Allows 1,2,4-byte read and 4-byte write. 4472*4882a593Smuzhiyun * Stored in network byte order 4473*4882a593Smuzhiyun */ 4474*4882a593Smuzhiyun __u32 family; /* Allows 4-byte read, but no write */ 4475*4882a593Smuzhiyun __u32 type; /* Allows 4-byte read, but no write */ 4476*4882a593Smuzhiyun __u32 protocol; /* Allows 4-byte read, but no write */ 4477*4882a593Smuzhiyun __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write. 4478*4882a593Smuzhiyun * Stored in network byte order. 4479*4882a593Smuzhiyun */ 4480*4882a593Smuzhiyun __u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. 4481*4882a593Smuzhiyun * Stored in network byte order. 4482*4882a593Smuzhiyun */ 4483*4882a593Smuzhiyun __bpf_md_ptr(struct bpf_sock *, sk); 4484*4882a593Smuzhiyun }; 4485*4882a593Smuzhiyun 4486*4882a593Smuzhiyun /* User bpf_sock_ops struct to access socket values and specify request ops 4487*4882a593Smuzhiyun * and their replies. 4488*4882a593Smuzhiyun * Some of this fields are in network (bigendian) byte order and may need 4489*4882a593Smuzhiyun * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h). 4490*4882a593Smuzhiyun * New fields can only be added at the end of this structure 4491*4882a593Smuzhiyun */ 4492*4882a593Smuzhiyun struct bpf_sock_ops { 4493*4882a593Smuzhiyun __u32 op; 4494*4882a593Smuzhiyun union { 4495*4882a593Smuzhiyun __u32 args[4]; /* Optionally passed to bpf program */ 4496*4882a593Smuzhiyun __u32 reply; /* Returned by bpf program */ 4497*4882a593Smuzhiyun __u32 replylong[4]; /* Optionally returned by bpf prog */ 4498*4882a593Smuzhiyun }; 4499*4882a593Smuzhiyun __u32 family; 4500*4882a593Smuzhiyun __u32 remote_ip4; /* Stored in network byte order */ 4501*4882a593Smuzhiyun __u32 local_ip4; /* Stored in network byte order */ 4502*4882a593Smuzhiyun __u32 remote_ip6[4]; /* Stored in network byte order */ 4503*4882a593Smuzhiyun __u32 local_ip6[4]; /* Stored in network byte order */ 4504*4882a593Smuzhiyun __u32 remote_port; /* Stored in network byte order */ 4505*4882a593Smuzhiyun __u32 local_port; /* stored in host byte order */ 4506*4882a593Smuzhiyun __u32 is_fullsock; /* Some TCP fields are only valid if 4507*4882a593Smuzhiyun * there is a full socket. If not, the 4508*4882a593Smuzhiyun * fields read as zero. 4509*4882a593Smuzhiyun */ 4510*4882a593Smuzhiyun __u32 snd_cwnd; 4511*4882a593Smuzhiyun __u32 srtt_us; /* Averaged RTT << 3 in usecs */ 4512*4882a593Smuzhiyun __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */ 4513*4882a593Smuzhiyun __u32 state; 4514*4882a593Smuzhiyun __u32 rtt_min; 4515*4882a593Smuzhiyun __u32 snd_ssthresh; 4516*4882a593Smuzhiyun __u32 rcv_nxt; 4517*4882a593Smuzhiyun __u32 snd_nxt; 4518*4882a593Smuzhiyun __u32 snd_una; 4519*4882a593Smuzhiyun __u32 mss_cache; 4520*4882a593Smuzhiyun __u32 ecn_flags; 4521*4882a593Smuzhiyun __u32 rate_delivered; 4522*4882a593Smuzhiyun __u32 rate_interval_us; 4523*4882a593Smuzhiyun __u32 packets_out; 4524*4882a593Smuzhiyun __u32 retrans_out; 4525*4882a593Smuzhiyun __u32 total_retrans; 4526*4882a593Smuzhiyun __u32 segs_in; 4527*4882a593Smuzhiyun __u32 data_segs_in; 4528*4882a593Smuzhiyun __u32 segs_out; 4529*4882a593Smuzhiyun __u32 data_segs_out; 4530*4882a593Smuzhiyun __u32 lost_out; 4531*4882a593Smuzhiyun __u32 sacked_out; 4532*4882a593Smuzhiyun __u32 sk_txhash; 4533*4882a593Smuzhiyun __u64 bytes_received; 4534*4882a593Smuzhiyun __u64 bytes_acked; 4535*4882a593Smuzhiyun __bpf_md_ptr(struct bpf_sock *, sk); 4536*4882a593Smuzhiyun /* [skb_data, skb_data_end) covers the whole TCP header. 4537*4882a593Smuzhiyun * 4538*4882a593Smuzhiyun * BPF_SOCK_OPS_PARSE_HDR_OPT_CB: The packet received 4539*4882a593Smuzhiyun * BPF_SOCK_OPS_HDR_OPT_LEN_CB: Not useful because the 4540*4882a593Smuzhiyun * header has not been written. 4541*4882a593Smuzhiyun * BPF_SOCK_OPS_WRITE_HDR_OPT_CB: The header and options have 4542*4882a593Smuzhiyun * been written so far. 4543*4882a593Smuzhiyun * BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: The SYNACK that concludes 4544*4882a593Smuzhiyun * the 3WHS. 4545*4882a593Smuzhiyun * BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: The ACK that concludes 4546*4882a593Smuzhiyun * the 3WHS. 4547*4882a593Smuzhiyun * 4548*4882a593Smuzhiyun * bpf_load_hdr_opt() can also be used to read a particular option. 4549*4882a593Smuzhiyun */ 4550*4882a593Smuzhiyun __bpf_md_ptr(void *, skb_data); 4551*4882a593Smuzhiyun __bpf_md_ptr(void *, skb_data_end); 4552*4882a593Smuzhiyun __u32 skb_len; /* The total length of a packet. 4553*4882a593Smuzhiyun * It includes the header, options, 4554*4882a593Smuzhiyun * and payload. 4555*4882a593Smuzhiyun */ 4556*4882a593Smuzhiyun __u32 skb_tcp_flags; /* tcp_flags of the header. It provides 4557*4882a593Smuzhiyun * an easy way to check for tcp_flags 4558*4882a593Smuzhiyun * without parsing skb_data. 4559*4882a593Smuzhiyun * 4560*4882a593Smuzhiyun * In particular, the skb_tcp_flags 4561*4882a593Smuzhiyun * will still be available in 4562*4882a593Smuzhiyun * BPF_SOCK_OPS_HDR_OPT_LEN even though 4563*4882a593Smuzhiyun * the outgoing header has not 4564*4882a593Smuzhiyun * been written yet. 4565*4882a593Smuzhiyun */ 4566*4882a593Smuzhiyun }; 4567*4882a593Smuzhiyun 4568*4882a593Smuzhiyun /* Definitions for bpf_sock_ops_cb_flags */ 4569*4882a593Smuzhiyun enum { 4570*4882a593Smuzhiyun BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0), 4571*4882a593Smuzhiyun BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1), 4572*4882a593Smuzhiyun BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2), 4573*4882a593Smuzhiyun BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3), 4574*4882a593Smuzhiyun /* Call bpf for all received TCP headers. The bpf prog will be 4575*4882a593Smuzhiyun * called under sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB 4576*4882a593Smuzhiyun * 4577*4882a593Smuzhiyun * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB 4578*4882a593Smuzhiyun * for the header option related helpers that will be useful 4579*4882a593Smuzhiyun * to the bpf programs. 4580*4882a593Smuzhiyun * 4581*4882a593Smuzhiyun * It could be used at the client/active side (i.e. connect() side) 4582*4882a593Smuzhiyun * when the server told it that the server was in syncookie 4583*4882a593Smuzhiyun * mode and required the active side to resend the bpf-written 4584*4882a593Smuzhiyun * options. The active side can keep writing the bpf-options until 4585*4882a593Smuzhiyun * it received a valid packet from the server side to confirm 4586*4882a593Smuzhiyun * the earlier packet (and options) has been received. The later 4587*4882a593Smuzhiyun * example patch is using it like this at the active side when the 4588*4882a593Smuzhiyun * server is in syncookie mode. 4589*4882a593Smuzhiyun * 4590*4882a593Smuzhiyun * The bpf prog will usually turn this off in the common cases. 4591*4882a593Smuzhiyun */ 4592*4882a593Smuzhiyun BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = (1<<4), 4593*4882a593Smuzhiyun /* Call bpf when kernel has received a header option that 4594*4882a593Smuzhiyun * the kernel cannot handle. The bpf prog will be called under 4595*4882a593Smuzhiyun * sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB. 4596*4882a593Smuzhiyun * 4597*4882a593Smuzhiyun * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB 4598*4882a593Smuzhiyun * for the header option related helpers that will be useful 4599*4882a593Smuzhiyun * to the bpf programs. 4600*4882a593Smuzhiyun */ 4601*4882a593Smuzhiyun BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5), 4602*4882a593Smuzhiyun /* Call bpf when the kernel is writing header options for the 4603*4882a593Smuzhiyun * outgoing packet. The bpf prog will first be called 4604*4882a593Smuzhiyun * to reserve space in a skb under 4605*4882a593Smuzhiyun * sock_ops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB. Then 4606*4882a593Smuzhiyun * the bpf prog will be called to write the header option(s) 4607*4882a593Smuzhiyun * under sock_ops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB. 4608*4882a593Smuzhiyun * 4609*4882a593Smuzhiyun * Please refer to the comment in BPF_SOCK_OPS_HDR_OPT_LEN_CB 4610*4882a593Smuzhiyun * and BPF_SOCK_OPS_WRITE_HDR_OPT_CB for the header option 4611*4882a593Smuzhiyun * related helpers that will be useful to the bpf programs. 4612*4882a593Smuzhiyun * 4613*4882a593Smuzhiyun * The kernel gets its chance to reserve space and write 4614*4882a593Smuzhiyun * options first before the BPF program does. 4615*4882a593Smuzhiyun */ 4616*4882a593Smuzhiyun BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6), 4617*4882a593Smuzhiyun /* Mask of all currently supported cb flags */ 4618*4882a593Smuzhiyun BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F, 4619*4882a593Smuzhiyun }; 4620*4882a593Smuzhiyun 4621*4882a593Smuzhiyun /* List of known BPF sock_ops operators. 4622*4882a593Smuzhiyun * New entries can only be added at the end 4623*4882a593Smuzhiyun */ 4624*4882a593Smuzhiyun enum { 4625*4882a593Smuzhiyun BPF_SOCK_OPS_VOID, 4626*4882a593Smuzhiyun BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or 4627*4882a593Smuzhiyun * -1 if default value should be used 4628*4882a593Smuzhiyun */ 4629*4882a593Smuzhiyun BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertized 4630*4882a593Smuzhiyun * window (in packets) or -1 if default 4631*4882a593Smuzhiyun * value should be used 4632*4882a593Smuzhiyun */ 4633*4882a593Smuzhiyun BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an 4634*4882a593Smuzhiyun * active connection is initialized 4635*4882a593Smuzhiyun */ 4636*4882a593Smuzhiyun BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an 4637*4882a593Smuzhiyun * active connection is 4638*4882a593Smuzhiyun * established 4639*4882a593Smuzhiyun */ 4640*4882a593Smuzhiyun BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a 4641*4882a593Smuzhiyun * passive connection is 4642*4882a593Smuzhiyun * established 4643*4882a593Smuzhiyun */ 4644*4882a593Smuzhiyun BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control 4645*4882a593Smuzhiyun * needs ECN 4646*4882a593Smuzhiyun */ 4647*4882a593Smuzhiyun BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is 4648*4882a593Smuzhiyun * based on the path and may be 4649*4882a593Smuzhiyun * dependent on the congestion control 4650*4882a593Smuzhiyun * algorithm. In general it indicates 4651*4882a593Smuzhiyun * a congestion threshold. RTTs above 4652*4882a593Smuzhiyun * this indicate congestion 4653*4882a593Smuzhiyun */ 4654*4882a593Smuzhiyun BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered. 4655*4882a593Smuzhiyun * Arg1: value of icsk_retransmits 4656*4882a593Smuzhiyun * Arg2: value of icsk_rto 4657*4882a593Smuzhiyun * Arg3: whether RTO has expired 4658*4882a593Smuzhiyun */ 4659*4882a593Smuzhiyun BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted. 4660*4882a593Smuzhiyun * Arg1: sequence number of 1st byte 4661*4882a593Smuzhiyun * Arg2: # segments 4662*4882a593Smuzhiyun * Arg3: return value of 4663*4882a593Smuzhiyun * tcp_transmit_skb (0 => success) 4664*4882a593Smuzhiyun */ 4665*4882a593Smuzhiyun BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state. 4666*4882a593Smuzhiyun * Arg1: old_state 4667*4882a593Smuzhiyun * Arg2: new_state 4668*4882a593Smuzhiyun */ 4669*4882a593Smuzhiyun BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after 4670*4882a593Smuzhiyun * socket transition to LISTEN state. 4671*4882a593Smuzhiyun */ 4672*4882a593Smuzhiyun BPF_SOCK_OPS_RTT_CB, /* Called on every RTT. 4673*4882a593Smuzhiyun */ 4674*4882a593Smuzhiyun BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option. 4675*4882a593Smuzhiyun * It will be called to handle 4676*4882a593Smuzhiyun * the packets received at 4677*4882a593Smuzhiyun * an already established 4678*4882a593Smuzhiyun * connection. 4679*4882a593Smuzhiyun * 4680*4882a593Smuzhiyun * sock_ops->skb_data: 4681*4882a593Smuzhiyun * Referring to the received skb. 4682*4882a593Smuzhiyun * It covers the TCP header only. 4683*4882a593Smuzhiyun * 4684*4882a593Smuzhiyun * bpf_load_hdr_opt() can also 4685*4882a593Smuzhiyun * be used to search for a 4686*4882a593Smuzhiyun * particular option. 4687*4882a593Smuzhiyun */ 4688*4882a593Smuzhiyun BPF_SOCK_OPS_HDR_OPT_LEN_CB, /* Reserve space for writing the 4689*4882a593Smuzhiyun * header option later in 4690*4882a593Smuzhiyun * BPF_SOCK_OPS_WRITE_HDR_OPT_CB. 4691*4882a593Smuzhiyun * Arg1: bool want_cookie. (in 4692*4882a593Smuzhiyun * writing SYNACK only) 4693*4882a593Smuzhiyun * 4694*4882a593Smuzhiyun * sock_ops->skb_data: 4695*4882a593Smuzhiyun * Not available because no header has 4696*4882a593Smuzhiyun * been written yet. 4697*4882a593Smuzhiyun * 4698*4882a593Smuzhiyun * sock_ops->skb_tcp_flags: 4699*4882a593Smuzhiyun * The tcp_flags of the 4700*4882a593Smuzhiyun * outgoing skb. (e.g. SYN, ACK, FIN). 4701*4882a593Smuzhiyun * 4702*4882a593Smuzhiyun * bpf_reserve_hdr_opt() should 4703*4882a593Smuzhiyun * be used to reserve space. 4704*4882a593Smuzhiyun */ 4705*4882a593Smuzhiyun BPF_SOCK_OPS_WRITE_HDR_OPT_CB, /* Write the header options 4706*4882a593Smuzhiyun * Arg1: bool want_cookie. (in 4707*4882a593Smuzhiyun * writing SYNACK only) 4708*4882a593Smuzhiyun * 4709*4882a593Smuzhiyun * sock_ops->skb_data: 4710*4882a593Smuzhiyun * Referring to the outgoing skb. 4711*4882a593Smuzhiyun * It covers the TCP header 4712*4882a593Smuzhiyun * that has already been written 4713*4882a593Smuzhiyun * by the kernel and the 4714*4882a593Smuzhiyun * earlier bpf-progs. 4715*4882a593Smuzhiyun * 4716*4882a593Smuzhiyun * sock_ops->skb_tcp_flags: 4717*4882a593Smuzhiyun * The tcp_flags of the outgoing 4718*4882a593Smuzhiyun * skb. (e.g. SYN, ACK, FIN). 4719*4882a593Smuzhiyun * 4720*4882a593Smuzhiyun * bpf_store_hdr_opt() should 4721*4882a593Smuzhiyun * be used to write the 4722*4882a593Smuzhiyun * option. 4723*4882a593Smuzhiyun * 4724*4882a593Smuzhiyun * bpf_load_hdr_opt() can also 4725*4882a593Smuzhiyun * be used to search for a 4726*4882a593Smuzhiyun * particular option that 4727*4882a593Smuzhiyun * has already been written 4728*4882a593Smuzhiyun * by the kernel or the 4729*4882a593Smuzhiyun * earlier bpf-progs. 4730*4882a593Smuzhiyun */ 4731*4882a593Smuzhiyun }; 4732*4882a593Smuzhiyun 4733*4882a593Smuzhiyun /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect 4734*4882a593Smuzhiyun * changes between the TCP and BPF versions. Ideally this should never happen. 4735*4882a593Smuzhiyun * If it does, we need to add code to convert them before calling 4736*4882a593Smuzhiyun * the BPF sock_ops function. 4737*4882a593Smuzhiyun */ 4738*4882a593Smuzhiyun enum { 4739*4882a593Smuzhiyun BPF_TCP_ESTABLISHED = 1, 4740*4882a593Smuzhiyun BPF_TCP_SYN_SENT, 4741*4882a593Smuzhiyun BPF_TCP_SYN_RECV, 4742*4882a593Smuzhiyun BPF_TCP_FIN_WAIT1, 4743*4882a593Smuzhiyun BPF_TCP_FIN_WAIT2, 4744*4882a593Smuzhiyun BPF_TCP_TIME_WAIT, 4745*4882a593Smuzhiyun BPF_TCP_CLOSE, 4746*4882a593Smuzhiyun BPF_TCP_CLOSE_WAIT, 4747*4882a593Smuzhiyun BPF_TCP_LAST_ACK, 4748*4882a593Smuzhiyun BPF_TCP_LISTEN, 4749*4882a593Smuzhiyun BPF_TCP_CLOSING, /* Now a valid state */ 4750*4882a593Smuzhiyun BPF_TCP_NEW_SYN_RECV, 4751*4882a593Smuzhiyun 4752*4882a593Smuzhiyun BPF_TCP_MAX_STATES /* Leave at the end! */ 4753*4882a593Smuzhiyun }; 4754*4882a593Smuzhiyun 4755*4882a593Smuzhiyun enum { 4756*4882a593Smuzhiyun TCP_BPF_IW = 1001, /* Set TCP initial congestion window */ 4757*4882a593Smuzhiyun TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */ 4758*4882a593Smuzhiyun TCP_BPF_DELACK_MAX = 1003, /* Max delay ack in usecs */ 4759*4882a593Smuzhiyun TCP_BPF_RTO_MIN = 1004, /* Min delay ack in usecs */ 4760*4882a593Smuzhiyun /* Copy the SYN pkt to optval 4761*4882a593Smuzhiyun * 4762*4882a593Smuzhiyun * BPF_PROG_TYPE_SOCK_OPS only. It is similar to the 4763*4882a593Smuzhiyun * bpf_getsockopt(TCP_SAVED_SYN) but it does not limit 4764*4882a593Smuzhiyun * to only getting from the saved_syn. It can either get the 4765*4882a593Smuzhiyun * syn packet from: 4766*4882a593Smuzhiyun * 4767*4882a593Smuzhiyun * 1. the just-received SYN packet (only available when writing the 4768*4882a593Smuzhiyun * SYNACK). It will be useful when it is not necessary to 4769*4882a593Smuzhiyun * save the SYN packet for latter use. It is also the only way 4770*4882a593Smuzhiyun * to get the SYN during syncookie mode because the syn 4771*4882a593Smuzhiyun * packet cannot be saved during syncookie. 4772*4882a593Smuzhiyun * 4773*4882a593Smuzhiyun * OR 4774*4882a593Smuzhiyun * 4775*4882a593Smuzhiyun * 2. the earlier saved syn which was done by 4776*4882a593Smuzhiyun * bpf_setsockopt(TCP_SAVE_SYN). 4777*4882a593Smuzhiyun * 4778*4882a593Smuzhiyun * The bpf_getsockopt(TCP_BPF_SYN*) option will hide where the 4779*4882a593Smuzhiyun * SYN packet is obtained. 4780*4882a593Smuzhiyun * 4781*4882a593Smuzhiyun * If the bpf-prog does not need the IP[46] header, the 4782*4882a593Smuzhiyun * bpf-prog can avoid parsing the IP header by using 4783*4882a593Smuzhiyun * TCP_BPF_SYN. Otherwise, the bpf-prog can get both 4784*4882a593Smuzhiyun * IP[46] and TCP header by using TCP_BPF_SYN_IP. 4785*4882a593Smuzhiyun * 4786*4882a593Smuzhiyun * >0: Total number of bytes copied 4787*4882a593Smuzhiyun * -ENOSPC: Not enough space in optval. Only optlen number of 4788*4882a593Smuzhiyun * bytes is copied. 4789*4882a593Smuzhiyun * -ENOENT: The SYN skb is not available now and the earlier SYN pkt 4790*4882a593Smuzhiyun * is not saved by setsockopt(TCP_SAVE_SYN). 4791*4882a593Smuzhiyun */ 4792*4882a593Smuzhiyun TCP_BPF_SYN = 1005, /* Copy the TCP header */ 4793*4882a593Smuzhiyun TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */ 4794*4882a593Smuzhiyun TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */ 4795*4882a593Smuzhiyun }; 4796*4882a593Smuzhiyun 4797*4882a593Smuzhiyun enum { 4798*4882a593Smuzhiyun BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0), 4799*4882a593Smuzhiyun }; 4800*4882a593Smuzhiyun 4801*4882a593Smuzhiyun /* args[0] value during BPF_SOCK_OPS_HDR_OPT_LEN_CB and 4802*4882a593Smuzhiyun * BPF_SOCK_OPS_WRITE_HDR_OPT_CB. 4803*4882a593Smuzhiyun */ 4804*4882a593Smuzhiyun enum { 4805*4882a593Smuzhiyun BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, /* Kernel is finding the 4806*4882a593Smuzhiyun * total option spaces 4807*4882a593Smuzhiyun * required for an established 4808*4882a593Smuzhiyun * sk in order to calculate the 4809*4882a593Smuzhiyun * MSS. No skb is actually 4810*4882a593Smuzhiyun * sent. 4811*4882a593Smuzhiyun */ 4812*4882a593Smuzhiyun BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, /* Kernel is in syncookie mode 4813*4882a593Smuzhiyun * when sending a SYN. 4814*4882a593Smuzhiyun */ 4815*4882a593Smuzhiyun }; 4816*4882a593Smuzhiyun 4817*4882a593Smuzhiyun struct bpf_perf_event_value { 4818*4882a593Smuzhiyun __u64 counter; 4819*4882a593Smuzhiyun __u64 enabled; 4820*4882a593Smuzhiyun __u64 running; 4821*4882a593Smuzhiyun }; 4822*4882a593Smuzhiyun 4823*4882a593Smuzhiyun enum { 4824*4882a593Smuzhiyun BPF_DEVCG_ACC_MKNOD = (1ULL << 0), 4825*4882a593Smuzhiyun BPF_DEVCG_ACC_READ = (1ULL << 1), 4826*4882a593Smuzhiyun BPF_DEVCG_ACC_WRITE = (1ULL << 2), 4827*4882a593Smuzhiyun }; 4828*4882a593Smuzhiyun 4829*4882a593Smuzhiyun enum { 4830*4882a593Smuzhiyun BPF_DEVCG_DEV_BLOCK = (1ULL << 0), 4831*4882a593Smuzhiyun BPF_DEVCG_DEV_CHAR = (1ULL << 1), 4832*4882a593Smuzhiyun }; 4833*4882a593Smuzhiyun 4834*4882a593Smuzhiyun struct bpf_cgroup_dev_ctx { 4835*4882a593Smuzhiyun /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ 4836*4882a593Smuzhiyun __u32 access_type; 4837*4882a593Smuzhiyun __u32 major; 4838*4882a593Smuzhiyun __u32 minor; 4839*4882a593Smuzhiyun }; 4840*4882a593Smuzhiyun 4841*4882a593Smuzhiyun struct bpf_raw_tracepoint_args { 4842*4882a593Smuzhiyun __u64 args[0]; 4843*4882a593Smuzhiyun }; 4844*4882a593Smuzhiyun 4845*4882a593Smuzhiyun /* DIRECT: Skip the FIB rules and go to FIB table associated with device 4846*4882a593Smuzhiyun * OUTPUT: Do lookup from egress perspective; default is ingress 4847*4882a593Smuzhiyun */ 4848*4882a593Smuzhiyun enum { 4849*4882a593Smuzhiyun BPF_FIB_LOOKUP_DIRECT = (1U << 0), 4850*4882a593Smuzhiyun BPF_FIB_LOOKUP_OUTPUT = (1U << 1), 4851*4882a593Smuzhiyun }; 4852*4882a593Smuzhiyun 4853*4882a593Smuzhiyun enum { 4854*4882a593Smuzhiyun BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ 4855*4882a593Smuzhiyun BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ 4856*4882a593Smuzhiyun BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ 4857*4882a593Smuzhiyun BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ 4858*4882a593Smuzhiyun BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ 4859*4882a593Smuzhiyun BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ 4860*4882a593Smuzhiyun BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ 4861*4882a593Smuzhiyun BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ 4862*4882a593Smuzhiyun BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ 4863*4882a593Smuzhiyun }; 4864*4882a593Smuzhiyun 4865*4882a593Smuzhiyun struct bpf_fib_lookup { 4866*4882a593Smuzhiyun /* input: network family for lookup (AF_INET, AF_INET6) 4867*4882a593Smuzhiyun * output: network family of egress nexthop 4868*4882a593Smuzhiyun */ 4869*4882a593Smuzhiyun __u8 family; 4870*4882a593Smuzhiyun 4871*4882a593Smuzhiyun /* set if lookup is to consider L4 data - e.g., FIB rules */ 4872*4882a593Smuzhiyun __u8 l4_protocol; 4873*4882a593Smuzhiyun __be16 sport; 4874*4882a593Smuzhiyun __be16 dport; 4875*4882a593Smuzhiyun 4876*4882a593Smuzhiyun /* total length of packet from network header - used for MTU check */ 4877*4882a593Smuzhiyun __u16 tot_len; 4878*4882a593Smuzhiyun 4879*4882a593Smuzhiyun /* input: L3 device index for lookup 4880*4882a593Smuzhiyun * output: device index from FIB lookup 4881*4882a593Smuzhiyun */ 4882*4882a593Smuzhiyun __u32 ifindex; 4883*4882a593Smuzhiyun 4884*4882a593Smuzhiyun union { 4885*4882a593Smuzhiyun /* inputs to lookup */ 4886*4882a593Smuzhiyun __u8 tos; /* AF_INET */ 4887*4882a593Smuzhiyun __be32 flowinfo; /* AF_INET6, flow_label + priority */ 4888*4882a593Smuzhiyun 4889*4882a593Smuzhiyun /* output: metric of fib result (IPv4/IPv6 only) */ 4890*4882a593Smuzhiyun __u32 rt_metric; 4891*4882a593Smuzhiyun }; 4892*4882a593Smuzhiyun 4893*4882a593Smuzhiyun union { 4894*4882a593Smuzhiyun __be32 ipv4_src; 4895*4882a593Smuzhiyun __u32 ipv6_src[4]; /* in6_addr; network order */ 4896*4882a593Smuzhiyun }; 4897*4882a593Smuzhiyun 4898*4882a593Smuzhiyun /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in 4899*4882a593Smuzhiyun * network header. output: bpf_fib_lookup sets to gateway address 4900*4882a593Smuzhiyun * if FIB lookup returns gateway route 4901*4882a593Smuzhiyun */ 4902*4882a593Smuzhiyun union { 4903*4882a593Smuzhiyun __be32 ipv4_dst; 4904*4882a593Smuzhiyun __u32 ipv6_dst[4]; /* in6_addr; network order */ 4905*4882a593Smuzhiyun }; 4906*4882a593Smuzhiyun 4907*4882a593Smuzhiyun /* output */ 4908*4882a593Smuzhiyun __be16 h_vlan_proto; 4909*4882a593Smuzhiyun __be16 h_vlan_TCI; 4910*4882a593Smuzhiyun __u8 smac[6]; /* ETH_ALEN */ 4911*4882a593Smuzhiyun __u8 dmac[6]; /* ETH_ALEN */ 4912*4882a593Smuzhiyun }; 4913*4882a593Smuzhiyun 4914*4882a593Smuzhiyun struct bpf_redir_neigh { 4915*4882a593Smuzhiyun /* network family for lookup (AF_INET, AF_INET6) */ 4916*4882a593Smuzhiyun __u32 nh_family; 4917*4882a593Smuzhiyun /* network address of nexthop; skips fib lookup to find gateway */ 4918*4882a593Smuzhiyun union { 4919*4882a593Smuzhiyun __be32 ipv4_nh; 4920*4882a593Smuzhiyun __u32 ipv6_nh[4]; /* in6_addr; network order */ 4921*4882a593Smuzhiyun }; 4922*4882a593Smuzhiyun }; 4923*4882a593Smuzhiyun 4924*4882a593Smuzhiyun enum bpf_task_fd_type { 4925*4882a593Smuzhiyun BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */ 4926*4882a593Smuzhiyun BPF_FD_TYPE_TRACEPOINT, /* tp name */ 4927*4882a593Smuzhiyun BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */ 4928*4882a593Smuzhiyun BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */ 4929*4882a593Smuzhiyun BPF_FD_TYPE_UPROBE, /* filename + offset */ 4930*4882a593Smuzhiyun BPF_FD_TYPE_URETPROBE, /* filename + offset */ 4931*4882a593Smuzhiyun }; 4932*4882a593Smuzhiyun 4933*4882a593Smuzhiyun enum { 4934*4882a593Smuzhiyun BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0), 4935*4882a593Smuzhiyun BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1), 4936*4882a593Smuzhiyun BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2), 4937*4882a593Smuzhiyun }; 4938*4882a593Smuzhiyun 4939*4882a593Smuzhiyun struct bpf_flow_keys { 4940*4882a593Smuzhiyun __u16 nhoff; 4941*4882a593Smuzhiyun __u16 thoff; 4942*4882a593Smuzhiyun __u16 addr_proto; /* ETH_P_* of valid addrs */ 4943*4882a593Smuzhiyun __u8 is_frag; 4944*4882a593Smuzhiyun __u8 is_first_frag; 4945*4882a593Smuzhiyun __u8 is_encap; 4946*4882a593Smuzhiyun __u8 ip_proto; 4947*4882a593Smuzhiyun __be16 n_proto; 4948*4882a593Smuzhiyun __be16 sport; 4949*4882a593Smuzhiyun __be16 dport; 4950*4882a593Smuzhiyun union { 4951*4882a593Smuzhiyun struct { 4952*4882a593Smuzhiyun __be32 ipv4_src; 4953*4882a593Smuzhiyun __be32 ipv4_dst; 4954*4882a593Smuzhiyun }; 4955*4882a593Smuzhiyun struct { 4956*4882a593Smuzhiyun __u32 ipv6_src[4]; /* in6_addr; network order */ 4957*4882a593Smuzhiyun __u32 ipv6_dst[4]; /* in6_addr; network order */ 4958*4882a593Smuzhiyun }; 4959*4882a593Smuzhiyun }; 4960*4882a593Smuzhiyun __u32 flags; 4961*4882a593Smuzhiyun __be32 flow_label; 4962*4882a593Smuzhiyun }; 4963*4882a593Smuzhiyun 4964*4882a593Smuzhiyun struct bpf_func_info { 4965*4882a593Smuzhiyun __u32 insn_off; 4966*4882a593Smuzhiyun __u32 type_id; 4967*4882a593Smuzhiyun }; 4968*4882a593Smuzhiyun 4969*4882a593Smuzhiyun #define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10) 4970*4882a593Smuzhiyun #define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff) 4971*4882a593Smuzhiyun 4972*4882a593Smuzhiyun struct bpf_line_info { 4973*4882a593Smuzhiyun __u32 insn_off; 4974*4882a593Smuzhiyun __u32 file_name_off; 4975*4882a593Smuzhiyun __u32 line_off; 4976*4882a593Smuzhiyun __u32 line_col; 4977*4882a593Smuzhiyun }; 4978*4882a593Smuzhiyun 4979*4882a593Smuzhiyun struct bpf_spin_lock { 4980*4882a593Smuzhiyun __u32 val; 4981*4882a593Smuzhiyun }; 4982*4882a593Smuzhiyun 4983*4882a593Smuzhiyun struct bpf_sysctl { 4984*4882a593Smuzhiyun __u32 write; /* Sysctl is being read (= 0) or written (= 1). 4985*4882a593Smuzhiyun * Allows 1,2,4-byte read, but no write. 4986*4882a593Smuzhiyun */ 4987*4882a593Smuzhiyun __u32 file_pos; /* Sysctl file position to read from, write to. 4988*4882a593Smuzhiyun * Allows 1,2,4-byte read an 4-byte write. 4989*4882a593Smuzhiyun */ 4990*4882a593Smuzhiyun }; 4991*4882a593Smuzhiyun 4992*4882a593Smuzhiyun struct bpf_sockopt { 4993*4882a593Smuzhiyun __bpf_md_ptr(struct bpf_sock *, sk); 4994*4882a593Smuzhiyun __bpf_md_ptr(void *, optval); 4995*4882a593Smuzhiyun __bpf_md_ptr(void *, optval_end); 4996*4882a593Smuzhiyun 4997*4882a593Smuzhiyun __s32 level; 4998*4882a593Smuzhiyun __s32 optname; 4999*4882a593Smuzhiyun __s32 optlen; 5000*4882a593Smuzhiyun __s32 retval; 5001*4882a593Smuzhiyun }; 5002*4882a593Smuzhiyun 5003*4882a593Smuzhiyun struct bpf_pidns_info { 5004*4882a593Smuzhiyun __u32 pid; 5005*4882a593Smuzhiyun __u32 tgid; 5006*4882a593Smuzhiyun }; 5007*4882a593Smuzhiyun 5008*4882a593Smuzhiyun /* User accessible data for SK_LOOKUP programs. Add new fields at the end. */ 5009*4882a593Smuzhiyun struct bpf_sk_lookup { 5010*4882a593Smuzhiyun union { 5011*4882a593Smuzhiyun __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ 5012*4882a593Smuzhiyun __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */ 5013*4882a593Smuzhiyun }; 5014*4882a593Smuzhiyun 5015*4882a593Smuzhiyun __u32 family; /* Protocol family (AF_INET, AF_INET6) */ 5016*4882a593Smuzhiyun __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */ 5017*4882a593Smuzhiyun __u32 remote_ip4; /* Network byte order */ 5018*4882a593Smuzhiyun __u32 remote_ip6[4]; /* Network byte order */ 5019*4882a593Smuzhiyun __u32 remote_port; /* Network byte order */ 5020*4882a593Smuzhiyun __u32 local_ip4; /* Network byte order */ 5021*4882a593Smuzhiyun __u32 local_ip6[4]; /* Network byte order */ 5022*4882a593Smuzhiyun __u32 local_port; /* Host byte order */ 5023*4882a593Smuzhiyun }; 5024*4882a593Smuzhiyun 5025*4882a593Smuzhiyun /* 5026*4882a593Smuzhiyun * struct btf_ptr is used for typed pointer representation; the 5027*4882a593Smuzhiyun * type id is used to render the pointer data as the appropriate type 5028*4882a593Smuzhiyun * via the bpf_snprintf_btf() helper described above. A flags field - 5029*4882a593Smuzhiyun * potentially to specify additional details about the BTF pointer 5030*4882a593Smuzhiyun * (rather than its mode of display) - is included for future use. 5031*4882a593Smuzhiyun * Display flags - BTF_F_* - are passed to bpf_snprintf_btf separately. 5032*4882a593Smuzhiyun */ 5033*4882a593Smuzhiyun struct btf_ptr { 5034*4882a593Smuzhiyun void *ptr; 5035*4882a593Smuzhiyun __u32 type_id; 5036*4882a593Smuzhiyun __u32 flags; /* BTF ptr flags; unused at present. */ 5037*4882a593Smuzhiyun }; 5038*4882a593Smuzhiyun 5039*4882a593Smuzhiyun /* 5040*4882a593Smuzhiyun * Flags to control bpf_snprintf_btf() behaviour. 5041*4882a593Smuzhiyun * - BTF_F_COMPACT: no formatting around type information 5042*4882a593Smuzhiyun * - BTF_F_NONAME: no struct/union member names/types 5043*4882a593Smuzhiyun * - BTF_F_PTR_RAW: show raw (unobfuscated) pointer values; 5044*4882a593Smuzhiyun * equivalent to %px. 5045*4882a593Smuzhiyun * - BTF_F_ZERO: show zero-valued struct/union members; they 5046*4882a593Smuzhiyun * are not displayed by default 5047*4882a593Smuzhiyun */ 5048*4882a593Smuzhiyun enum { 5049*4882a593Smuzhiyun BTF_F_COMPACT = (1ULL << 0), 5050*4882a593Smuzhiyun BTF_F_NONAME = (1ULL << 1), 5051*4882a593Smuzhiyun BTF_F_PTR_RAW = (1ULL << 2), 5052*4882a593Smuzhiyun BTF_F_ZERO = (1ULL << 3), 5053*4882a593Smuzhiyun }; 5054*4882a593Smuzhiyun 5055*4882a593Smuzhiyun #endif /* _UAPI__LINUX_BPF_H__ */ 5056