Lines Matching +full:path +full:-+full:map (tools/lib/bpf/libbpf.c)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
71 #pragma GCC diagnostic ignored "-Wformat-nonliteral"
116 if (err != -EPERM || geteuid() != 0) in pr_perm_msg()
133 pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n", in pr_perm_msg()
149 fd = -1; \
159 /* v4.14: kernel support for program & map names. */
235 * program. For the entry-point (main) BPF program, this is always
236 * zero. For a sub-program, this gets reset before each of main BPF
238 * whether sub-program was already appended to the main program, and
256 * entry-point BPF programs this includes the size of main program
257 * itself plus all the used sub-programs, appended at the end
304 * kern_vdata's size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
474 char path[]; member
476 #define obj_elf_valid(o) ((o)->efile.elf)
497 * it is possible that prog->instances.nr == -1. in bpf_program__unload()
499 if (prog->instances.nr > 0) { in bpf_program__unload()
500 for (i = 0; i < prog->instances.nr; i++) in bpf_program__unload()
501 zclose(prog->instances.fds[i]); in bpf_program__unload()
502 } else if (prog->instances.nr != -1) { in bpf_program__unload()
504 prog->instances.nr); in bpf_program__unload()
507 prog->instances.nr = -1; in bpf_program__unload()
508 zfree(&prog->instances.fds); in bpf_program__unload()
510 zfree(&prog->func_info); in bpf_program__unload()
511 zfree(&prog->line_info); in bpf_program__unload()
519 if (prog->clear_priv) in bpf_program__exit()
520 prog->clear_priv(prog, prog->priv); in bpf_program__exit()
522 prog->priv = NULL; in bpf_program__exit()
523 prog->clear_priv = NULL; in bpf_program__exit()
526 zfree(&prog->name); in bpf_program__exit()
527 zfree(&prog->sec_name); in bpf_program__exit()
528 zfree(&prog->pin_name); in bpf_program__exit()
529 zfree(&prog->insns); in bpf_program__exit()
530 zfree(&prog->reloc_desc); in bpf_program__exit()
532 prog->nr_reloc = 0; in bpf_program__exit()
533 prog->insns_cnt = 0; in bpf_program__exit()
534 prog->sec_idx = -1; in bpf_program__exit()
541 name = p = strdup(prog->sec_name); in __bpf_program__pin_name()
550 return BPF_CLASS(insn->code) == BPF_JMP && in insn_is_subprog_call()
551 BPF_OP(insn->code) == BPF_CALL && in insn_is_subprog_call()
552 BPF_SRC(insn->code) == BPF_K && in insn_is_subprog_call()
553 insn->src_reg == BPF_PSEUDO_CALL && in insn_is_subprog_call()
554 insn->dst_reg == 0 && in insn_is_subprog_call()
555 insn->off == 0; in insn_is_subprog_call()
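/* A hedged illustration of the instruction shape matched above: a
 * BPF-to-BPF pseudo call as emitted by the compiler. The imm value is
 * illustrative only; it holds the instruction delta to the callee.
 */
#include <linux/bpf.h>

static const struct bpf_insn sample_subprog_call = {
	.code    = BPF_JMP | BPF_CALL,	/* class BPF_JMP, op BPF_CALL, src BPF_K */
	.dst_reg = 0,
	.src_reg = BPF_PSEUDO_CALL,	/* distinguishes it from a helper call */
	.off     = 0,
	.imm     = 1,			/* relative offset to callee, in insns */
};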
566 return -EINVAL; in bpf_object__init_prog()
570 prog->obj = obj; in bpf_object__init_prog()
572 prog->sec_idx = sec_idx; in bpf_object__init_prog()
573 prog->sec_insn_off = sec_off / BPF_INSN_SZ; in bpf_object__init_prog()
574 prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ; in bpf_object__init_prog()
576 prog->insns_cnt = prog->sec_insn_cnt; in bpf_object__init_prog()
578 prog->type = BPF_PROG_TYPE_UNSPEC; in bpf_object__init_prog()
579 prog->load = true; in bpf_object__init_prog()
581 prog->instances.fds = NULL; in bpf_object__init_prog()
582 prog->instances.nr = -1; in bpf_object__init_prog()
584 prog->sec_name = strdup(sec_name); in bpf_object__init_prog()
585 if (!prog->sec_name) in bpf_object__init_prog()
588 prog->name = strdup(name); in bpf_object__init_prog()
589 if (!prog->name) in bpf_object__init_prog()
592 prog->pin_name = __bpf_program__pin_name(prog); in bpf_object__init_prog()
593 if (!prog->pin_name) in bpf_object__init_prog()
596 prog->insns = malloc(insn_data_sz); in bpf_object__init_prog()
597 if (!prog->insns) in bpf_object__init_prog()
599 memcpy(prog->insns, insn_data, insn_data_sz); in bpf_object__init_prog()
605 return -ENOMEM; in bpf_object__init_prog()
613 void *data = sec_data->d_buf; in bpf_object__add_programs()
614 size_t sec_sz = sec_data->d_size, sec_off, prog_sz; in bpf_object__add_programs()
619 progs = obj->programs; in bpf_object__add_programs()
620 nr_progs = obj->nr_programs; in bpf_object__add_programs()
627 return -LIBBPF_ERRNO__FORMAT; in bpf_object__add_programs()
636 return -LIBBPF_ERRNO__FORMAT; in bpf_object__add_programs()
642 return -LIBBPF_ERRNO__FORMAT; in bpf_object__add_programs()
651 * In this case the original obj->programs in bpf_object__add_programs()
657 return -ENOMEM; in bpf_object__add_programs()
659 obj->programs = progs; in bpf_object__add_programs()
669 obj->nr_programs = nr_progs; in bpf_object__add_programs()
710 if (!strcmp(btf__name_by_offset(btf, m->name_off), name)) in find_member_by_name()
762 if (kern_data_member->type == kern_type_id) in find_struct_ops_kern_types()
768 return -EINVAL; in find_struct_ops_kern_types()
780 static bool bpf_map__is_struct_ops(const struct bpf_map *map) in bpf_map__is_struct_ops() argument
782 return map->def.type == BPF_MAP_TYPE_STRUCT_OPS; in bpf_map__is_struct_ops()
785 /* Init the map's fields that depend on kern_btf */
786 static int bpf_map__init_kern_struct_ops(struct bpf_map *map, in bpf_map__init_kern_struct_ops() argument
798 st_ops = map->st_ops; in bpf_map__init_kern_struct_ops()
799 type = st_ops->type; in bpf_map__init_kern_struct_ops()
800 tname = st_ops->tname; in bpf_map__init_kern_struct_ops()
809 map->name, st_ops->type_id, kern_type_id, kern_vtype_id); in bpf_map__init_kern_struct_ops()
811 map->def.value_size = kern_vtype->size; in bpf_map__init_kern_struct_ops()
812 map->btf_vmlinux_value_type_id = kern_vtype_id; in bpf_map__init_kern_struct_ops()
814 st_ops->kern_vdata = calloc(1, kern_vtype->size); in bpf_map__init_kern_struct_ops()
815 if (!st_ops->kern_vdata) in bpf_map__init_kern_struct_ops()
816 return -ENOMEM; in bpf_map__init_kern_struct_ops()
818 data = st_ops->data; in bpf_map__init_kern_struct_ops()
819 kern_data_off = kern_data_member->offset / 8; in bpf_map__init_kern_struct_ops()
820 kern_data = st_ops->kern_vdata + kern_data_off; in bpf_map__init_kern_struct_ops()
832 mname = btf__name_by_offset(btf, member->name_off); in bpf_map__init_kern_struct_ops()
836 map->name, mname); in bpf_map__init_kern_struct_ops()
837 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
840 kern_member_idx = kern_member - btf_members(kern_type); in bpf_map__init_kern_struct_ops()
844 map->name, mname); in bpf_map__init_kern_struct_ops()
845 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
848 moff = member->offset / 8; in bpf_map__init_kern_struct_ops()
849 kern_moff = kern_member->offset / 8; in bpf_map__init_kern_struct_ops()
854 mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id); in bpf_map__init_kern_struct_ops()
855 kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type, in bpf_map__init_kern_struct_ops()
857 if (BTF_INFO_KIND(mtype->info) != in bpf_map__init_kern_struct_ops()
858 BTF_INFO_KIND(kern_mtype->info)) { in bpf_map__init_kern_struct_ops()
860 map->name, mname, BTF_INFO_KIND(mtype->info), in bpf_map__init_kern_struct_ops()
861 BTF_INFO_KIND(kern_mtype->info)); in bpf_map__init_kern_struct_ops()
862 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
868 prog = st_ops->progs[i]; in bpf_map__init_kern_struct_ops()
873 kern_mtype->type, in bpf_map__init_kern_struct_ops()
876 /* mtype->type must be a func_proto which was in bpf_map__init_kern_struct_ops()
882 map->name, mname); in bpf_map__init_kern_struct_ops()
883 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
886 prog->attach_btf_id = kern_type_id; in bpf_map__init_kern_struct_ops()
887 prog->expected_attach_type = kern_member_idx; in bpf_map__init_kern_struct_ops()
889 st_ops->kern_func_off[i] = kern_data_off + kern_moff; in bpf_map__init_kern_struct_ops()
892 map->name, mname, prog->name, moff, in bpf_map__init_kern_struct_ops()
902 map->name, mname, (ssize_t)msize, in bpf_map__init_kern_struct_ops()
904 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
908 map->name, mname, (unsigned int)msize, in bpf_map__init_kern_struct_ops()
918 struct bpf_map *map; in bpf_object__init_kern_struct_ops_maps() local
922 for (i = 0; i < obj->nr_maps; i++) { in bpf_object__init_kern_struct_ops_maps()
923 map = &obj->maps[i]; in bpf_object__init_kern_struct_ops_maps()
925 if (!bpf_map__is_struct_ops(map)) in bpf_object__init_kern_struct_ops_maps()
928 err = bpf_map__init_kern_struct_ops(map, obj->btf, in bpf_object__init_kern_struct_ops_maps()
929 obj->btf_vmlinux); in bpf_object__init_kern_struct_ops_maps()
945 struct bpf_map *map; in bpf_object__init_struct_ops_maps() local
948 if (obj->efile.st_ops_shndx == -1) in bpf_object__init_struct_ops_maps()
951 btf = obj->btf; in bpf_object__init_struct_ops_maps()
957 return -EINVAL; in bpf_object__init_struct_ops_maps()
963 type = btf__type_by_id(obj->btf, vsi->type); in bpf_object__init_struct_ops_maps()
964 var_name = btf__name_by_offset(obj->btf, type->name_off); in bpf_object__init_struct_ops_maps()
966 type_id = btf__resolve_type(obj->btf, vsi->type); in bpf_object__init_struct_ops_maps()
969 vsi->type, STRUCT_OPS_SEC); in bpf_object__init_struct_ops_maps()
970 return -EINVAL; in bpf_object__init_struct_ops_maps()
973 type = btf__type_by_id(obj->btf, type_id); in bpf_object__init_struct_ops_maps()
974 tname = btf__name_by_offset(obj->btf, type->name_off); in bpf_object__init_struct_ops_maps()
977 return -ENOTSUP; in bpf_object__init_struct_ops_maps()
981 return -EINVAL; in bpf_object__init_struct_ops_maps()
984 map = bpf_object__add_map(obj); in bpf_object__init_struct_ops_maps()
985 if (IS_ERR(map)) in bpf_object__init_struct_ops_maps()
986 return PTR_ERR(map); in bpf_object__init_struct_ops_maps()
988 map->sec_idx = obj->efile.st_ops_shndx; in bpf_object__init_struct_ops_maps()
989 map->sec_offset = vsi->offset; in bpf_object__init_struct_ops_maps()
990 map->name = strdup(var_name); in bpf_object__init_struct_ops_maps()
991 if (!map->name) in bpf_object__init_struct_ops_maps()
992 return -ENOMEM; in bpf_object__init_struct_ops_maps()
994 map->def.type = BPF_MAP_TYPE_STRUCT_OPS; in bpf_object__init_struct_ops_maps()
995 map->def.key_size = sizeof(int); in bpf_object__init_struct_ops_maps()
996 map->def.value_size = type->size; in bpf_object__init_struct_ops_maps()
997 map->def.max_entries = 1; in bpf_object__init_struct_ops_maps()
999 map->st_ops = calloc(1, sizeof(*map->st_ops)); in bpf_object__init_struct_ops_maps()
1000 if (!map->st_ops) in bpf_object__init_struct_ops_maps()
1001 return -ENOMEM; in bpf_object__init_struct_ops_maps()
1002 st_ops = map->st_ops; in bpf_object__init_struct_ops_maps()
1003 st_ops->data = malloc(type->size); in bpf_object__init_struct_ops_maps()
1004 st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs)); in bpf_object__init_struct_ops_maps()
1005 st_ops->kern_func_off = malloc(btf_vlen(type) * in bpf_object__init_struct_ops_maps()
1006 sizeof(*st_ops->kern_func_off)); in bpf_object__init_struct_ops_maps()
1007 if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off) in bpf_object__init_struct_ops_maps()
1008 return -ENOMEM; in bpf_object__init_struct_ops_maps()
1010 if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) { in bpf_object__init_struct_ops_maps()
1013 return -EINVAL; in bpf_object__init_struct_ops_maps()
1016 memcpy(st_ops->data, in bpf_object__init_struct_ops_maps()
1017 obj->efile.st_ops_data->d_buf + vsi->offset, in bpf_object__init_struct_ops_maps()
1018 type->size); in bpf_object__init_struct_ops_maps()
1019 st_ops->tname = tname; in bpf_object__init_struct_ops_maps()
1020 st_ops->type = type; in bpf_object__init_struct_ops_maps()
1021 st_ops->type_id = type_id; in bpf_object__init_struct_ops_maps()
1024 tname, type_id, var_name, vsi->offset); in bpf_object__init_struct_ops_maps()
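/* A hedged sketch of the BPF-side source this init path consumes: a
 * struct_ops variable in the ".struct_ops" section plus a program in a
 * "struct_ops/" section. Names (cubic_init, cubic_ops, "bpf_cubic") are
 * illustrative; BPF_PROG comes from <bpf/bpf_tracing.h> and the kernel
 * types from vmlinux.h.
 */
SEC("struct_ops/cubic_init")
void BPF_PROG(cubic_init, struct sock *sk)
{
	/* initialize private congestion-control state here */
}

SEC(".struct_ops")
struct tcp_congestion_ops cubic_ops = {
	.init = (void *)cubic_init,
	.name = "bpf_cubic",
};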
1030 static struct bpf_object *bpf_object__new(const char *path, in bpf_object__new() argument
1038 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1); in bpf_object__new()
1040 pr_warn("alloc memory failed for %s\n", path); in bpf_object__new()
1041 return ERR_PTR(-ENOMEM); in bpf_object__new()
1044 strcpy(obj->path, path); in bpf_object__new()
1046 strncpy(obj->name, obj_name, sizeof(obj->name) - 1); in bpf_object__new()
1047 obj->name[sizeof(obj->name) - 1] = 0; in bpf_object__new()
1050 strncpy(obj->name, basename((void *)path), in bpf_object__new()
1051 sizeof(obj->name) - 1); in bpf_object__new()
1052 end = strchr(obj->name, '.'); in bpf_object__new()
1057 obj->efile.fd = -1; in bpf_object__new()
1064 obj->efile.obj_buf = obj_buf; in bpf_object__new()
1065 obj->efile.obj_buf_sz = obj_buf_sz; in bpf_object__new()
1066 obj->efile.maps_shndx = -1; in bpf_object__new()
1067 obj->efile.btf_maps_shndx = -1; in bpf_object__new()
1068 obj->efile.data_shndx = -1; in bpf_object__new()
1069 obj->efile.rodata_shndx = -1; in bpf_object__new()
1070 obj->efile.bss_shndx = -1; in bpf_object__new()
1071 obj->efile.st_ops_shndx = -1; in bpf_object__new()
1072 obj->kconfig_map_idx = -1; in bpf_object__new()
1073 obj->rodata_map_idx = -1; in bpf_object__new()
1075 obj->kern_version = get_kernel_version(); in bpf_object__new()
1076 obj->loaded = false; in bpf_object__new()
1078 INIT_LIST_HEAD(&obj->list); in bpf_object__new()
1079 list_add(&obj->list, &bpf_objects_list); in bpf_object__new()
1088 if (obj->efile.elf) { in bpf_object__elf_finish()
1089 elf_end(obj->efile.elf); in bpf_object__elf_finish()
1090 obj->efile.elf = NULL; in bpf_object__elf_finish()
1092 obj->efile.symbols = NULL; in bpf_object__elf_finish()
1093 obj->efile.data = NULL; in bpf_object__elf_finish()
1094 obj->efile.rodata = NULL; in bpf_object__elf_finish()
1095 obj->efile.bss = NULL; in bpf_object__elf_finish()
1096 obj->efile.st_ops_data = NULL; in bpf_object__elf_finish()
1098 zfree(&obj->efile.reloc_sects); in bpf_object__elf_finish()
1099 obj->efile.nr_reloc_sects = 0; in bpf_object__elf_finish()
1100 zclose(obj->efile.fd); in bpf_object__elf_finish()
1101 obj->efile.obj_buf = NULL; in bpf_object__elf_finish()
1102 obj->efile.obj_buf_sz = 0; in bpf_object__elf_finish()
1117 return -LIBBPF_ERRNO__LIBELF; in bpf_object__elf_init()
1120 if (obj->efile.obj_buf_sz > 0) { in bpf_object__elf_init()
1125 obj->efile.elf = elf_memory((char *)obj->efile.obj_buf, in bpf_object__elf_init()
1126 obj->efile.obj_buf_sz); in bpf_object__elf_init()
1128 obj->efile.fd = open(obj->path, O_RDONLY); in bpf_object__elf_init()
1129 if (obj->efile.fd < 0) { in bpf_object__elf_init()
1132 err = -errno; in bpf_object__elf_init()
1134 pr_warn("elf: failed to open %s: %s\n", obj->path, cp); in bpf_object__elf_init()
1138 obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL); in bpf_object__elf_init()
1141 if (!obj->efile.elf) { in bpf_object__elf_init()
1142 pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1)); in bpf_object__elf_init()
1143 err = -LIBBPF_ERRNO__LIBELF; in bpf_object__elf_init()
1147 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) { in bpf_object__elf_init()
1148 pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1)); in bpf_object__elf_init()
1149 err = -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_init()
1152 ep = &obj->efile.ehdr; in bpf_object__elf_init()
1154 if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) { in bpf_object__elf_init()
1156 obj->path, elf_errmsg(-1)); in bpf_object__elf_init()
1157 err = -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_init()
1162 if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) { in bpf_object__elf_init()
1164 obj->path, elf_errmsg(-1)); in bpf_object__elf_init()
1165 err = -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_init()
1170 if (ep->e_type != ET_REL || in bpf_object__elf_init()
1171 (ep->e_machine && ep->e_machine != EM_BPF)) { in bpf_object__elf_init()
1172 pr_warn("elf: %s is not a valid eBPF object file\n", obj->path); in bpf_object__elf_init()
1173 err = -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_init()
1186 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB) in bpf_object__check_endianness()
1189 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB) in bpf_object__check_endianness()
1194 pr_warn("elf: endianness mismatch in %s.\n", obj->path); in bpf_object__check_endianness()
1195 return -LIBBPF_ERRNO__ENDIAN; in bpf_object__check_endianness()
1201 memcpy(obj->license, data, min(size, sizeof(obj->license) - 1)); in bpf_object__init_license()
1202 pr_debug("license of %s is %s\n", obj->path, obj->license); in bpf_object__init_license()
1212 pr_warn("invalid kver section in %s\n", obj->path); in bpf_object__init_kversion()
1213 return -LIBBPF_ERRNO__FORMAT; in bpf_object__init_kversion()
1216 obj->kern_version = kver; in bpf_object__init_kversion()
1217 pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version); in bpf_object__init_kversion()
1232 int ret = -ENOENT; in bpf_object__section_size()
1236 return -EINVAL; in bpf_object__section_size()
1238 if (obj->efile.data) in bpf_object__section_size()
1239 *size = obj->efile.data->d_size; in bpf_object__section_size()
1241 if (obj->efile.bss) in bpf_object__section_size()
1242 *size = obj->efile.bss->d_size; in bpf_object__section_size()
1244 if (obj->efile.rodata) in bpf_object__section_size()
1245 *size = obj->efile.rodata->d_size; in bpf_object__section_size()
1247 if (obj->efile.st_ops_data) in bpf_object__section_size()
1248 *size = obj->efile.st_ops_data->d_size; in bpf_object__section_size()
1255 *size = data->d_size; in bpf_object__section_size()
1265 Elf_Data *symbols = obj->efile.symbols; in bpf_object__variable_offset()
1270 return -EINVAL; in bpf_object__variable_offset()
1272 for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) { in bpf_object__variable_offset()
1285 return -EIO; in bpf_object__variable_offset()
1293 return -ENOENT; in bpf_object__variable_offset()
1302 if (obj->nr_maps < obj->maps_cap) in bpf_object__add_map()
1303 return &obj->maps[obj->nr_maps++]; in bpf_object__add_map()
1305 new_cap = max((size_t)4, obj->maps_cap * 3 / 2); in bpf_object__add_map()
1306 new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps)); in bpf_object__add_map()
1309 return ERR_PTR(-ENOMEM); in bpf_object__add_map()
1312 obj->maps_cap = new_cap; in bpf_object__add_map()
1313 obj->maps = new_maps; in bpf_object__add_map()
1316 memset(obj->maps + obj->nr_maps, 0, in bpf_object__add_map()
1317 (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps)); in bpf_object__add_map()
1319 * fill all fds with -1 so we won't close an incorrect fd (fd=0 is stdin) in bpf_object__add_map()
1322 for (i = obj->nr_maps; i < obj->maps_cap; i++) { in bpf_object__add_map()
1323 obj->maps[i].fd = -1; in bpf_object__add_map()
1324 obj->maps[i].inner_map_fd = -1; in bpf_object__add_map()
1327 return &obj->maps[obj->nr_maps++]; in bpf_object__add_map()
1330 static size_t bpf_map_mmap_sz(const struct bpf_map *map) in bpf_map_mmap_sz() argument
1335 map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries; in bpf_map_mmap_sz()
1346 int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, in internal_map_name()
1347 strlen(obj->name)); in internal_map_name()
1349 snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name, in internal_map_name()
1352 /* sanitise map name to characters allowed by kernel */ in internal_map_name()
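	/* (the matched listing omits the loop itself; in the source it is,
	 * roughly, the following, with isalnum() from <ctype.h> and char *p
	 * declared earlier in the function:)
	 */
	for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
		if (!isalnum(*p) && *p != '_' && *p != '.')
			*p = '_';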
1365 struct bpf_map *map; in bpf_object__init_internal_map() local
1368 map = bpf_object__add_map(obj); in bpf_object__init_internal_map()
1369 if (IS_ERR(map)) in bpf_object__init_internal_map()
1370 return PTR_ERR(map); in bpf_object__init_internal_map()
1372 map->libbpf_type = type; in bpf_object__init_internal_map()
1373 map->sec_idx = sec_idx; in bpf_object__init_internal_map()
1374 map->sec_offset = 0; in bpf_object__init_internal_map()
1375 map->name = internal_map_name(obj, type); in bpf_object__init_internal_map()
1376 if (!map->name) { in bpf_object__init_internal_map()
1377 pr_warn("failed to alloc map name\n"); in bpf_object__init_internal_map()
1378 return -ENOMEM; in bpf_object__init_internal_map()
1381 def = &map->def; in bpf_object__init_internal_map()
1382 def->type = BPF_MAP_TYPE_ARRAY; in bpf_object__init_internal_map()
1383 def->key_size = sizeof(int); in bpf_object__init_internal_map()
1384 def->value_size = data_sz; in bpf_object__init_internal_map()
1385 def->max_entries = 1; in bpf_object__init_internal_map()
1386 def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG in bpf_object__init_internal_map()
1388 def->map_flags |= BPF_F_MMAPABLE; in bpf_object__init_internal_map()
1390 pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n", in bpf_object__init_internal_map()
1391 map->name, map->sec_idx, map->sec_offset, def->map_flags); in bpf_object__init_internal_map()
1393 map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE, in bpf_object__init_internal_map()
1394 MAP_SHARED | MAP_ANONYMOUS, -1, 0); in bpf_object__init_internal_map()
1395 if (map->mmaped == MAP_FAILED) { in bpf_object__init_internal_map()
1396 err = -errno; in bpf_object__init_internal_map()
1397 map->mmaped = NULL; in bpf_object__init_internal_map()
1398 pr_warn("failed to alloc map '%s' content buffer: %d\n", in bpf_object__init_internal_map()
1399 map->name, err); in bpf_object__init_internal_map()
1400 zfree(&map->name); in bpf_object__init_internal_map()
1405 memcpy(map->mmaped, data, data_sz); in bpf_object__init_internal_map()
1407 pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name); in bpf_object__init_internal_map()
1416 * Populate obj->maps with libbpf internal maps. in bpf_object__init_global_data_maps()
1418 if (obj->efile.data_shndx >= 0) { in bpf_object__init_global_data_maps()
1420 obj->efile.data_shndx, in bpf_object__init_global_data_maps()
1421 obj->efile.data->d_buf, in bpf_object__init_global_data_maps()
1422 obj->efile.data->d_size); in bpf_object__init_global_data_maps()
1426 if (obj->efile.rodata_shndx >= 0) { in bpf_object__init_global_data_maps()
1428 obj->efile.rodata_shndx, in bpf_object__init_global_data_maps()
1429 obj->efile.rodata->d_buf, in bpf_object__init_global_data_maps()
1430 obj->efile.rodata->d_size); in bpf_object__init_global_data_maps()
1434 obj->rodata_map_idx = obj->nr_maps - 1; in bpf_object__init_global_data_maps()
1436 if (obj->efile.bss_shndx >= 0) { in bpf_object__init_global_data_maps()
1438 obj->efile.bss_shndx, in bpf_object__init_global_data_maps()
1440 obj->efile.bss->d_size); in bpf_object__init_global_data_maps()
1453 for (i = 0; i < obj->nr_extern; i++) { in find_extern_by_name()
1454 if (strcmp(obj->externs[i].name, name) == 0) in find_extern_by_name()
1455 return &obj->externs[i]; in find_extern_by_name()
1463 switch (ext->kcfg.type) { in set_kcfg_value_tri()
1467 ext->name, value); in set_kcfg_value_tri()
1468 return -EINVAL; in set_kcfg_value_tri()
1488 ext->name, value); in set_kcfg_value_tri()
1489 return -EINVAL; in set_kcfg_value_tri()
1491 ext->is_set = true; in set_kcfg_value_tri()
1500 if (ext->kcfg.type != KCFG_CHAR_ARR) { in set_kcfg_value_str()
1501 pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value); in set_kcfg_value_str()
1502 return -EINVAL; in set_kcfg_value_str()
1506 if (value[len - 1] != '"') { in set_kcfg_value_str()
1508 ext->name, value); in set_kcfg_value_str()
1509 return -EINVAL; in set_kcfg_value_str()
1513 len -= 2; in set_kcfg_value_str()
1514 if (len >= ext->kcfg.sz) { in set_kcfg_value_str()
1516 ext->name, value, len, ext->kcfg.sz - 1); in set_kcfg_value_str()
1517 len = ext->kcfg.sz - 1; in set_kcfg_value_str()
1521 ext->is_set = true; in set_kcfg_value_str()
1533 err = -errno; in parse_u64()
1539 return -EINVAL; in parse_u64()
1546 int bit_sz = ext->kcfg.sz * 8; in is_kcfg_value_in_range()
1548 if (ext->kcfg.sz == 8) in is_kcfg_value_in_range()
1551 /* Validate that value stored in u64 fits in integer of `ext->sz` in is_kcfg_value_in_range()
1556 * -2^(Y-1) <= X <= 2^(Y-1) - 1 in is_kcfg_value_in_range()
1557 * 0 <= X + 2^(Y-1) <= 2^Y - 1 in is_kcfg_value_in_range()
1558 * 0 <= X + 2^(Y-1) < 2^Y in is_kcfg_value_in_range()
1560 * For unsigned target integer, check that all the (64 - Y) bits are in is_kcfg_value_in_range()
1563 if (ext->kcfg.is_signed) in is_kcfg_value_in_range()
1564 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz); in is_kcfg_value_in_range()
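/* A worked instance of the signed check above (illustrative): with
 * bit_sz == 8, v = (__u64)-1 encodes -1; v + (1ULL << 7) wraps to 127,
 * which is < (1ULL << 8), so -1 is accepted. By contrast v = 128 gives
 * 128 + 128 = 256, which fails the comparison, so +128 is correctly
 * rejected for a signed 1-byte target.
 */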
1572 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) { in set_kcfg_value_num()
1574 ext->name, (unsigned long long)value); in set_kcfg_value_num()
1575 return -EINVAL; in set_kcfg_value_num()
1579 ext->name, (unsigned long long)value, ext->kcfg.sz); in set_kcfg_value_num()
1580 return -ERANGE; in set_kcfg_value_num()
1582 switch (ext->kcfg.sz) { in set_kcfg_value_num()
1588 return -EINVAL; in set_kcfg_value_num()
1590 ext->is_set = true; in set_kcfg_value_num()
1609 return -EINVAL; in bpf_object__process_kconfig_line()
1614 if (buf[len - 1] == '\n') in bpf_object__process_kconfig_line()
1615 buf[len - 1] = '\0'; in bpf_object__process_kconfig_line()
1621 return -EINVAL; in bpf_object__process_kconfig_line()
1625 if (!ext || ext->is_set) in bpf_object__process_kconfig_line()
1628 ext_val = data + ext->kcfg.data_off; in bpf_object__process_kconfig_line()
1643 ext->name, value); in bpf_object__process_kconfig_line()
1651 pr_debug("extern (kcfg) %s=%s\n", ext->name, value); in bpf_object__process_kconfig_line()
1663 len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release); in bpf_object__read_kconfig_file()
1665 return -EINVAL; in bpf_object__read_kconfig_file()
1667 return -ENAMETOOLONG; in bpf_object__read_kconfig_file()
1676 return -ENOENT; in bpf_object__read_kconfig_file()
1702 err = -errno; in bpf_object__read_kconfig_mem()
1703 pr_warn("failed to open in-memory Kconfig: %d\n", err); in bpf_object__read_kconfig_mem()
1710 pr_warn("error parsing in-memory Kconfig line '%s': %d\n", in bpf_object__read_kconfig_mem()
1726 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__init_kconfig_map()
1727 ext = &obj->externs[i]; in bpf_object__init_kconfig_map()
1728 if (ext->type == EXT_KCFG) in bpf_object__init_kconfig_map()
1735 map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz; in bpf_object__init_kconfig_map()
1737 obj->efile.symbols_shndx, in bpf_object__init_kconfig_map()
1742 obj->kconfig_map_idx = obj->nr_maps - 1; in bpf_object__init_kconfig_map()
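/* A hedged example of the BPF-side externs whose offsets size the
 * .kconfig map above; __kconfig comes from <bpf/bpf_helpers.h>:
 */
extern int CONFIG_HZ __kconfig;
extern unsigned int LINUX_KERNEL_VERSION __kconfig;	/* synthesized by libbpf */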
1749 Elf_Data *symbols = obj->efile.symbols; in bpf_object__init_user_maps()
1754 if (obj->efile.maps_shndx < 0) in bpf_object__init_user_maps()
1758 return -EINVAL; in bpf_object__init_user_maps()
1761 scn = elf_sec_by_idx(obj, obj->efile.maps_shndx); in bpf_object__init_user_maps()
1764 pr_warn("elf: failed to get legacy map definitions for %s\n", in bpf_object__init_user_maps()
1765 obj->path); in bpf_object__init_user_maps()
1766 return -EINVAL; in bpf_object__init_user_maps()
1770 * Count the number of maps. Each map has a name. in bpf_object__init_user_maps()
1774 * TODO: Detect arrays of maps and report an error. in bpf_object__init_user_maps()
1776 nr_syms = symbols->d_size / sizeof(GElf_Sym); in bpf_object__init_user_maps()
1782 if (sym.st_shndx != obj->efile.maps_shndx) in bpf_object__init_user_maps()
1786 /* Assume equally sized map definitions */ in bpf_object__init_user_maps()
1787 pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n", in bpf_object__init_user_maps()
1788 nr_maps, data->d_size, obj->path); in bpf_object__init_user_maps()
1790 if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) { in bpf_object__init_user_maps()
1791 pr_warn("elf: unable to determine legacy map definition size in %s\n", in bpf_object__init_user_maps()
1792 obj->path); in bpf_object__init_user_maps()
1793 return -EINVAL; in bpf_object__init_user_maps()
1795 map_def_sz = data->d_size / nr_maps; in bpf_object__init_user_maps()
1797 /* Fill obj->maps using data in "maps" section. */ in bpf_object__init_user_maps()
1802 struct bpf_map *map; in bpf_object__init_user_maps() local
1806 if (sym.st_shndx != obj->efile.maps_shndx) in bpf_object__init_user_maps()
1809 map = bpf_object__add_map(obj); in bpf_object__init_user_maps()
1810 if (IS_ERR(map)) in bpf_object__init_user_maps()
1811 return PTR_ERR(map); in bpf_object__init_user_maps()
1815 pr_warn("failed to get map #%d name sym string for obj %s\n", in bpf_object__init_user_maps()
1816 i, obj->path); in bpf_object__init_user_maps()
1817 return -LIBBPF_ERRNO__FORMAT; in bpf_object__init_user_maps()
1820 map->libbpf_type = LIBBPF_MAP_UNSPEC; in bpf_object__init_user_maps()
1821 map->sec_idx = sym.st_shndx; in bpf_object__init_user_maps()
1822 map->sec_offset = sym.st_value; in bpf_object__init_user_maps()
1823 pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n", in bpf_object__init_user_maps()
1824 map_name, map->sec_idx, map->sec_offset); in bpf_object__init_user_maps()
1825 if (sym.st_value + map_def_sz > data->d_size) { in bpf_object__init_user_maps()
1826 pr_warn("corrupted maps section in %s: last map \"%s\" too small\n", in bpf_object__init_user_maps()
1827 obj->path, map_name); in bpf_object__init_user_maps()
1828 return -EINVAL; in bpf_object__init_user_maps()
1831 map->name = strdup(map_name); in bpf_object__init_user_maps()
1832 if (!map->name) { in bpf_object__init_user_maps()
1833 pr_warn("failed to alloc map name\n"); in bpf_object__init_user_maps()
1834 return -ENOMEM; in bpf_object__init_user_maps()
1836 pr_debug("map %d is \"%s\"\n", i, map->name); in bpf_object__init_user_maps()
1837 def = (struct bpf_map_def *)(data->d_buf + sym.st_value); in bpf_object__init_user_maps()
1839 * If the definition of the map in the object file fits in in bpf_object__init_user_maps()
1845 memcpy(&map->def, def, map_def_sz); in bpf_object__init_user_maps()
1848 * Here the map structure being read is bigger than what in bpf_object__init_user_maps()
1850 * If they are not zero, reject this map as in bpf_object__init_user_maps()
1858 pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n", in bpf_object__init_user_maps()
1859 obj->path, map_name); in bpf_object__init_user_maps()
1861 return -EINVAL; in bpf_object__init_user_maps()
1864 memcpy(&map->def, def, sizeof(struct bpf_map_def)); in bpf_object__init_user_maps()
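/* A hedged example of the legacy, pre-BTF definition style parsed
 * above: a struct bpf_map_def placed in the "maps" ELF section (SEC()
 * and struct bpf_map_def from <bpf/bpf_helpers.h>):
 */
struct bpf_map_def SEC("maps") legacy_counts = {
	.type        = BPF_MAP_TYPE_ARRAY,
	.key_size    = sizeof(int),
	.value_size  = sizeof(long),
	.max_entries = 64,
};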
1880 *res_id = t->type; in skip_mods_and_typedefs()
1881 t = btf__type_by_id(btf, t->type); in skip_mods_and_typedefs()
1896 t = skip_mods_and_typedefs(btf, t->type, res_id); in resolve_func_ptr()
1925 * Fetch integer attribute of BTF map definition. Such attributes are
1934 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL); in get_map_field_int()
1935 const char *name = btf__name_by_offset(btf, m->name_off); in get_map_field_int()
1940 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n", in get_map_field_int()
1945 arr_t = btf__type_by_id(btf, t->type); in get_map_field_int()
1947 pr_warn("map '%s': attr '%s': type [%u] not found.\n", in get_map_field_int()
1948 map_name, name, t->type); in get_map_field_int()
1952 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n", in get_map_field_int()
1957 *res = arr_info->nelems; in get_map_field_int()
1961 static int build_map_pin_path(struct bpf_map *map, const char *path) in build_map_pin_path() argument
1966 if (!path) in build_map_pin_path()
1967 path = "/sys/fs/bpf"; in build_map_pin_path()
1969 len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map)); in build_map_pin_path()
1971 return -EINVAL; in build_map_pin_path()
1973 return -ENAMETOOLONG; in build_map_pin_path()
1975 return bpf_map__set_pin_path(map, buf); in build_map_pin_path()
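/* A hedged example of a map opting into this path construction: with
 * LIBBPF_PIN_BY_NAME (from <bpf/bpf_helpers.h>) and no pin_root_path
 * override, the map below is pinned at /sys/fs/bpf/pinned_counts.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, int);
	__type(value, long);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
} pinned_counts SEC(".maps");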
1980 struct bpf_map *map, in parse_btf_map_def() argument
1992 const char *name = btf__name_by_offset(obj->btf, m->name_off); in parse_btf_map_def()
1995 pr_warn("map '%s': invalid field #%d.\n", map->name, i); in parse_btf_map_def()
1996 return -EINVAL; in parse_btf_map_def()
1999 if (!get_map_field_int(map->name, obj->btf, m, in parse_btf_map_def()
2000 &map->def.type)) in parse_btf_map_def()
2001 return -EINVAL; in parse_btf_map_def()
2002 pr_debug("map '%s': found type = %u.\n", in parse_btf_map_def()
2003 map->name, map->def.type); in parse_btf_map_def()
2005 if (!get_map_field_int(map->name, obj->btf, m, in parse_btf_map_def()
2006 &map->def.max_entries)) in parse_btf_map_def()
2007 return -EINVAL; in parse_btf_map_def()
2008 pr_debug("map '%s': found max_entries = %u.\n", in parse_btf_map_def()
2009 map->name, map->def.max_entries); in parse_btf_map_def()
2011 if (!get_map_field_int(map->name, obj->btf, m, in parse_btf_map_def()
2012 &map->def.map_flags)) in parse_btf_map_def()
2013 return -EINVAL; in parse_btf_map_def()
2014 pr_debug("map '%s': found map_flags = %u.\n", in parse_btf_map_def()
2015 map->name, map->def.map_flags); in parse_btf_map_def()
2017 if (!get_map_field_int(map->name, obj->btf, m, &map->numa_node)) in parse_btf_map_def()
2018 return -EINVAL; in parse_btf_map_def()
2019 pr_debug("map '%s': found numa_node = %u.\n", map->name, map->numa_node); in parse_btf_map_def()
2023 if (!get_map_field_int(map->name, obj->btf, m, &sz)) in parse_btf_map_def()
2024 return -EINVAL; in parse_btf_map_def()
2025 pr_debug("map '%s': found key_size = %u.\n", in parse_btf_map_def()
2026 map->name, sz); in parse_btf_map_def()
2027 if (map->def.key_size && map->def.key_size != sz) { in parse_btf_map_def()
2028 pr_warn("map '%s': conflicting key size %u != %u.\n", in parse_btf_map_def()
2029 map->name, map->def.key_size, sz); in parse_btf_map_def()
2030 return -EINVAL; in parse_btf_map_def()
2032 map->def.key_size = sz; in parse_btf_map_def()
2036 t = btf__type_by_id(obj->btf, m->type); in parse_btf_map_def()
2038 pr_warn("map '%s': key type [%d] not found.\n", in parse_btf_map_def()
2039 map->name, m->type); in parse_btf_map_def()
2040 return -EINVAL; in parse_btf_map_def()
2043 pr_warn("map '%s': key spec is not PTR: %s.\n", in parse_btf_map_def()
2044 map->name, btf_kind_str(t)); in parse_btf_map_def()
2045 return -EINVAL; in parse_btf_map_def()
2047 sz = btf__resolve_size(obj->btf, t->type); in parse_btf_map_def()
2049 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n", in parse_btf_map_def()
2050 map->name, t->type, (ssize_t)sz); in parse_btf_map_def()
2053 pr_debug("map '%s': found key [%u], sz = %zd.\n", in parse_btf_map_def()
2054 map->name, t->type, (ssize_t)sz); in parse_btf_map_def()
2055 if (map->def.key_size && map->def.key_size != sz) { in parse_btf_map_def()
2056 pr_warn("map '%s': conflicting key size %u != %zd.\n", in parse_btf_map_def()
2057 map->name, map->def.key_size, (ssize_t)sz); in parse_btf_map_def()
2058 return -EINVAL; in parse_btf_map_def()
2060 map->def.key_size = sz; in parse_btf_map_def()
2061 map->btf_key_type_id = t->type; in parse_btf_map_def()
2065 if (!get_map_field_int(map->name, obj->btf, m, &sz)) in parse_btf_map_def()
2066 return -EINVAL; in parse_btf_map_def()
2067 pr_debug("map '%s': found value_size = %u.\n", in parse_btf_map_def()
2068 map->name, sz); in parse_btf_map_def()
2069 if (map->def.value_size && map->def.value_size != sz) { in parse_btf_map_def()
2070 pr_warn("map '%s': conflicting value size %u != %u.\n", in parse_btf_map_def()
2071 map->name, map->def.value_size, sz); in parse_btf_map_def()
2072 return -EINVAL; in parse_btf_map_def()
2074 map->def.value_size = sz; in parse_btf_map_def()
2078 t = btf__type_by_id(obj->btf, m->type); in parse_btf_map_def()
2080 pr_warn("map '%s': value type [%d] not found.\n", in parse_btf_map_def()
2081 map->name, m->type); in parse_btf_map_def()
2082 return -EINVAL; in parse_btf_map_def()
2085 pr_warn("map '%s': value spec is not PTR: %s.\n", in parse_btf_map_def()
2086 map->name, btf_kind_str(t)); in parse_btf_map_def()
2087 return -EINVAL; in parse_btf_map_def()
2089 sz = btf__resolve_size(obj->btf, t->type); in parse_btf_map_def()
2091 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n", in parse_btf_map_def()
2092 map->name, t->type, (ssize_t)sz); in parse_btf_map_def()
2095 pr_debug("map '%s': found value [%u], sz = %zd.\n", in parse_btf_map_def()
2096 map->name, t->type, (ssize_t)sz); in parse_btf_map_def()
2097 if (map->def.value_size && map->def.value_size != sz) { in parse_btf_map_def()
2098 pr_warn("map '%s': conflicting value size %u != %zd.\n", in parse_btf_map_def()
2099 map->name, map->def.value_size, (ssize_t)sz); in parse_btf_map_def()
2100 return -EINVAL; in parse_btf_map_def()
2102 map->def.value_size = sz; in parse_btf_map_def()
2103 map->btf_value_type_id = t->type; in parse_btf_map_def()
2109 pr_warn("map '%s': multi-level inner maps not supported.\n", in parse_btf_map_def()
2110 map->name); in parse_btf_map_def()
2111 return -ENOTSUP; in parse_btf_map_def()
2113 if (i != vlen - 1) { in parse_btf_map_def()
2114 pr_warn("map '%s': '%s' member should be last.\n", in parse_btf_map_def()
2115 map->name, name); in parse_btf_map_def()
2116 return -EINVAL; in parse_btf_map_def()
2118 if (!bpf_map_type__is_map_in_map(map->def.type)) { in parse_btf_map_def()
2119 pr_warn("map '%s': should be map-in-map.\n", in parse_btf_map_def()
2120 map->name); in parse_btf_map_def()
2121 return -ENOTSUP; in parse_btf_map_def()
2123 if (map->def.value_size && map->def.value_size != 4) { in parse_btf_map_def()
2124 pr_warn("map '%s': conflicting value size %u != 4.\n", in parse_btf_map_def()
2125 map->name, map->def.value_size); in parse_btf_map_def()
2126 return -EINVAL; in parse_btf_map_def()
2128 map->def.value_size = 4; in parse_btf_map_def()
2129 t = btf__type_by_id(obj->btf, m->type); in parse_btf_map_def()
2131 pr_warn("map '%s': map-in-map inner type [%d] not found.\n", in parse_btf_map_def()
2132 map->name, m->type); in parse_btf_map_def()
2133 return -EINVAL; in parse_btf_map_def()
2135 if (!btf_is_array(t) || btf_array(t)->nelems) { in parse_btf_map_def()
2136 pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n", in parse_btf_map_def()
2137 map->name); in parse_btf_map_def()
2138 return -EINVAL; in parse_btf_map_def()
2140 t = skip_mods_and_typedefs(obj->btf, btf_array(t)->type, in parse_btf_map_def()
2143 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", in parse_btf_map_def()
2144 map->name, btf_kind_str(t)); in parse_btf_map_def()
2145 return -EINVAL; in parse_btf_map_def()
2147 t = skip_mods_and_typedefs(obj->btf, t->type, NULL); in parse_btf_map_def()
2149 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", in parse_btf_map_def()
2150 map->name, btf_kind_str(t)); in parse_btf_map_def()
2151 return -EINVAL; in parse_btf_map_def()
2154 map->inner_map = calloc(1, sizeof(*map->inner_map)); in parse_btf_map_def()
2155 if (!map->inner_map) in parse_btf_map_def()
2156 return -ENOMEM; in parse_btf_map_def()
2157 map->inner_map->sec_idx = obj->efile.btf_maps_shndx; in parse_btf_map_def()
2158 map->inner_map->name = malloc(strlen(map->name) + in parse_btf_map_def()
2160 if (!map->inner_map->name) in parse_btf_map_def()
2161 return -ENOMEM; in parse_btf_map_def()
2162 sprintf(map->inner_map->name, "%s.inner", map->name); in parse_btf_map_def()
2164 err = parse_btf_map_def(obj, map->inner_map, t, strict, in parse_btf_map_def()
2173 pr_debug("map '%s': inner def can't be pinned.\n", in parse_btf_map_def()
2174 map->name); in parse_btf_map_def()
2175 return -EINVAL; in parse_btf_map_def()
2177 if (!get_map_field_int(map->name, obj->btf, m, &val)) in parse_btf_map_def()
2178 return -EINVAL; in parse_btf_map_def()
2179 pr_debug("map '%s': found pinning = %u.\n", in parse_btf_map_def()
2180 map->name, val); in parse_btf_map_def()
2184 pr_warn("map '%s': invalid pinning value %u.\n", in parse_btf_map_def()
2185 map->name, val); in parse_btf_map_def()
2186 return -EINVAL; in parse_btf_map_def()
2189 err = build_map_pin_path(map, pin_root_path); in parse_btf_map_def()
2191 pr_warn("map '%s': couldn't build pin path.\n", in parse_btf_map_def()
2192 map->name); in parse_btf_map_def()
2198 pr_warn("map '%s': unknown field '%s'.\n", in parse_btf_map_def()
2199 map->name, name); in parse_btf_map_def()
2200 return -ENOTSUP; in parse_btf_map_def()
2202 pr_debug("map '%s': ignoring unknown field '%s'.\n", in parse_btf_map_def()
2203 map->name, name); in parse_btf_map_def()
2207 if (map->def.type == BPF_MAP_TYPE_UNSPEC) { in parse_btf_map_def()
2208 pr_warn("map '%s': map type isn't specified.\n", map->name); in parse_btf_map_def()
2209 return -EINVAL; in parse_btf_map_def()
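/* A hedged sketch of the map-in-map declaration handled by the
 * "values" branch above: the inner map's definition is carried by the
 * element type of a zero-sized array (__array from <bpf/bpf_helpers.h>).
 */
struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, long);
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 8);
	__type(key, int);
	__array(values, struct inner_map);
} outer_map SEC(".maps");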
2225 struct bpf_map *map; in bpf_object__init_user_btf_map() local
2228 var = btf__type_by_id(obj->btf, vi->type); in bpf_object__init_user_btf_map()
2230 map_name = btf__name_by_offset(obj->btf, var->name_off); in bpf_object__init_user_btf_map()
2233 pr_warn("map #%d: empty name.\n", var_idx); in bpf_object__init_user_btf_map()
2234 return -EINVAL; in bpf_object__init_user_btf_map()
2236 if ((__u64)vi->offset + vi->size > data->d_size) { in bpf_object__init_user_btf_map()
2237 pr_warn("map '%s' BTF data is corrupted.\n", map_name); in bpf_object__init_user_btf_map()
2238 return -EINVAL; in bpf_object__init_user_btf_map()
2241 pr_warn("map '%s': unexpected var kind %s.\n", in bpf_object__init_user_btf_map()
2243 return -EINVAL; in bpf_object__init_user_btf_map()
2245 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED && in bpf_object__init_user_btf_map()
2246 var_extra->linkage != BTF_VAR_STATIC) { in bpf_object__init_user_btf_map()
2247 pr_warn("map '%s': unsupported var linkage %u.\n", in bpf_object__init_user_btf_map()
2248 map_name, var_extra->linkage); in bpf_object__init_user_btf_map()
2249 return -EOPNOTSUPP; in bpf_object__init_user_btf_map()
2252 def = skip_mods_and_typedefs(obj->btf, var->type, NULL); in bpf_object__init_user_btf_map()
2254 pr_warn("map '%s': unexpected def kind %s.\n", in bpf_object__init_user_btf_map()
2256 return -EINVAL; in bpf_object__init_user_btf_map()
2258 if (def->size > vi->size) { in bpf_object__init_user_btf_map()
2259 pr_warn("map '%s': invalid def size.\n", map_name); in bpf_object__init_user_btf_map()
2260 return -EINVAL; in bpf_object__init_user_btf_map()
2263 map = bpf_object__add_map(obj); in bpf_object__init_user_btf_map()
2264 if (IS_ERR(map)) in bpf_object__init_user_btf_map()
2265 return PTR_ERR(map); in bpf_object__init_user_btf_map()
2266 map->name = strdup(map_name); in bpf_object__init_user_btf_map()
2267 if (!map->name) { in bpf_object__init_user_btf_map()
2268 pr_warn("map '%s': failed to alloc map name.\n", map_name); in bpf_object__init_user_btf_map()
2269 return -ENOMEM; in bpf_object__init_user_btf_map()
2271 map->libbpf_type = LIBBPF_MAP_UNSPEC; in bpf_object__init_user_btf_map()
2272 map->def.type = BPF_MAP_TYPE_UNSPEC; in bpf_object__init_user_btf_map()
2273 map->sec_idx = sec_idx; in bpf_object__init_user_btf_map()
2274 map->sec_offset = vi->offset; in bpf_object__init_user_btf_map()
2275 map->btf_var_idx = var_idx; in bpf_object__init_user_btf_map()
2276 pr_debug("map '%s': at sec_idx %d, offset %zu.\n", in bpf_object__init_user_btf_map()
2277 map_name, map->sec_idx, map->sec_offset); in bpf_object__init_user_btf_map()
2279 return parse_btf_map_def(obj, map, def, strict, false, pin_root_path); in bpf_object__init_user_btf_map()
2292 if (obj->efile.btf_maps_shndx < 0) in bpf_object__init_user_btf_maps()
2295 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx); in bpf_object__init_user_btf_maps()
2298 pr_warn("elf: failed to get %s map definitions for %s\n", in bpf_object__init_user_btf_maps()
2299 MAPS_ELF_SEC, obj->path); in bpf_object__init_user_btf_maps()
2300 return -EINVAL; in bpf_object__init_user_btf_maps()
2303 nr_types = btf__get_nr_types(obj->btf); in bpf_object__init_user_btf_maps()
2305 t = btf__type_by_id(obj->btf, i); in bpf_object__init_user_btf_maps()
2308 name = btf__name_by_offset(obj->btf, t->name_off); in bpf_object__init_user_btf_maps()
2311 obj->efile.btf_maps_sec_btf_id = i; in bpf_object__init_user_btf_maps()
2318 return -ENOENT; in bpf_object__init_user_btf_maps()
2324 obj->efile.btf_maps_shndx, in bpf_object__init_user_btf_maps()
2387 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0); in bpf_object__sanitize_btf()
2393 t->size = 1; in bpf_object__sanitize_btf()
2402 name = (char *)btf__name_by_offset(btf, t->name_off); in bpf_object__sanitize_btf()
2410 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen); in bpf_object__sanitize_btf()
2413 m->offset = v->offset * 8; in bpf_object__sanitize_btf()
2414 m->type = v->type; in bpf_object__sanitize_btf()
2416 vt = (void *)btf__type_by_id(btf, v->type); in bpf_object__sanitize_btf()
2417 m->name_off = vt->name_off; in bpf_object__sanitize_btf()
2422 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen); in bpf_object__sanitize_btf()
2423 t->size = sizeof(__u32); /* kernel enforced */ in bpf_object__sanitize_btf()
2426 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0); in bpf_object__sanitize_btf()
2429 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0); in bpf_object__sanitize_btf()
2436 return obj->efile.btf_maps_shndx >= 0 || in libbpf_needs_btf()
2437 obj->efile.st_ops_shndx >= 0 || in libbpf_needs_btf()
2438 obj->nr_extern > 0; in libbpf_needs_btf()
2443 return obj->efile.st_ops_shndx >= 0; in kernel_needs_btf()
2450 int err = -ENOENT; in bpf_object__init_btf()
2453 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size); in bpf_object__init_btf()
2454 if (IS_ERR(obj->btf)) { in bpf_object__init_btf()
2455 err = PTR_ERR(obj->btf); in bpf_object__init_btf()
2456 obj->btf = NULL; in bpf_object__init_btf()
2461 /* enforce 8-byte pointers for BPF-targeted BTFs */ in bpf_object__init_btf()
2462 btf__set_pointer_size(obj->btf, 8); in bpf_object__init_btf()
2466 if (!obj->btf) { in bpf_object__init_btf()
2471 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, in bpf_object__init_btf()
2472 btf_ext_data->d_size); in bpf_object__init_btf()
2473 if (IS_ERR(obj->btf_ext)) { in bpf_object__init_btf()
2475 BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext)); in bpf_object__init_btf()
2476 obj->btf_ext = NULL; in bpf_object__init_btf()
2492 if (!obj->btf) in bpf_object__finalize_btf()
2495 err = btf__finalize_data(obj, obj->btf); in bpf_object__finalize_btf()
2506 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || in libbpf_prog_needs_vmlinux_btf()
2507 prog->type == BPF_PROG_TYPE_LSM) in libbpf_prog_needs_vmlinux_btf()
2513 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd) in libbpf_prog_needs_vmlinux_btf()
2525 /* CO-RE relocations need kernel BTF */ in bpf_object__load_vmlinux_btf()
2526 if (obj->btf_ext && obj->btf_ext->core_relo_info.len) in bpf_object__load_vmlinux_btf()
2530 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__load_vmlinux_btf()
2533 ext = &obj->externs[i]; in bpf_object__load_vmlinux_btf()
2534 if (ext->type == EXT_KSYM && ext->ksym.type_id) { in bpf_object__load_vmlinux_btf()
2541 if (!prog->load) in bpf_object__load_vmlinux_btf()
2552 obj->btf_vmlinux = libbpf_find_kernel_btf(); in bpf_object__load_vmlinux_btf()
2553 if (IS_ERR(obj->btf_vmlinux)) { in bpf_object__load_vmlinux_btf()
2554 err = PTR_ERR(obj->btf_vmlinux); in bpf_object__load_vmlinux_btf()
2556 obj->btf_vmlinux = NULL; in bpf_object__load_vmlinux_btf()
2564 struct btf *kern_btf = obj->btf; in bpf_object__sanitize_and_load_btf()
2568 if (!obj->btf) in bpf_object__sanitize_and_load_btf()
2573 err = -EOPNOTSUPP; in bpf_object__sanitize_and_load_btf()
2586 raw_data = btf__get_raw_data(obj->btf, &sz); in bpf_object__sanitize_and_load_btf()
2591 /* enforce 8-byte pointers for BPF-targeted BTFs */ in bpf_object__sanitize_and_load_btf()
2592 btf__set_pointer_size(obj->btf, 8); in bpf_object__sanitize_and_load_btf()
2600 btf__set_fd(obj->btf, btf__fd(kern_btf)); in bpf_object__sanitize_and_load_btf()
2601 btf__set_fd(kern_btf, -1); in bpf_object__sanitize_and_load_btf()
2621 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off); in elf_sym_str()
2624 off, obj->path, elf_errmsg(-1)); in elf_sym_str()
2635 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off); in elf_sec_str()
2638 off, obj->path, elf_errmsg(-1)); in elf_sec_str()
2649 scn = elf_getscn(obj->efile.elf, idx); in elf_sec_by_idx()
2652 idx, obj->path, elf_errmsg(-1)); in elf_sec_by_idx()
2661 Elf *elf = obj->efile.elf; in elf_sec_by_name()
2680 return -EINVAL; in elf_sec_hdr()
2684 elf_ndxscn(scn), obj->path, elf_errmsg(-1)); in elf_sec_hdr()
2685 return -EINVAL; in elf_sec_hdr()
2705 elf_ndxscn(scn), obj->path, elf_errmsg(-1)); in elf_sec_name()
2723 obj->path, elf_errmsg(-1)); in elf_sec_data()
2733 Elf_Data *symbols = obj->efile.symbols; in elf_sym_by_sec_off()
2734 size_t n = symbols->d_size / sizeof(GElf_Sym); in elf_sym_by_sec_off()
2740 if (sym->st_shndx != sec_idx || sym->st_value != off) in elf_sym_by_sec_off()
2742 if (GELF_ST_TYPE(sym->st_info) != sym_type) in elf_sym_by_sec_off()
2747 return -ENOENT; in elf_sym_by_sec_off()
2753 return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0; in is_sec_name_dwarf()
2759 if (hdr->sh_type == SHT_STRTAB) in ignore_elf_section()
2763 if (hdr->sh_type == 0x6FFF4C03 /* SHT_LLVM_ADDRSIG */) in ignore_elf_section()
2767 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 && in ignore_elf_section()
2775 if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) { in ignore_elf_section()
2776 name += sizeof(".rel") - 1; in ignore_elf_section()
2795 if (a->sec_idx != b->sec_idx) in cmp_progs()
2796 return a->sec_idx < b->sec_idx ? -1 : 1; in cmp_progs()
2799 return a->sec_insn_off < b->sec_insn_off ? -1 : 1; in cmp_progs()
2804 Elf *elf = obj->efile.elf; in bpf_object__elf_collect()
2819 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2822 if (obj->efile.symbols) { in bpf_object__elf_collect()
2823 pr_warn("elf: multiple symbol tables in %s\n", obj->path); in bpf_object__elf_collect()
2824 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2829 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2831 obj->efile.symbols = data; in bpf_object__elf_collect()
2832 obj->efile.symbols_shndx = elf_ndxscn(scn); in bpf_object__elf_collect()
2833 obj->efile.strtabidx = sh.sh_link; in bpf_object__elf_collect()
2842 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2846 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2853 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2856 idx, name, (unsigned long)data->d_size, in bpf_object__elf_collect()
2861 err = bpf_object__init_license(obj, data->d_buf, data->d_size); in bpf_object__elf_collect()
2865 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size); in bpf_object__elf_collect()
2869 obj->efile.maps_shndx = idx; in bpf_object__elf_collect()
2871 obj->efile.btf_maps_shndx = idx; in bpf_object__elf_collect()
2878 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) { in bpf_object__elf_collect()
2881 obj->efile.text_shndx = idx; in bpf_object__elf_collect()
2886 obj->efile.data = data; in bpf_object__elf_collect()
2887 obj->efile.data_shndx = idx; in bpf_object__elf_collect()
2889 obj->efile.rodata = data; in bpf_object__elf_collect()
2890 obj->efile.rodata_shndx = idx; in bpf_object__elf_collect()
2892 obj->efile.st_ops_data = data; in bpf_object__elf_collect()
2893 obj->efile.st_ops_shndx = idx; in bpf_object__elf_collect()
2899 int nr_sects = obj->efile.nr_reloc_sects; in bpf_object__elf_collect()
2900 void *sects = obj->efile.reloc_sects; in bpf_object__elf_collect()
2914 sizeof(*obj->efile.reloc_sects)); in bpf_object__elf_collect()
2916 return -ENOMEM; in bpf_object__elf_collect()
2918 obj->efile.reloc_sects = sects; in bpf_object__elf_collect()
2919 obj->efile.nr_reloc_sects++; in bpf_object__elf_collect()
2921 obj->efile.reloc_sects[nr_sects].shdr = sh; in bpf_object__elf_collect()
2922 obj->efile.reloc_sects[nr_sects].data = data; in bpf_object__elf_collect()
2924 obj->efile.bss = data; in bpf_object__elf_collect()
2925 obj->efile.bss_shndx = idx; in bpf_object__elf_collect()
2932 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) { in bpf_object__elf_collect()
2933 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path); in bpf_object__elf_collect()
2934 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2937 /* sort BPF programs by section name and in-section instruction offset in bpf_object__elf_collect()
2939 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs); in bpf_object__elf_collect()
2946 int bind = GELF_ST_BIND(sym->st_info); in sym_is_extern()
2948 return sym->st_shndx == SHN_UNDEF && in sym_is_extern()
2950 GELF_ST_TYPE(sym->st_info) == STT_NOTYPE; in sym_is_extern()
2960 return -ESRCH; in find_extern_btf_id()
2969 var_name = btf__name_by_offset(btf, t->name_off); in find_extern_btf_id()
2973 if (btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN) in find_extern_btf_id()
2974 return -EINVAL; in find_extern_btf_id()
2979 return -ENOENT; in find_extern_btf_id()
2988 return -ESRCH; in find_extern_sec_btf_id()
2999 if (vs->type == ext_btf_id) in find_extern_sec_btf_id()
3004 return -ENOENT; in find_extern_sec_btf_id()
3014 name = btf__name_by_offset(btf, t->name_off); in find_kcfg_type()
3023 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN; in find_kcfg_type()
3026 if (t->size == 1) in find_kcfg_type()
3028 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1))) in find_kcfg_type()
3033 if (t->size != 4) in find_kcfg_type()
3039 if (btf_array(t)->nelems == 0) in find_kcfg_type()
3041 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR) in find_kcfg_type()
3054 if (a->type != b->type) in cmp_externs()
3055 return a->type < b->type ? -1 : 1; in cmp_externs()
3057 if (a->type == EXT_KCFG) { in cmp_externs()
3059 if (a->kcfg.align != b->kcfg.align) in cmp_externs()
3060 return a->kcfg.align > b->kcfg.align ? -1 : 1; in cmp_externs()
3062 if (a->kcfg.sz != b->kcfg.sz) in cmp_externs()
3063 return a->kcfg.sz < b->kcfg.sz ? -1 : 1; in cmp_externs()
3067 return strcmp(a->name, b->name); in cmp_externs()
3096 if (!obj->efile.symbols) in bpf_object__collect_externs()
3099 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx); in bpf_object__collect_externs()
3101 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_externs()
3109 if (!gelf_getsym(obj->efile.symbols, i, &sym)) in bpf_object__collect_externs()
3110 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_externs()
3117 ext = obj->externs; in bpf_object__collect_externs()
3118 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext)); in bpf_object__collect_externs()
3120 return -ENOMEM; in bpf_object__collect_externs()
3121 obj->externs = ext; in bpf_object__collect_externs()
3122 ext = &ext[obj->nr_extern]; in bpf_object__collect_externs()
3124 obj->nr_extern++; in bpf_object__collect_externs()
3126 ext->btf_id = find_extern_btf_id(obj->btf, ext_name); in bpf_object__collect_externs()
3127 if (ext->btf_id <= 0) { in bpf_object__collect_externs()
3129 ext_name, ext->btf_id); in bpf_object__collect_externs()
3130 return ext->btf_id; in bpf_object__collect_externs()
3132 t = btf__type_by_id(obj->btf, ext->btf_id); in bpf_object__collect_externs()
3133 ext->name = btf__name_by_offset(obj->btf, t->name_off); in bpf_object__collect_externs()
3134 ext->sym_idx = i; in bpf_object__collect_externs()
3135 ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK; in bpf_object__collect_externs()
3137 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id); in bpf_object__collect_externs()
3138 if (ext->sec_btf_id <= 0) { in bpf_object__collect_externs()
3140 ext_name, ext->btf_id, ext->sec_btf_id); in bpf_object__collect_externs()
3141 return ext->sec_btf_id; in bpf_object__collect_externs()
3143 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id); in bpf_object__collect_externs()
3144 sec_name = btf__name_by_offset(obj->btf, sec->name_off); in bpf_object__collect_externs()
3148 ext->type = EXT_KCFG; in bpf_object__collect_externs()
3149 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type); in bpf_object__collect_externs()
3150 if (ext->kcfg.sz <= 0) { in bpf_object__collect_externs()
3152 ext_name, ext->kcfg.sz); in bpf_object__collect_externs()
3153 return ext->kcfg.sz; in bpf_object__collect_externs()
3155 ext->kcfg.align = btf__align_of(obj->btf, t->type); in bpf_object__collect_externs()
3156 if (ext->kcfg.align <= 0) { in bpf_object__collect_externs()
3158 ext_name, ext->kcfg.align); in bpf_object__collect_externs()
3159 return -EINVAL; in bpf_object__collect_externs()
3161 ext->kcfg.type = find_kcfg_type(obj->btf, t->type, in bpf_object__collect_externs()
3162 &ext->kcfg.is_signed); in bpf_object__collect_externs()
3163 if (ext->kcfg.type == KCFG_UNKNOWN) { in bpf_object__collect_externs()
3165 return -ENOTSUP; in bpf_object__collect_externs()
3169 ext->type = EXT_KSYM; in bpf_object__collect_externs()
3170 skip_mods_and_typedefs(obj->btf, t->type, in bpf_object__collect_externs()
3171 &ext->ksym.type_id); in bpf_object__collect_externs()
3174 return -ENOTSUP; in bpf_object__collect_externs()
3177 pr_debug("collected %d externs total\n", obj->nr_extern); in bpf_object__collect_externs()
3179 if (!obj->nr_extern) in bpf_object__collect_externs()
3183 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs); in bpf_object__collect_externs()
3187	 * pretending that each extern is an 8-byte variable in bpf_object__collect_externs()
3190 /* find existing 4-byte integer type in BTF to use for fake in bpf_object__collect_externs()
3193 int int_btf_id = find_int_btf_id(obj->btf); in bpf_object__collect_externs()
3195 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__collect_externs()
3196 ext = &obj->externs[i]; in bpf_object__collect_externs()
3197 if (ext->type != EXT_KSYM) in bpf_object__collect_externs()
3200 i, ext->sym_idx, ext->name); in bpf_object__collect_externs()
3209 vt = (void *)btf__type_by_id(obj->btf, vs->type); in bpf_object__collect_externs()
3210 ext_name = btf__name_by_offset(obj->btf, vt->name_off); in bpf_object__collect_externs()
3215 return -ESRCH; in bpf_object__collect_externs()
3217 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED; in bpf_object__collect_externs()
3218 vt->type = int_btf_id; in bpf_object__collect_externs()
3219 vs->offset = off; in bpf_object__collect_externs()
3220 vs->size = sizeof(int); in bpf_object__collect_externs()
3222 sec->size = off; in bpf_object__collect_externs()
3227 /* for kcfg externs calculate their offsets within a .kconfig map */ in bpf_object__collect_externs()
3229 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__collect_externs()
3230 ext = &obj->externs[i]; in bpf_object__collect_externs()
3231 if (ext->type != EXT_KCFG) in bpf_object__collect_externs()
3234 ext->kcfg.data_off = roundup(off, ext->kcfg.align); in bpf_object__collect_externs()
3235 off = ext->kcfg.data_off + ext->kcfg.sz; in bpf_object__collect_externs()
3237 i, ext->sym_idx, ext->kcfg.data_off, ext->name); in bpf_object__collect_externs()
3239 sec->size = off; in bpf_object__collect_externs()
3244 t = btf__type_by_id(obj->btf, vs->type); in bpf_object__collect_externs()
3245 ext_name = btf__name_by_offset(obj->btf, t->name_off); in bpf_object__collect_externs()
3250 return -ESRCH; in bpf_object__collect_externs()
3252 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED; in bpf_object__collect_externs()
3253 vs->offset = ext->kcfg.data_off; in bpf_object__collect_externs()
3266 if (pos->sec_name && !strcmp(pos->sec_name, title)) in bpf_object__find_program_by_title()
3275	/* For legacy reasons, libbpf supports entry-point BPF programs in prog_is_subprog()
3278 * must be subprograms called from entry-point BPF programs in in prog_is_subprog()
3285 * SEC()-designated BPF programs and .text entry-point BPF programs. in prog_is_subprog()
3287 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1; in prog_is_subprog()
3299 if (!strcmp(prog->name, name)) in bpf_object__find_program_by_name()
3308 return shndx == obj->efile.data_shndx || in bpf_object__shndx_is_data()
3309 shndx == obj->efile.bss_shndx || in bpf_object__shndx_is_data()
3310 shndx == obj->efile.rodata_shndx; in bpf_object__shndx_is_data()
3316 return shndx == obj->efile.maps_shndx || in bpf_object__shndx_is_maps()
3317 shndx == obj->efile.btf_maps_shndx; in bpf_object__shndx_is_maps()
3323 if (shndx == obj->efile.data_shndx) in bpf_object__section_to_libbpf_map_type()
3325 else if (shndx == obj->efile.bss_shndx) in bpf_object__section_to_libbpf_map_type()
3327 else if (shndx == obj->efile.rodata_shndx) in bpf_object__section_to_libbpf_map_type()
3329 else if (shndx == obj->efile.symbols_shndx) in bpf_object__section_to_libbpf_map_type()
3340 struct bpf_insn *insn = &prog->insns[insn_idx]; in bpf_program__record_reloc()
3341 size_t map_idx, nr_maps = prog->obj->nr_maps; in bpf_program__record_reloc()
3342 struct bpf_object *obj = prog->obj; in bpf_program__record_reloc()
3343 __u32 shdr_idx = sym->st_shndx; in bpf_program__record_reloc()
3346 struct bpf_map *map; in bpf_program__record_reloc() local
3348 reloc_desc->processed = false; in bpf_program__record_reloc()
3350 /* sub-program call relocation */ in bpf_program__record_reloc()
3351 if (insn->code == (BPF_JMP | BPF_CALL)) { in bpf_program__record_reloc()
3352 if (insn->src_reg != BPF_PSEUDO_CALL) { in bpf_program__record_reloc()
3353 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name); in bpf_program__record_reloc()
3354 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3357 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) { in bpf_program__record_reloc()
3360 prog->name, sym_name, sym_sec_name); in bpf_program__record_reloc()
3361 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3363 if (sym->st_value % BPF_INSN_SZ) { in bpf_program__record_reloc()
3365 prog->name, sym_name, (size_t)sym->st_value); in bpf_program__record_reloc()
3366 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3368 reloc_desc->type = RELO_CALL; in bpf_program__record_reloc()
3369 reloc_desc->insn_idx = insn_idx; in bpf_program__record_reloc()
3370 reloc_desc->sym_off = sym->st_value; in bpf_program__record_reloc()
3374 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) { in bpf_program__record_reloc()
3376 prog->name, sym_name, insn_idx, insn->code); in bpf_program__record_reloc()
3377 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3381 int sym_idx = GELF_R_SYM(rel->r_info); in bpf_program__record_reloc()
3382 int i, n = obj->nr_extern; in bpf_program__record_reloc()
3386 ext = &obj->externs[i]; in bpf_program__record_reloc()
3387 if (ext->sym_idx == sym_idx) in bpf_program__record_reloc()
3392 prog->name, sym_name, sym_idx); in bpf_program__record_reloc()
3393 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3396 prog->name, i, ext->name, ext->sym_idx, insn_idx); in bpf_program__record_reloc()
3397 reloc_desc->type = RELO_EXTERN; in bpf_program__record_reloc()
3398 reloc_desc->insn_idx = insn_idx; in bpf_program__record_reloc()
3399 reloc_desc->sym_off = i; /* sym_off stores extern index */ in bpf_program__record_reloc()
3405 prog->name, sym_name, shdr_idx); in bpf_program__record_reloc()
3406 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3412 /* generic map reference relocation */ in bpf_program__record_reloc()
3415 pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n", in bpf_program__record_reloc()
3416 prog->name, sym_name, sym_sec_name); in bpf_program__record_reloc()
3417 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3420 map = &obj->maps[map_idx]; in bpf_program__record_reloc()
3421 if (map->libbpf_type != type || in bpf_program__record_reloc()
3422 map->sec_idx != sym->st_shndx || in bpf_program__record_reloc()
3423 map->sec_offset != sym->st_value) in bpf_program__record_reloc()
3425 pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n", in bpf_program__record_reloc()
3426 prog->name, map_idx, map->name, map->sec_idx, in bpf_program__record_reloc()
3427 map->sec_offset, insn_idx); in bpf_program__record_reloc()
3431 pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n", in bpf_program__record_reloc()
3432 prog->name, sym_sec_name, (size_t)sym->st_value); in bpf_program__record_reloc()
3433 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3435 reloc_desc->type = RELO_LD64; in bpf_program__record_reloc()
3436 reloc_desc->insn_idx = insn_idx; in bpf_program__record_reloc()
3437 reloc_desc->map_idx = map_idx; in bpf_program__record_reloc()
3438 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */ in bpf_program__record_reloc()
3442 /* global data map relocation */ in bpf_program__record_reloc()
3445 prog->name, sym_sec_name); in bpf_program__record_reloc()
3446 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3449 map = &obj->maps[map_idx]; in bpf_program__record_reloc()
3450 if (map->libbpf_type != type) in bpf_program__record_reloc()
3452 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n", in bpf_program__record_reloc()
3453 prog->name, map_idx, map->name, map->sec_idx, in bpf_program__record_reloc()
3454 map->sec_offset, insn_idx); in bpf_program__record_reloc()
3458 pr_warn("prog '%s': data relo failed to find map for section '%s'\n", in bpf_program__record_reloc()
3459 prog->name, sym_sec_name); in bpf_program__record_reloc()
3460 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3463 reloc_desc->type = RELO_DATA; in bpf_program__record_reloc()
3464 reloc_desc->insn_idx = insn_idx; in bpf_program__record_reloc()
3465 reloc_desc->map_idx = map_idx; in bpf_program__record_reloc()
3466 reloc_desc->sym_off = sym->st_value; in bpf_program__record_reloc()
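/* Summary of the reloc_desc variants recorded above (editor's note):
 *   RELO_CALL   - bpf-to-bpf call; sym_off is the callee's offset in .text
 *   RELO_EXTERN - ldimm64 against an extern; sym_off is the extern's index
 *   RELO_LD64   - ldimm64 against a regular map; map_idx identifies the map
 *   RELO_DATA   - ldimm64 against global data; sym_off is in-section offset
 */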
3472 return insn_idx >= prog->sec_insn_off && in prog_contains_insn()
3473 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt; in prog_contains_insn()
3479 int l = 0, r = obj->nr_programs - 1, m; in find_prog_by_sec_insn()
3483 m = l + (r - l + 1) / 2; in find_prog_by_sec_insn()
3484 prog = &obj->programs[m]; in find_prog_by_sec_insn()
3486 if (prog->sec_idx < sec_idx || in find_prog_by_sec_insn()
3487 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx)) in find_prog_by_sec_insn()
3490 r = m - 1; in find_prog_by_sec_insn()
3495 prog = &obj->programs[l]; in find_prog_by_sec_insn()
3496 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx)) in find_prog_by_sec_insn()
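/* Illustration (hypothetical offsets): with a section's programs sorted by
 * sec_insn_off, e.g. {0, 16, 48}, a query for insn_idx = 20 converges on
 * the program at offset 16 (the last one with sec_insn_off <= 20), after
 * which prog_contains_insn() verifies that 20 < 16 + sec_insn_cnt.
 */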
3504 Elf_Data *symbols = obj->efile.symbols; in bpf_object__collect_prog_relos()
3506 size_t sec_idx = shdr->sh_info; in bpf_object__collect_prog_relos()
3515 relo_sec_name = elf_sec_str(obj, shdr->sh_name); in bpf_object__collect_prog_relos()
3518 return -EINVAL; in bpf_object__collect_prog_relos()
3522 nrels = shdr->sh_size / shdr->sh_entsize; in bpf_object__collect_prog_relos()
3527 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_prog_relos()
3532 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_prog_relos()
3537 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_prog_relos()
3560 return -LIBBPF_ERRNO__RELOC; in bpf_object__collect_prog_relos()
3563 relos = libbpf_reallocarray(prog->reloc_desc, in bpf_object__collect_prog_relos()
3564 prog->nr_reloc + 1, sizeof(*relos)); in bpf_object__collect_prog_relos()
3566 return -ENOMEM; in bpf_object__collect_prog_relos()
3567 prog->reloc_desc = relos; in bpf_object__collect_prog_relos()
3570 insn_idx -= prog->sec_insn_off; in bpf_object__collect_prog_relos()
3571 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc], in bpf_object__collect_prog_relos()
3576 prog->nr_reloc++; in bpf_object__collect_prog_relos()
3581 static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map) in bpf_map_find_btf_info() argument
3583 struct bpf_map_def *def = &map->def; in bpf_map_find_btf_info()
3587 /* if it's BTF-defined map, we don't need to search for type IDs. in bpf_map_find_btf_info()
3588 * For struct_ops map, it does not need btf_key_type_id and in bpf_map_find_btf_info()
3591 if (map->sec_idx == obj->efile.btf_maps_shndx || in bpf_map_find_btf_info()
3592 bpf_map__is_struct_ops(map)) in bpf_map_find_btf_info()
3595 if (!bpf_map__is_internal(map)) { in bpf_map_find_btf_info()
3596 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size, in bpf_map_find_btf_info()
3597 def->value_size, &key_type_id, in bpf_map_find_btf_info()
3604 ret = btf__find_by_name(obj->btf, in bpf_map_find_btf_info()
3605 libbpf_type_to_btf_name[map->libbpf_type]); in bpf_map_find_btf_info()
3610 map->btf_key_type_id = key_type_id; in bpf_map_find_btf_info()
3611 map->btf_value_type_id = bpf_map__is_internal(map) ? in bpf_map_find_btf_info()
3628 err = -errno; in bpf_get_map_info_from_fdinfo()
3636 info->type = val; in bpf_get_map_info_from_fdinfo()
3638 info->key_size = val; in bpf_get_map_info_from_fdinfo()
3640 info->value_size = val; in bpf_get_map_info_from_fdinfo()
3642 info->max_entries = val; in bpf_get_map_info_from_fdinfo()
3644 info->map_flags = val; in bpf_get_map_info_from_fdinfo()
3652 int bpf_map__reuse_fd(struct bpf_map *map, int fd) in bpf_map__reuse_fd() argument
3666 if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0) in bpf_map__reuse_fd()
3667 new_name = strdup(map->name); in bpf_map__reuse_fd()
3672 return -errno; in bpf_map__reuse_fd()
3676 err = -errno; in bpf_map__reuse_fd()
3682 err = -errno; in bpf_map__reuse_fd()
3686 err = zclose(map->fd); in bpf_map__reuse_fd()
3688 err = -errno; in bpf_map__reuse_fd()
3691 free(map->name); in bpf_map__reuse_fd()
3693 map->fd = new_fd; in bpf_map__reuse_fd()
3694 map->name = new_name; in bpf_map__reuse_fd()
3695 map->def.type = info.type; in bpf_map__reuse_fd()
3696 map->def.key_size = info.key_size; in bpf_map__reuse_fd()
3697 map->def.value_size = info.value_size; in bpf_map__reuse_fd()
3698 map->def.max_entries = info.max_entries; in bpf_map__reuse_fd()
3699 map->def.map_flags = info.map_flags; in bpf_map__reuse_fd()
3700 map->btf_key_type_id = info.btf_key_type_id; in bpf_map__reuse_fd()
3701 map->btf_value_type_id = info.btf_value_type_id; in bpf_map__reuse_fd()
3702 map->reused = true; in bpf_map__reuse_fd()
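/* Usage sketch (hypothetical pin path; error handling elided). Reuse must
 * happen after bpf_object__open() but before bpf_object__load(), so the
 * map is not created a second time:
 *
 *	int fd = bpf_obj_get("/sys/fs/bpf/shared_counters");
 *
 *	if (fd >= 0)
 *		err = bpf_map__reuse_fd(map, fd);
 */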
3713 __u32 bpf_map__max_entries(const struct bpf_map *map) in bpf_map__max_entries() argument
3715 return map->def.max_entries; in bpf_map__max_entries()
3718 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries) in bpf_map__set_max_entries() argument
3720 if (map->fd >= 0) in bpf_map__set_max_entries()
3721 return -EBUSY; in bpf_map__set_max_entries()
3722 map->def.max_entries = max_entries; in bpf_map__set_max_entries()
3726 int bpf_map__resize(struct bpf_map *map, __u32 max_entries) in bpf_map__resize() argument
3728 if (!map || !max_entries) in bpf_map__resize()
3729 return -EINVAL; in bpf_map__resize()
3731 return bpf_map__set_max_entries(map, max_entries); in bpf_map__resize()
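/* Usage sketch (hypothetical object and map names): resizing only works
 * between open and load, since a map that already has an fd returns
 * -EBUSY above:
 *
 *	struct bpf_object *obj = bpf_object__open("prog.o");
 *	struct bpf_map *m = bpf_object__find_map_by_name(obj, "events");
 *
 *	if (m)
 *		bpf_map__set_max_entries(m, 4096);
 *	bpf_object__load(obj);
 */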
3761 return -ret; in bpf_object__probe_loading()
3807 int ret, map; in probe_kern_global_data() local
3815 map = bpf_create_map_xattr(&map_attr); in probe_kern_global_data()
3816 if (map < 0) { in probe_kern_global_data()
3817 ret = -errno; in probe_kern_global_data()
3819 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n", in probe_kern_global_data()
3820 __func__, cp, -ret); in probe_kern_global_data()
3824 insns[0].imm = map; in probe_kern_global_data()
3833 close(map); in probe_kern_global_data()
3927 * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS) in probe_kern_exp_attach_type()
3945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */ in probe_kern_probe_read_kernel()
3970 int ret, map, prog; in probe_prog_bind_map() local
3978 map = bpf_create_map_xattr(&map_attr); in probe_prog_bind_map()
3979 if (map < 0) { in probe_prog_bind_map()
3980 ret = -errno; in probe_prog_bind_map()
3982 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n", in probe_prog_bind_map()
3983 __func__, cp, -ret); in probe_prog_bind_map()
3995 close(map); in probe_prog_bind_map()
3999 ret = bpf_prog_bind_map(prog, map, NULL); in probe_prog_bind_map()
4001 close(map); in probe_prog_bind_map()
4039 "ARRAY map mmap()", probe_kern_array_mmap,
4058 if (READ_ONCE(feat->res) == FEAT_UNKNOWN) { in kernel_supports()
4059 ret = feat->probe(); in kernel_supports()
4061 WRITE_ONCE(feat->res, FEAT_SUPPORTED); in kernel_supports()
4063 WRITE_ONCE(feat->res, FEAT_MISSING); in kernel_supports()
4065 pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret); in kernel_supports()
4066 WRITE_ONCE(feat->res, FEAT_MISSING); in kernel_supports()
4070 return READ_ONCE(feat->res) == FEAT_SUPPORTED; in kernel_supports()
4073 static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) in map_is_reuse_compat() argument
4086 pr_warn("failed to get map info for map FD %d: %s\n", map_fd, in map_is_reuse_compat()
4091 return (map_info.type == map->def.type && in map_is_reuse_compat()
4092 map_info.key_size == map->def.key_size && in map_is_reuse_compat()
4093 map_info.value_size == map->def.value_size && in map_is_reuse_compat()
4094 map_info.max_entries == map->def.max_entries && in map_is_reuse_compat()
4095 map_info.map_flags == map->def.map_flags); in map_is_reuse_compat()
4099 bpf_object__reuse_map(struct bpf_map *map) in bpf_object__reuse_map() argument
4104 pin_fd = bpf_obj_get(map->pin_path); in bpf_object__reuse_map()
4106 err = -errno; in bpf_object__reuse_map()
4107 if (err == -ENOENT) { in bpf_object__reuse_map()
4108 pr_debug("found no pinned map to reuse at '%s'\n", in bpf_object__reuse_map()
4109 map->pin_path); in bpf_object__reuse_map()
4113 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); in bpf_object__reuse_map()
4114 pr_warn("couldn't retrieve pinned map '%s': %s\n", in bpf_object__reuse_map()
4115 map->pin_path, cp); in bpf_object__reuse_map()
4119 if (!map_is_reuse_compat(map, pin_fd)) { in bpf_object__reuse_map()
4120 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n", in bpf_object__reuse_map()
4121 map->pin_path); in bpf_object__reuse_map()
4123 return -EINVAL; in bpf_object__reuse_map()
4126 err = bpf_map__reuse_fd(map, pin_fd); in bpf_object__reuse_map()
4131 map->pinned = true; in bpf_object__reuse_map()
4132 pr_debug("reused pinned map at '%s'\n", map->pin_path); in bpf_object__reuse_map()
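/* Usage sketch (hypothetical path; assumes a bpffs mount at /sys/fs/bpf):
 * setting a pin path before load makes map creation first attempt the
 * reuse path above, falling back to create-and-pin on -ENOENT:
 *
 *	struct bpf_map *m = bpf_object__find_map_by_name(obj, "counters");
 *
 *	if (m)
 *		bpf_map__set_pin_path(m, "/sys/fs/bpf/counters");
 *	bpf_object__load(obj);
 */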
4138 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) in bpf_object__populate_internal_map() argument
4140 enum libbpf_map_type map_type = map->libbpf_type; in bpf_object__populate_internal_map()
4144 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0); in bpf_object__populate_internal_map()
4146 err = -errno; in bpf_object__populate_internal_map()
4148 pr_warn("Error setting initial map(%s) contents: %s\n", in bpf_object__populate_internal_map()
4149 map->name, cp); in bpf_object__populate_internal_map()
4153 /* Freeze .rodata and .kconfig map as read-only from syscall side. */ in bpf_object__populate_internal_map()
4155 err = bpf_map_freeze(map->fd); in bpf_object__populate_internal_map()
4157 err = -errno; in bpf_object__populate_internal_map()
4159 pr_warn("Error freezing map(%s) as read-only: %s\n", in bpf_object__populate_internal_map()
4160 map->name, cp); in bpf_object__populate_internal_map()
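/* Consequence sketch (hypothetical fd): once frozen, syscall-side writes
 * to the .rodata/.kconfig map are rejected, while BPF-side reads keep
 * working:
 *
 *	__u32 zero = 0;
 *	long val = 42;
 *
 *	err = bpf_map_update_elem(rodata_fd, &zero, &val, 0);
 *	// fails with -EPERM: map contents are frozen read-only
 */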
4167 static void bpf_map__destroy(struct bpf_map *map);
4169 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map) in bpf_object__create_map() argument
4172 struct bpf_map_def *def = &map->def; in bpf_object__create_map()
4178 create_attr.name = map->name; in bpf_object__create_map()
4179 create_attr.map_ifindex = map->map_ifindex; in bpf_object__create_map()
4180 create_attr.map_type = def->type; in bpf_object__create_map()
4181 create_attr.map_flags = def->map_flags; in bpf_object__create_map()
4182 create_attr.key_size = def->key_size; in bpf_object__create_map()
4183 create_attr.value_size = def->value_size; in bpf_object__create_map()
4184 create_attr.numa_node = map->numa_node; in bpf_object__create_map()
4186 if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) { in bpf_object__create_map()
4191 pr_warn("map '%s': failed to determine number of system CPUs: %d\n", in bpf_object__create_map()
4192 map->name, nr_cpus); in bpf_object__create_map()
4195 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus); in bpf_object__create_map()
4198 create_attr.max_entries = def->max_entries; in bpf_object__create_map()
4201 if (bpf_map__is_struct_ops(map)) in bpf_object__create_map()
4203 map->btf_vmlinux_value_type_id; in bpf_object__create_map()
4208 if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) { in bpf_object__create_map()
4209 create_attr.btf_fd = btf__fd(obj->btf); in bpf_object__create_map()
4210 create_attr.btf_key_type_id = map->btf_key_type_id; in bpf_object__create_map()
4211 create_attr.btf_value_type_id = map->btf_value_type_id; in bpf_object__create_map()
4214 if (bpf_map_type__is_map_in_map(def->type)) { in bpf_object__create_map()
4215 if (map->inner_map) { in bpf_object__create_map()
4216 err = bpf_object__create_map(obj, map->inner_map); in bpf_object__create_map()
4218 pr_warn("map '%s': failed to create inner map: %d\n", in bpf_object__create_map()
4219 map->name, err); in bpf_object__create_map()
4222 map->inner_map_fd = bpf_map__fd(map->inner_map); in bpf_object__create_map()
4224 if (map->inner_map_fd >= 0) in bpf_object__create_map()
4225 create_attr.inner_map_fd = map->inner_map_fd; in bpf_object__create_map()
4228 map->fd = bpf_create_map_xattr(&create_attr); in bpf_object__create_map()
4229 if (map->fd < 0 && (create_attr.btf_key_type_id || in bpf_object__create_map()
4233 err = -errno; in bpf_object__create_map()
4236 map->name, cp, err); in bpf_object__create_map()
4240 map->btf_key_type_id = 0; in bpf_object__create_map()
4241 map->btf_value_type_id = 0; in bpf_object__create_map()
4242 map->fd = bpf_create_map_xattr(&create_attr); in bpf_object__create_map()
4245 err = map->fd < 0 ? -errno : 0; in bpf_object__create_map()
4247 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) { in bpf_object__create_map()
4248 bpf_map__destroy(map->inner_map); in bpf_object__create_map()
4249 zfree(&map->inner_map); in bpf_object__create_map()
4255 static int init_map_slots(struct bpf_map *map) in init_map_slots() argument
4261 for (i = 0; i < map->init_slots_sz; i++) { in init_map_slots()
4262 if (!map->init_slots[i]) in init_map_slots()
4265 targ_map = map->init_slots[i]; in init_map_slots()
4267 err = bpf_map_update_elem(map->fd, &i, &fd, 0); in init_map_slots()
4269 err = -errno; in init_map_slots()
4270 pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n", in init_map_slots()
4271 map->name, i, targ_map->name, in init_map_slots()
4275 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n", in init_map_slots()
4276 map->name, i, targ_map->name, fd); in init_map_slots()
4279 zfree(&map->init_slots); in init_map_slots()
4280 map->init_slots_sz = 0; in init_map_slots()
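/* BPF-side sketch of what produces init_slots (illustrative names; assumes
 * the BTF-defined map syntax from bpf_helpers.h): inner maps listed in the
 * .values initializer become the init_slots[] entries filled in above:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 2);
 *		__array(values, struct inner_map);
 *	} outer SEC(".maps") = {
 *		.values = { [0] = &inner_a, [1] = &inner_b },
 *	};
 */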
4288 struct bpf_map *map; in bpf_object__create_maps() local
4294 for (i = 0; i < obj->nr_maps; i++) { in bpf_object__create_maps()
4295 map = &obj->maps[i]; in bpf_object__create_maps()
4299 if (map->pin_path) { in bpf_object__create_maps()
4300 err = bpf_object__reuse_map(map); in bpf_object__create_maps()
4302 pr_warn("map '%s': error reusing pinned map\n", in bpf_object__create_maps()
4303 map->name); in bpf_object__create_maps()
4306 if (retried && map->fd < 0) { in bpf_object__create_maps()
4307 pr_warn("map '%s': cannot find pinned map\n", in bpf_object__create_maps()
4308 map->name); in bpf_object__create_maps()
4309 err = -ENOENT; in bpf_object__create_maps()
4314 if (map->fd >= 0) { in bpf_object__create_maps()
4315 pr_debug("map '%s': skipping creation (preset fd=%d)\n", in bpf_object__create_maps()
4316 map->name, map->fd); in bpf_object__create_maps()
4318 err = bpf_object__create_map(obj, map); in bpf_object__create_maps()
4322 pr_debug("map '%s': created successfully, fd=%d\n", in bpf_object__create_maps()
4323 map->name, map->fd); in bpf_object__create_maps()
4325 if (bpf_map__is_internal(map)) { in bpf_object__create_maps()
4326 err = bpf_object__populate_internal_map(obj, map); in bpf_object__create_maps()
4328 zclose(map->fd); in bpf_object__create_maps()
4333 if (map->init_slots_sz) { in bpf_object__create_maps()
4334 err = init_map_slots(map); in bpf_object__create_maps()
4336 zclose(map->fd); in bpf_object__create_maps()
4342 if (map->pin_path && !map->pinned) { in bpf_object__create_maps()
4343 err = bpf_map__pin(map, NULL); in bpf_object__create_maps()
4345 zclose(map->fd); in bpf_object__create_maps()
4346 if (!retried && err == -EEXIST) { in bpf_object__create_maps()
4350 pr_warn("map '%s': failed to auto-pin at '%s': %d\n", in bpf_object__create_maps()
4351 map->name, map->pin_path, err); in bpf_object__create_maps()
4361 pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err); in bpf_object__create_maps()
4364 zclose(obj->maps[j].fd); in bpf_object__create_maps()
4370 /* represents BPF CO-RE field or array element accessor */
4379 /* high-level spec: named fields and array indices only */
4383 /* CO-RE relocation kind */
4385 /* high-level spec length */
4387 /* raw, low-level spec: 1-to-1 with accessor spec string */
4406	/* not a flexible array if it's not inside a struct or has non-zero size */ in is_flex_arr()
4407 if (!acc->name || arr->nelems > 0) in is_flex_arr()
4411 t = btf__type_by_id(btf, acc->type_id); in is_flex_arr()
4412 return acc->idx == btf_vlen(t) - 1; in is_flex_arr()
4474 * Turn bpf_core_relo into a low- and high-level spec representation,
4476 * field bit offset, specified by accessor string. Low-level spec captures
4478 * struct/union members. High-level one only captures semantically meaningful
4493 * int x = &s->a[3]; // access string = '0:1:2:3'
4495 * Low-level spec has 1:1 mapping with each element of access string (it's
4498 * High-level spec will capture only 3 points:
4499	 * - initial zero-index access by pointer (&s->... is the same as &s[0]...); in bpf_core_parse_spec()
4500 * - field 'a' access (corresponds to '2' in low-level spec);
4501 * - array element #3 access (corresponds to '3' in low-level spec).
4503 * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
4507 * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access
4524 return -EINVAL; in bpf_core_parse_spec()
4527 spec->btf = btf; in bpf_core_parse_spec()
4528 spec->root_type_id = type_id; in bpf_core_parse_spec()
4529 spec->relo_kind = relo_kind; in bpf_core_parse_spec()
4531 /* type-based relocations don't have a field access string */ in bpf_core_parse_spec()
4534 return -EINVAL; in bpf_core_parse_spec()
4543 return -EINVAL; in bpf_core_parse_spec()
4544 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN) in bpf_core_parse_spec()
4545 return -E2BIG; in bpf_core_parse_spec()
4547 spec->raw_spec[spec->raw_len++] = access_idx; in bpf_core_parse_spec()
4550 if (spec->raw_len == 0) in bpf_core_parse_spec()
4551 return -EINVAL; in bpf_core_parse_spec()
4555 return -EINVAL; in bpf_core_parse_spec()
4557 access_idx = spec->raw_spec[0]; in bpf_core_parse_spec()
4558 acc = &spec->spec[0]; in bpf_core_parse_spec()
4559 acc->type_id = id; in bpf_core_parse_spec()
4560 acc->idx = access_idx; in bpf_core_parse_spec()
4561 spec->len++; in bpf_core_parse_spec()
4564 if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t)) in bpf_core_parse_spec()
4565 return -EINVAL; in bpf_core_parse_spec()
4568 acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off); in bpf_core_parse_spec()
4573 return -EINVAL; in bpf_core_parse_spec()
4578 spec->bit_offset = access_idx * sz * 8; in bpf_core_parse_spec()
4580 for (i = 1; i < spec->raw_len; i++) { in bpf_core_parse_spec()
4583 return -EINVAL; in bpf_core_parse_spec()
4585 access_idx = spec->raw_spec[i]; in bpf_core_parse_spec()
4586 acc = &spec->spec[spec->len]; in bpf_core_parse_spec()
4593 return -EINVAL; in bpf_core_parse_spec()
4596 spec->bit_offset += bit_offset; in bpf_core_parse_spec()
4599 if (m->name_off) { in bpf_core_parse_spec()
4600 name = btf__name_by_offset(btf, m->name_off); in bpf_core_parse_spec()
4602 return -EINVAL; in bpf_core_parse_spec()
4604 acc->type_id = id; in bpf_core_parse_spec()
4605 acc->idx = access_idx; in bpf_core_parse_spec()
4606 acc->name = name; in bpf_core_parse_spec()
4607 spec->len++; in bpf_core_parse_spec()
4610 id = m->type; in bpf_core_parse_spec()
4615 t = skip_mods_and_typedefs(btf, a->type, &id); in bpf_core_parse_spec()
4617 return -EINVAL; in bpf_core_parse_spec()
4619 flex = is_flex_arr(btf, acc - 1, a); in bpf_core_parse_spec()
4620 if (!flex && access_idx >= a->nelems) in bpf_core_parse_spec()
4621 return -EINVAL; in bpf_core_parse_spec()
4623 spec->spec[spec->len].type_id = id; in bpf_core_parse_spec()
4624 spec->spec[spec->len].idx = access_idx; in bpf_core_parse_spec()
4625 spec->len++; in bpf_core_parse_spec()
4630 spec->bit_offset += access_idx * sz * 8; in bpf_core_parse_spec()
4634 return -EINVAL; in bpf_core_parse_spec()
4651 * underscore is ignored by BPF CO-RE relocation during relocation matching.
4658 for (i = n - 5; i >= 0; i--) { in bpf_core_essential_name_len()
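/* e.g. for a flavor like "task_struct___v2" the essential length covers
 * just "task_struct"; names without a triple-underscore suffix are used
 * whole.
 */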
4673 free(cand_ids->data); in bpf_core_free_cands()
4690 return ERR_PTR(-EINVAL); in bpf_core_find_cands()
4692 local_name = btf__name_by_offset(local_btf, local_t->name_off); in bpf_core_find_cands()
4694 return ERR_PTR(-EINVAL); in bpf_core_find_cands()
4699 return ERR_PTR(-ENOMEM); in bpf_core_find_cands()
4707 targ_name = btf__name_by_offset(targ_btf, t->name_off); in bpf_core_find_cands()
4716 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s\n", in bpf_core_find_cands()
4719 new_ids = libbpf_reallocarray(cand_ids->data, in bpf_core_find_cands()
4720 cand_ids->len + 1, in bpf_core_find_cands()
4721 sizeof(*cand_ids->data)); in bpf_core_find_cands()
4723 err = -ENOMEM; in bpf_core_find_cands()
4726 cand_ids->data = new_ids; in bpf_core_find_cands()
4727 cand_ids->data[cand_ids->len++] = i; in bpf_core_find_cands()
4739 * - any two STRUCTs/UNIONs are compatible and can be mixed;
4740 * - any two FWDs are compatible, if their names match (modulo flavor suffix);
4741 * - any two PTRs are always compatible;
4742 * - for ENUMs, names should be the same (ignoring flavor suffix) or at
4744 * - for ENUMs, check sizes, names are ignored;
4745 * - for INT, size and signedness are ignored;
4746 * - for ARRAY, dimensionality is ignored, element types are checked for
4748	 * - everything else shouldn't ever be a target of relocation.
4750 * more experience with using BPF CO-RE relocations.
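/* For instance (per the rules above): a local 'int' field can match a
 * target 'long' (both are INTs, size ignored), while a local 'int' against
 * a target 'int *' is rejected because the BTF kinds differ.
 */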
4763 return -EINVAL; in bpf_core_fields_are_compat()
4779 local_type->name_off); in bpf_core_fields_are_compat()
4780 targ_name = btf__name_by_offset(targ_btf, targ_type->name_off); in bpf_core_fields_are_compat()
4783 /* one of them is anonymous or both w/ same flavor-less names */ in bpf_core_fields_are_compat()
4789 /* just reject deprecated bitfield-like integers; all other in bpf_core_fields_are_compat()
4795 local_id = btf_array(local_type)->type; in bpf_core_fields_are_compat()
4796 targ_id = btf_array(targ_type)->type; in bpf_core_fields_are_compat()
4806 * Given single high-level named field accessor in local type, find
4807 * corresponding high-level accessor for a target type. Along the way,
4808 * maintain low-level spec for target as well. Also keep updating target
4836 return -EINVAL; in bpf_core_match_member()
4840 local_id = local_acc->type_id; in bpf_core_match_member()
4842 local_member = btf_members(local_type) + local_acc->idx; in bpf_core_match_member()
4843 local_name = btf__name_by_offset(local_btf, local_member->name_off); in bpf_core_match_member()
4853 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN) in bpf_core_match_member()
4854 return -E2BIG; in bpf_core_match_member()
4857 spec->bit_offset += bit_offset; in bpf_core_match_member()
4858 spec->raw_spec[spec->raw_len++] = i; in bpf_core_match_member()
4860 targ_name = btf__name_by_offset(targ_btf, m->name_off); in bpf_core_match_member()
4864 targ_btf, m->type, in bpf_core_match_member()
4872 targ_acc = &spec->spec[spec->len++]; in bpf_core_match_member()
4873 targ_acc->type_id = targ_id; in bpf_core_match_member()
4874 targ_acc->idx = i; in bpf_core_match_member()
4875 targ_acc->name = targ_name; in bpf_core_match_member()
4877 *next_targ_id = m->type; in bpf_core_match_member()
4879 local_member->type, in bpf_core_match_member()
4880 targ_btf, m->type); in bpf_core_match_member()
4882 spec->len--; /* pop accessor */ in bpf_core_match_member()
4886 spec->bit_offset -= bit_offset; in bpf_core_match_member()
4887 spec->raw_len--; in bpf_core_match_member()
4894 * type-based CO-RE relocations and follow slightly different rules than
4895 * field-based relocations. This function assumes that root types were already
4896 * checked for name match. Beyond that initial root-level name check, names
4898 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
4901 * - for ENUMs, the size is ignored;
4902 * - for INT, size and signedness are ignored;
4903 * - for ARRAY, dimensionality is ignored, element types are checked for
4905 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
4906	 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
4907 * - FUNC_PROTOs are compatible if they have compatible signature: same
4910 * more experience with using BPF CO-RE relocations.
4925 depth--; in bpf_core_types_are_compat()
4927 return -EINVAL; in bpf_core_types_are_compat()
4932 return -EINVAL; in bpf_core_types_are_compat()
4945 /* just reject deprecated bitfield-like integers; all other in bpf_core_types_are_compat()
4950 local_id = local_type->type; in bpf_core_types_are_compat()
4951 targ_id = targ_type->type; in bpf_core_types_are_compat()
4954 local_id = btf_array(local_type)->type; in bpf_core_types_are_compat()
4955 targ_id = btf_array(targ_type)->type; in bpf_core_types_are_compat()
4968 skip_mods_and_typedefs(local_btf, local_p->type, &local_id); in bpf_core_types_are_compat()
4969 skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id); in bpf_core_types_are_compat()
4976 skip_mods_and_typedefs(local_btf, local_type->type, &local_id); in bpf_core_types_are_compat()
4977 skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id); in bpf_core_types_are_compat()
4989 * target spec (high-level, low-level + bit offset).
5001 targ_spec->btf = targ_btf; in bpf_core_spec_match()
5002 targ_spec->root_type_id = targ_id; in bpf_core_spec_match()
5003 targ_spec->relo_kind = local_spec->relo_kind; in bpf_core_spec_match()
5005 if (core_relo_is_type_based(local_spec->relo_kind)) { in bpf_core_spec_match()
5006 return bpf_core_types_are_compat(local_spec->btf, in bpf_core_spec_match()
5007 local_spec->root_type_id, in bpf_core_spec_match()
5011 local_acc = &local_spec->spec[0]; in bpf_core_spec_match()
5012 targ_acc = &targ_spec->spec[0]; in bpf_core_spec_match()
5014 if (core_relo_is_enumval_based(local_spec->relo_kind)) { in bpf_core_spec_match()
5020 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id); in bpf_core_spec_match()
5024 local_essent_len = bpf_core_essential_name_len(local_acc->name); in bpf_core_spec_match()
5027 targ_name = btf__name_by_offset(targ_spec->btf, e->name_off); in bpf_core_spec_match()
5031 if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) { in bpf_core_spec_match()
5032 targ_acc->type_id = targ_id; in bpf_core_spec_match()
5033 targ_acc->idx = i; in bpf_core_spec_match()
5034 targ_acc->name = targ_name; in bpf_core_spec_match()
5035 targ_spec->len++; in bpf_core_spec_match()
5036 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx; in bpf_core_spec_match()
5037 targ_spec->raw_len++; in bpf_core_spec_match()
5044 if (!core_relo_is_field_based(local_spec->relo_kind)) in bpf_core_spec_match()
5045 return -EINVAL; in bpf_core_spec_match()
5047 for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) { in bpf_core_spec_match()
5048 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, in bpf_core_spec_match()
5051 return -EINVAL; in bpf_core_spec_match()
5053 if (local_acc->name) { in bpf_core_spec_match()
5054 matched = bpf_core_match_member(local_spec->btf, in bpf_core_spec_match()
5073 flex = is_flex_arr(targ_btf, targ_acc - 1, a); in bpf_core_spec_match()
5074 if (!flex && local_acc->idx >= a->nelems) in bpf_core_spec_match()
5076 if (!skip_mods_and_typedefs(targ_btf, a->type, in bpf_core_spec_match()
5078 return -EINVAL; in bpf_core_spec_match()
5082 if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN) in bpf_core_spec_match()
5083 return -E2BIG; in bpf_core_spec_match()
5085 targ_acc->type_id = targ_id; in bpf_core_spec_match()
5086 targ_acc->idx = local_acc->idx; in bpf_core_spec_match()
5087 targ_acc->name = NULL; in bpf_core_spec_match()
5088 targ_spec->len++; in bpf_core_spec_match()
5089 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx; in bpf_core_spec_match()
5090 targ_spec->raw_len++; in bpf_core_spec_match()
5095 targ_spec->bit_offset += local_acc->idx * sz * 8; in bpf_core_spec_match()
5118 if (relo->kind == BPF_FIELD_EXISTS) { in bpf_core_calc_field_relo()
5124 return -EUCLEAN; /* request instruction poisoning */ in bpf_core_calc_field_relo()
5126 acc = &spec->spec[spec->len - 1]; in bpf_core_calc_field_relo()
5127 t = btf__type_by_id(spec->btf, acc->type_id); in bpf_core_calc_field_relo()
5130 if (!acc->name) { in bpf_core_calc_field_relo()
5131 if (relo->kind == BPF_FIELD_BYTE_OFFSET) { in bpf_core_calc_field_relo()
5132 *val = spec->bit_offset / 8; in bpf_core_calc_field_relo()
5134 sz = btf__resolve_size(spec->btf, acc->type_id); in bpf_core_calc_field_relo()
5136 return -EINVAL; in bpf_core_calc_field_relo()
5138 *type_id = acc->type_id; in bpf_core_calc_field_relo()
5139 } else if (relo->kind == BPF_FIELD_BYTE_SIZE) { in bpf_core_calc_field_relo()
5140 sz = btf__resolve_size(spec->btf, acc->type_id); in bpf_core_calc_field_relo()
5142 return -EINVAL; in bpf_core_calc_field_relo()
5146 prog->name, relo->kind, relo->insn_off / 8); in bpf_core_calc_field_relo()
5147 return -EINVAL; in bpf_core_calc_field_relo()
5154 m = btf_members(t) + acc->idx; in bpf_core_calc_field_relo()
5155 mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id); in bpf_core_calc_field_relo()
5156 bit_off = spec->bit_offset; in bpf_core_calc_field_relo()
5157 bit_sz = btf_member_bitfield_size(t, acc->idx); in bpf_core_calc_field_relo()
5161 byte_sz = mt->size; in bpf_core_calc_field_relo()
5164 while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) { in bpf_core_calc_field_relo()
5166 /* bitfield can't be read with 64-bit read */ in bpf_core_calc_field_relo()
5168 prog->name, relo->kind, relo->insn_off / 8); in bpf_core_calc_field_relo()
5169 return -E2BIG; in bpf_core_calc_field_relo()
5175 sz = btf__resolve_size(spec->btf, field_type_id); in bpf_core_calc_field_relo()
5177 return -EINVAL; in bpf_core_calc_field_relo()
5179 byte_off = spec->bit_offset / 8; in bpf_core_calc_field_relo()
5190 switch (relo->kind) { in bpf_core_calc_field_relo()
5210 *val = 64 - (bit_off + bit_sz - byte_off * 8); in bpf_core_calc_field_relo()
5212 *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8); in bpf_core_calc_field_relo()
5216 *val = 64 - bit_sz; in bpf_core_calc_field_relo()
5222 return -EOPNOTSUPP; in bpf_core_calc_field_relo()
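/* Worked example (hypothetical field): a 3-bit bitfield at bit offset 13,
 * loaded with a 4-byte read at byte_off 0, yields on little-endian
 * BPF_FIELD_LSHIFT_U64 = 64 - (13 + 3 - 0) = 48 and
 * BPF_FIELD_RSHIFT_U64 = 64 - 3 = 61.
 */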
5234 /* type-based relos return zero when target type is not found */ in bpf_core_calc_type_relo()
5240 switch (relo->kind) { in bpf_core_calc_type_relo()
5242 *val = spec->root_type_id; in bpf_core_calc_type_relo()
5248 sz = btf__resolve_size(spec->btf, spec->root_type_id); in bpf_core_calc_type_relo()
5250 return -EINVAL; in bpf_core_calc_type_relo()
5256 return -EOPNOTSUPP; in bpf_core_calc_type_relo()
5269 switch (relo->kind) { in bpf_core_calc_enumval_relo()
5275 return -EUCLEAN; /* request instruction poisoning */ in bpf_core_calc_enumval_relo()
5276 t = btf__type_by_id(spec->btf, spec->spec[0].type_id); in bpf_core_calc_enumval_relo()
5277 e = btf_enum(t) + spec->spec[0].idx; in bpf_core_calc_enumval_relo()
5278 *val = e->val; in bpf_core_calc_enumval_relo()
5281 return -EOPNOTSUPP; in bpf_core_calc_enumval_relo()
5301 * memory loads of pointers and integers; this is necessary for 32-bit
5325 int err = -EOPNOTSUPP; in bpf_core_calc_relo()
5327 res->orig_val = 0; in bpf_core_calc_relo()
5328 res->new_val = 0; in bpf_core_calc_relo()
5329 res->poison = false; in bpf_core_calc_relo()
5330 res->validate = true; in bpf_core_calc_relo()
5331 res->fail_memsz_adjust = false; in bpf_core_calc_relo()
5332 res->orig_sz = res->new_sz = 0; in bpf_core_calc_relo()
5333 res->orig_type_id = res->new_type_id = 0; in bpf_core_calc_relo()
5335 if (core_relo_is_field_based(relo->kind)) { in bpf_core_calc_relo()
5337 &res->orig_val, &res->orig_sz, in bpf_core_calc_relo()
5338 &res->orig_type_id, &res->validate); in bpf_core_calc_relo()
5340 &res->new_val, &res->new_sz, in bpf_core_calc_relo()
5341 &res->new_type_id, NULL); in bpf_core_calc_relo()
5348 res->fail_memsz_adjust = false; in bpf_core_calc_relo()
5349 if (res->orig_sz != res->new_sz) { in bpf_core_calc_relo()
5352 orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id); in bpf_core_calc_relo()
5353 new_t = btf__type_by_id(targ_spec->btf, res->new_type_id); in bpf_core_calc_relo()
5357 * - reading a 32-bit kernel pointer, while on BPF in bpf_core_calc_relo()
5358 * size pointers are always 64-bit; in this case in bpf_core_calc_relo()
5361 * zero-extended upper 32-bits; in bpf_core_calc_relo()
5362 * - reading unsigned integers, again due to in bpf_core_calc_relo()
5363 * zero-extension is preserving the value correctly. in bpf_core_calc_relo()
5379 res->fail_memsz_adjust = true; in bpf_core_calc_relo()
5381 } else if (core_relo_is_type_based(relo->kind)) { in bpf_core_calc_relo()
5382 err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val); in bpf_core_calc_relo()
5383 err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val); in bpf_core_calc_relo()
5384 } else if (core_relo_is_enumval_based(relo->kind)) { in bpf_core_calc_relo()
5385 err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val); in bpf_core_calc_relo()
5386 err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val); in bpf_core_calc_relo()
5390 if (err == -EUCLEAN) { in bpf_core_calc_relo()
5392 res->poison = true; in bpf_core_calc_relo()
5394 } else if (err == -EOPNOTSUPP) { in bpf_core_calc_relo()
5396 pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n", in bpf_core_calc_relo()
5397 prog->name, relo_idx, core_relo_kind_str(relo->kind), in bpf_core_calc_relo()
5398 relo->kind, relo->insn_off / 8); in bpf_core_calc_relo()
5412 prog->name, relo_idx, insn_idx); in bpf_core_poison_insn()
5413 insn->code = BPF_JMP | BPF_CALL; in bpf_core_poison_insn()
5414 insn->dst_reg = 0; in bpf_core_poison_insn()
5415 insn->src_reg = 0; in bpf_core_poison_insn()
5416 insn->off = 0; in bpf_core_poison_insn()
5421 insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */ in bpf_core_poison_insn()
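/* If the verifier ever reaches a poisoned instruction, it rejects the
 * program with a log message along the lines of
 * "invalid func unknown#195896080" (exact wording may vary by kernel
 * version), which makes the failed relocation easy to spot.
 */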
5426 return insn->code == (BPF_LD | BPF_IMM | BPF_DW); in is_ldimm64()
5431 switch (BPF_SIZE(insn->code)) { in insn_bpf_size_to_bytes()
5436 default: return -1; in insn_bpf_size_to_bytes()
5447 default: return -1; in insn_bytes_to_bpf_size()
5456 * Expected insn->imm value is determined using relocation kind and local
5457 * spec, and is checked before patching instruction. If actual insn->imm value
5463 * 3. rX = <imm64> (load with 64-bit immediate value);
5478 if (relo->insn_off % BPF_INSN_SZ) in bpf_core_patch_insn()
5479 return -EINVAL; in bpf_core_patch_insn()
5480 insn_idx = relo->insn_off / BPF_INSN_SZ; in bpf_core_patch_insn()
5482 * program's frame of reference; (sub-)program code is not yet in bpf_core_patch_insn()
5483 * relocated, so it's enough to just subtract in-section offset in bpf_core_patch_insn()
5485 insn_idx = insn_idx - prog->sec_insn_off; in bpf_core_patch_insn()
5486 insn = &prog->insns[insn_idx]; in bpf_core_patch_insn()
5487 class = BPF_CLASS(insn->code); in bpf_core_patch_insn()
5489 if (res->poison) { in bpf_core_patch_insn()
5500 orig_val = res->orig_val; in bpf_core_patch_insn()
5501 new_val = res->new_val; in bpf_core_patch_insn()
5506 if (BPF_SRC(insn->code) != BPF_K) in bpf_core_patch_insn()
5507 return -EINVAL; in bpf_core_patch_insn()
5508 if (res->validate && insn->imm != orig_val) { in bpf_core_patch_insn()
5509 pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n", in bpf_core_patch_insn()
5510 prog->name, relo_idx, in bpf_core_patch_insn()
5511 insn_idx, insn->imm, orig_val, new_val); in bpf_core_patch_insn()
5512 return -EINVAL; in bpf_core_patch_insn()
5514 orig_val = insn->imm; in bpf_core_patch_insn()
5515 insn->imm = new_val; in bpf_core_patch_insn()
5516 pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n", in bpf_core_patch_insn()
5517 prog->name, relo_idx, insn_idx, in bpf_core_patch_insn()
5523 if (res->validate && insn->off != orig_val) { in bpf_core_patch_insn()
5524 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n", in bpf_core_patch_insn()
5525 prog->name, relo_idx, insn_idx, insn->off, orig_val, new_val); in bpf_core_patch_insn()
5526 return -EINVAL; in bpf_core_patch_insn()
5530 prog->name, relo_idx, insn_idx, new_val); in bpf_core_patch_insn()
5531 return -ERANGE; in bpf_core_patch_insn()
5533 if (res->fail_memsz_adjust) { in bpf_core_patch_insn()
5536 prog->name, relo_idx, insn_idx); in bpf_core_patch_insn()
5540 orig_val = insn->off; in bpf_core_patch_insn()
5541 insn->off = new_val; in bpf_core_patch_insn()
5542 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n", in bpf_core_patch_insn()
5543 prog->name, relo_idx, insn_idx, orig_val, new_val); in bpf_core_patch_insn()
5545 if (res->new_sz != res->orig_sz) { in bpf_core_patch_insn()
5549 if (insn_bytes_sz != res->orig_sz) { in bpf_core_patch_insn()
5551 prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz); in bpf_core_patch_insn()
5552 return -EINVAL; in bpf_core_patch_insn()
5555 insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz); in bpf_core_patch_insn()
5558 prog->name, relo_idx, insn_idx, res->new_sz); in bpf_core_patch_insn()
5559 return -EINVAL; in bpf_core_patch_insn()
5562 insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code); in bpf_core_patch_insn()
5563 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n", in bpf_core_patch_insn()
5564 prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz); in bpf_core_patch_insn()
5572 insn_idx + 1 >= prog->insns_cnt || in bpf_core_patch_insn()
5576 prog->name, relo_idx, insn_idx); in bpf_core_patch_insn()
5577 return -EINVAL; in bpf_core_patch_insn()
5581 if (res->validate && imm != orig_val) { in bpf_core_patch_insn()
5582 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n", in bpf_core_patch_insn()
5583 prog->name, relo_idx, in bpf_core_patch_insn()
5586 return -EINVAL; in bpf_core_patch_insn()
5590 insn[1].imm = 0; /* currently only 32-bit values are supported */ in bpf_core_patch_insn()
5591 pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n", in bpf_core_patch_insn()
5592 prog->name, relo_idx, insn_idx, in bpf_core_patch_insn()
5598 prog->name, relo_idx, insn_idx, insn->code, in bpf_core_patch_insn()
5599 insn->src_reg, insn->dst_reg, insn->off, insn->imm); in bpf_core_patch_insn()
5600 return -EINVAL; in bpf_core_patch_insn()
5607 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
5608 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
5618 type_id = spec->root_type_id; in bpf_core_dump_spec()
5619 t = btf__type_by_id(spec->btf, type_id); in bpf_core_dump_spec()
5620 s = btf__name_by_offset(spec->btf, t->name_off); in bpf_core_dump_spec()
5624 if (core_relo_is_type_based(spec->relo_kind)) in bpf_core_dump_spec()
5627 if (core_relo_is_enumval_based(spec->relo_kind)) { in bpf_core_dump_spec()
5628 t = skip_mods_and_typedefs(spec->btf, type_id, NULL); in bpf_core_dump_spec()
5629 e = btf_enum(t) + spec->raw_spec[0]; in bpf_core_dump_spec()
5630 s = btf__name_by_offset(spec->btf, e->name_off); in bpf_core_dump_spec()
5632 libbpf_print(level, "::%s = %u", s, e->val); in bpf_core_dump_spec()
5636 if (core_relo_is_field_based(spec->relo_kind)) { in bpf_core_dump_spec()
5637 for (i = 0; i < spec->len; i++) { in bpf_core_dump_spec()
5638 if (spec->spec[i].name) in bpf_core_dump_spec()
5639 libbpf_print(level, ".%s", spec->spec[i].name); in bpf_core_dump_spec()
5640 else if (i > 0 || spec->spec[i].idx > 0) in bpf_core_dump_spec()
5641 libbpf_print(level, "[%u]", spec->spec[i].idx); in bpf_core_dump_spec()
5645 for (i = 0; i < spec->raw_len; i++) in bpf_core_dump_spec()
5646 libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]); in bpf_core_dump_spec()
5648 if (spec->bit_offset % 8) in bpf_core_dump_spec()
5650 spec->bit_offset / 8, spec->bit_offset % 8); in bpf_core_dump_spec()
5652 libbpf_print(level, " @ offset %u)", spec->bit_offset / 8); in bpf_core_dump_spec()
5673 * CO-RE relocate single instruction.
5686 * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
5698 * high-level spec accessors, meaning that all named fields should match,
5704	 * ambiguity, CO-RE relocation will fail. This is necessary to accommodate
5714 * efficient memory-wise and not significantly worse (if not better)
5715 * CPU-wise compared to prebuilding a map from all local type names to
5730 const void *type_key = u32_as_hash_key(relo->type_id); in bpf_core_apply_relo()
5739 local_id = relo->type_id; in bpf_core_apply_relo()
5742 return -EINVAL; in bpf_core_apply_relo()
5744 local_name = btf__name_by_offset(local_btf, local_type->name_off); in bpf_core_apply_relo()
5746 return -EINVAL; in bpf_core_apply_relo()
5748 spec_str = btf__name_by_offset(local_btf, relo->access_str_off); in bpf_core_apply_relo()
5750 return -EINVAL; in bpf_core_apply_relo()
5752 err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec); in bpf_core_apply_relo()
5755 prog->name, relo_idx, local_id, btf_kind_str(local_type), in bpf_core_apply_relo()
5758 return -EINVAL; in bpf_core_apply_relo()
5761 pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog->name, in bpf_core_apply_relo()
5762 relo_idx, core_relo_kind_str(relo->kind), relo->kind); in bpf_core_apply_relo()
5767 if (relo->kind == BPF_TYPE_ID_LOCAL) { in bpf_core_apply_relo()
5778 prog->name, relo_idx, core_relo_kind_str(relo->kind), relo->kind); in bpf_core_apply_relo()
5779 return -EOPNOTSUPP; in bpf_core_apply_relo()
5786 prog->name, relo_idx, local_id, btf_kind_str(local_type), in bpf_core_apply_relo()
5797 for (i = 0, j = 0; i < cand_ids->len; i++) { in bpf_core_apply_relo()
5798 cand_id = cand_ids->data[i]; in bpf_core_apply_relo()
5802 prog->name, relo_idx, i); in bpf_core_apply_relo()
5808 pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog->name, in bpf_core_apply_relo()
5809 relo_idx, err == 0 ? "non-matching" : "matching", i); in bpf_core_apply_relo()
5828 prog->name, relo_idx, cand_spec.bit_offset, in bpf_core_apply_relo()
5830 return -EINVAL; in bpf_core_apply_relo()
5837 prog->name, relo_idx, in bpf_core_apply_relo()
5840 return -EINVAL; in bpf_core_apply_relo()
5843 cand_ids->data[j++] = cand_spec.root_type_id; in bpf_core_apply_relo()
5855 cand_ids->len = j; in bpf_core_apply_relo()
5870 prog->name, relo_idx); in bpf_core_apply_relo()
5883 prog->name, relo_idx, relo->insn_off, err); in bpf_core_apply_relo()
5884 return -EINVAL; in bpf_core_apply_relo()
5903 if (obj->btf_ext->core_relo_info.len == 0) in bpf_object__relocate_core()
5909 targ_btf = obj->btf_vmlinux; in bpf_object__relocate_core()
5921 seg = &obj->btf_ext->core_relo_info; in bpf_object__relocate_core()
5923 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); in bpf_object__relocate_core()
5925 err = -EINVAL; in bpf_object__relocate_core()
5931 * prog->sec_idx to do a proper search by section index and in bpf_object__relocate_core()
5935 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate_core()
5936 if (strcmp(obj->programs[i].sec_name, sec_name) == 0) { in bpf_object__relocate_core()
5937 prog = &obj->programs[i]; in bpf_object__relocate_core()
5943 return -ENOENT; in bpf_object__relocate_core()
5945 sec_idx = prog->sec_idx; in bpf_object__relocate_core()
5947 pr_debug("sec '%s': found %d CO-RE relocations\n", in bpf_object__relocate_core()
5948 sec_name, sec->num_info); in bpf_object__relocate_core()
5951 insn_idx = rec->insn_off / BPF_INSN_SZ; in bpf_object__relocate_core()
5958	 * This is similar to what the x86-64 linker does for relocations. in bpf_object__relocate_core()
5962 …pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subpro… in bpf_object__relocate_core()
5966 /* no need to apply CO-RE relocation if the program is in bpf_object__relocate_core()
5969 if (!prog->load) in bpf_object__relocate_core()
5972 err = bpf_core_apply_relo(prog, rec, i, obj->btf, in bpf_object__relocate_core()
5976 prog->name, i, err); in bpf_object__relocate_core()
5983 /* obj->btf_vmlinux is freed at the end of object load phase */ in bpf_object__relocate_core()
5984 if (targ_btf != obj->btf_vmlinux) in bpf_object__relocate_core()
5988 bpf_core_free_cands(entry->value); in bpf_object__relocate_core()
5996 * - map references;
5997 * - global variable references;
5998 * - extern references.
6005 for (i = 0; i < prog->nr_reloc; i++) { in bpf_object__relocate_data()
6006 struct reloc_desc *relo = &prog->reloc_desc[i]; in bpf_object__relocate_data()
6007 struct bpf_insn *insn = &prog->insns[relo->insn_idx]; in bpf_object__relocate_data()
6010 switch (relo->type) { in bpf_object__relocate_data()
6013 insn[0].imm = obj->maps[relo->map_idx].fd; in bpf_object__relocate_data()
6014 relo->processed = true; in bpf_object__relocate_data()
6018 insn[1].imm = insn[0].imm + relo->sym_off; in bpf_object__relocate_data()
6019 insn[0].imm = obj->maps[relo->map_idx].fd; in bpf_object__relocate_data()
6020 relo->processed = true; in bpf_object__relocate_data()
6023 ext = &obj->externs[relo->sym_off]; in bpf_object__relocate_data()
6024 if (ext->type == EXT_KCFG) { in bpf_object__relocate_data()
6026 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd; in bpf_object__relocate_data()
6027 insn[1].imm = ext->kcfg.data_off; in bpf_object__relocate_data()
6029 if (ext->ksym.type_id) { /* typed ksyms */ in bpf_object__relocate_data()
6031 insn[0].imm = ext->ksym.vmlinux_btf_id; in bpf_object__relocate_data()
6033 insn[0].imm = (__u32)ext->ksym.addr; in bpf_object__relocate_data()
6034 insn[1].imm = ext->ksym.addr >> 32; in bpf_object__relocate_data()
6037 relo->processed = true; in bpf_object__relocate_data()
6044 prog->name, i, relo->type); in bpf_object__relocate_data()
6045 return -EINVAL; in bpf_object__relocate_data()
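/* Encoding example for the untyped-ksym case above: an address such as
 * 0xffffffff81234568 lands in the ldimm64 pair as
 * insn[0].imm = 0x81234568 (low 32 bits) and
 * insn[1].imm = 0xffffffff (high 32 bits).
 */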
6066 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); in adjust_prog_btf_ext_info()
6068 return -EINVAL; in adjust_prog_btf_ext_info()
6069 if (strcmp(sec_name, prog->sec_name) != 0) in adjust_prog_btf_ext_info()
6075 if (insn_off < prog->sec_insn_off) in adjust_prog_btf_ext_info()
6077 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt) in adjust_prog_btf_ext_info()
6082 copy_end = rec + ext_info->rec_size; in adjust_prog_btf_ext_info()
6086 return -ENOENT; in adjust_prog_btf_ext_info()
6088 /* append func/line info of a given (sub-)program to the main in adjust_prog_btf_ext_info()
6091 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size; in adjust_prog_btf_ext_info()
6092 new_sz = old_sz + (copy_end - copy_start); in adjust_prog_btf_ext_info()
6095 return -ENOMEM; in adjust_prog_btf_ext_info()
6097 *prog_rec_cnt = new_sz / ext_info->rec_size; in adjust_prog_btf_ext_info()
6098 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start); in adjust_prog_btf_ext_info()
6100 /* Kernel instruction offsets are in units of 8-byte in adjust_prog_btf_ext_info()
6106 off_adj = prog->sub_insn_off - prog->sec_insn_off; in adjust_prog_btf_ext_info()
6109 for (; rec < rec_end; rec += ext_info->rec_size) { in adjust_prog_btf_ext_info()
6114 *prog_rec_sz = ext_info->rec_size; in adjust_prog_btf_ext_info()
6118 return -ENOENT; in adjust_prog_btf_ext_info()
6131 if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC)) in reloc_prog_func_and_line_info()
6137 if (main_prog != prog && !main_prog->func_info) in reloc_prog_func_and_line_info()
6140 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info, in reloc_prog_func_and_line_info()
6141 &main_prog->func_info, in reloc_prog_func_and_line_info()
6142 &main_prog->func_info_cnt, in reloc_prog_func_and_line_info()
6143 &main_prog->func_info_rec_size); in reloc_prog_func_and_line_info()
6145 if (err != -ENOENT) { in reloc_prog_func_and_line_info()
6147 prog->name, err); in reloc_prog_func_and_line_info()
6150 if (main_prog->func_info) { in reloc_prog_func_and_line_info()
6155 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name); in reloc_prog_func_and_line_info()
6160 prog->name); in reloc_prog_func_and_line_info()
6165 if (main_prog != prog && !main_prog->line_info) in reloc_prog_func_and_line_info()
6168 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info, in reloc_prog_func_and_line_info()
6169 &main_prog->line_info, in reloc_prog_func_and_line_info()
6170 &main_prog->line_info_cnt, in reloc_prog_func_and_line_info()
6171 &main_prog->line_info_rec_size); in reloc_prog_func_and_line_info()
6173 if (err != -ENOENT) { in reloc_prog_func_and_line_info()
6175 prog->name, err); in reloc_prog_func_and_line_info()
6178 if (main_prog->line_info) { in reloc_prog_func_and_line_info()
6183 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name); in reloc_prog_func_and_line_info()
6188 prog->name); in reloc_prog_func_and_line_info()
6198 if (insn_idx == relo->insn_idx) in cmp_relo_by_insn_idx()
6200 return insn_idx < relo->insn_idx ? -1 : 1; in cmp_relo_by_insn_idx()
6205 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc, in find_prog_insn_relo()
6206 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx); in find_prog_insn_relo()
6223 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) { in bpf_object__reloc_code()
6224 insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; in bpf_object__reloc_code()
6229 if (relo && relo->type != RELO_CALL) { in bpf_object__reloc_code()
6231 prog->name, insn_idx, relo->type); in bpf_object__reloc_code()
6232 return -LIBBPF_ERRNO__RELOC; in bpf_object__reloc_code()
6235 /* sub-program instruction index is a combination of in bpf_object__reloc_code()
6238 * call always has imm = -1, but for static functions in bpf_object__reloc_code()
6239 * relocation is against STT_SECTION and insn->imm in bpf_object__reloc_code()
6242 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1; in bpf_object__reloc_code()
6247 * offset necessary, insns->imm is relative to in bpf_object__reloc_code()
6250 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1; in bpf_object__reloc_code()
6253 /* we enforce that sub-programs should be in .text section */ in bpf_object__reloc_code()
6254 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx); in bpf_object__reloc_code()
6256 pr_warn("prog '%s': no .text section found yet sub-program call exists\n", in bpf_object__reloc_code()
6257 prog->name); in bpf_object__reloc_code()
6258 return -LIBBPF_ERRNO__RELOC; in bpf_object__reloc_code()
6264 	 * - append it at the end of main program's instructions block; in bpf_object__reloc_code()
6265 	 * - process it recursively, while current program is put on hold; in bpf_object__reloc_code()
6266 	 * - if that subprogram calls some other not yet processed in bpf_object__reloc_code()
6271 if (subprog->sub_insn_off == 0) { in bpf_object__reloc_code()
6272 subprog->sub_insn_off = main_prog->insns_cnt; in bpf_object__reloc_code()
6274 new_cnt = main_prog->insns_cnt + subprog->insns_cnt; in bpf_object__reloc_code()
6275 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns)); in bpf_object__reloc_code()
6277 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name); in bpf_object__reloc_code()
6278 return -ENOMEM; in bpf_object__reloc_code()
6280 main_prog->insns = insns; in bpf_object__reloc_code()
6281 main_prog->insns_cnt = new_cnt; in bpf_object__reloc_code()
6283 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns, in bpf_object__reloc_code()
6284 subprog->insns_cnt * sizeof(*insns)); in bpf_object__reloc_code()
6286 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n", in bpf_object__reloc_code()
6287 main_prog->name, subprog->insns_cnt, subprog->name); in bpf_object__reloc_code()
6294 /* main_prog->insns memory could have been re-allocated, so in bpf_object__reloc_code()
6297 insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; in bpf_object__reloc_code()
6303 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1; in bpf_object__reloc_code()
6306 relo->processed = true; in bpf_object__reloc_code()
6309 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off); in bpf_object__reloc_code()
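/*
 * Illustrative sketch (not part of libbpf): how the pc-relative call
 * immediate computed above behaves. The kernel resolves BPF_PSEUDO_CALL
 * targets relative to the instruction *following* the call, hence the
 * extra "- 1".
 */
static int call_insn_imm(int subprog_insn_off, int call_insn_idx)
{
	/* subprog appended at index 100, call at index 5 -> imm = 94 */
	return subprog_insn_off - call_insn_idx - 1;
}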
6316 * Relocate sub-program calls.
6318 * Algorithm operates as follows. Each entry-point BPF program (referred to as
6319 * main prog) is processed separately. For each subprog (non-entry functions,
6328 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6344 * subprog->sub_insn_off as zero at all times and won't be appended to current
6353 * +--------+ +-------+
6355 * +--+---+ +--+-+-+ +---+--+
6357 * +--+---+ +------+ +---+--+
6360 * +---+-------+ +------+----+
6362 * +-----------+ +-----------+
6367 * +-----------+------+
6369 * +-----------+------+
6374 * +-----------+------+------+
6376 * +-----------+------+------+
6385 * +-----------+------+
6387 * +-----------+------+
6390 * +-----------+------+------+
6392 * +-----------+------+------+
6405 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate_calls()
6406 subprog = &obj->programs[i]; in bpf_object__relocate_calls()
6410 subprog->sub_insn_off = 0; in bpf_object__relocate_calls()
6411 for (j = 0; j < subprog->nr_reloc; j++) in bpf_object__relocate_calls()
6412 if (subprog->reloc_desc[j].type == RELO_CALL) in bpf_object__relocate_calls()
6413 subprog->reloc_desc[j].processed = false; in bpf_object__relocate_calls()
6431 if (obj->btf_ext) { in bpf_object__relocate()
6434 pr_warn("failed to perform CO-RE relocations: %d\n", in bpf_object__relocate()
6439 /* relocate data references first for all programs and sub-programs, in bpf_object__relocate()
6441 * subprogram processing won't need to re-calculate any of them in bpf_object__relocate()
6443 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate()
6444 prog = &obj->programs[i]; in bpf_object__relocate()
6448 prog->name, err); in bpf_object__relocate()
6457 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate()
6458 prog = &obj->programs[i]; in bpf_object__relocate()
6459 /* sub-program's sub-calls are relocated within the context of in bpf_object__relocate()
6468 prog->name, err); in bpf_object__relocate()
6473 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate()
6474 prog = &obj->programs[i]; in bpf_object__relocate()
6475 zfree(&prog->reloc_desc); in bpf_object__relocate()
6476 prog->nr_reloc = 0; in bpf_object__relocate()
6491 struct bpf_map *map = NULL, *targ_map; in bpf_object__collect_map_relos() local
6500 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf) in bpf_object__collect_map_relos()
6501 return -EINVAL; in bpf_object__collect_map_relos()
6502 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id); in bpf_object__collect_map_relos()
6504 return -EINVAL; in bpf_object__collect_map_relos()
6506 symbols = obj->efile.symbols; in bpf_object__collect_map_relos()
6507 nrels = shdr->sh_size / shdr->sh_entsize; in bpf_object__collect_map_relos()
6511 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_map_relos()
6516 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_map_relos()
6519 if (sym.st_shndx != obj->efile.btf_maps_shndx) { in bpf_object__collect_map_relos()
6520 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", in bpf_object__collect_map_relos()
6522 return -LIBBPF_ERRNO__RELOC; in bpf_object__collect_map_relos()
6529 for (j = 0; j < obj->nr_maps; j++) { in bpf_object__collect_map_relos()
6530 map = &obj->maps[j]; in bpf_object__collect_map_relos()
6531 if (map->sec_idx != obj->efile.btf_maps_shndx) in bpf_object__collect_map_relos()
6534 vi = btf_var_secinfos(sec) + map->btf_var_idx; in bpf_object__collect_map_relos()
6535 if (vi->offset <= rel.r_offset && in bpf_object__collect_map_relos()
6536 rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size) in bpf_object__collect_map_relos()
6539 if (j == obj->nr_maps) { in bpf_object__collect_map_relos()
6540 pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n", in bpf_object__collect_map_relos()
6542 return -EINVAL; in bpf_object__collect_map_relos()
6545 if (!bpf_map_type__is_map_in_map(map->def.type)) in bpf_object__collect_map_relos()
6546 return -EINVAL; in bpf_object__collect_map_relos()
6547 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS && in bpf_object__collect_map_relos()
6548 map->def.key_size != sizeof(int)) { in bpf_object__collect_map_relos()
6549 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n", in bpf_object__collect_map_relos()
6550 i, map->name, sizeof(int)); in bpf_object__collect_map_relos()
6551 return -EINVAL; in bpf_object__collect_map_relos()
6556 return -ESRCH; in bpf_object__collect_map_relos()
6558 var = btf__type_by_id(obj->btf, vi->type); in bpf_object__collect_map_relos()
6559 def = skip_mods_and_typedefs(obj->btf, var->type, NULL); in bpf_object__collect_map_relos()
6561 return -EINVAL; in bpf_object__collect_map_relos()
6562 member = btf_members(def) + btf_vlen(def) - 1; in bpf_object__collect_map_relos()
6563 mname = btf__name_by_offset(obj->btf, member->name_off); in bpf_object__collect_map_relos()
6565 return -EINVAL; in bpf_object__collect_map_relos()
6567 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8; in bpf_object__collect_map_relos()
6568 if (rel.r_offset - vi->offset < moff) in bpf_object__collect_map_relos()
6569 return -EINVAL; in bpf_object__collect_map_relos()
6571 moff = rel.r_offset - vi->offset - moff; in bpf_object__collect_map_relos()
6576 return -EINVAL; in bpf_object__collect_map_relos()
6578 if (moff >= map->init_slots_sz) { in bpf_object__collect_map_relos()
6580 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz); in bpf_object__collect_map_relos()
6582 return -ENOMEM; in bpf_object__collect_map_relos()
6583 map->init_slots = tmp; in bpf_object__collect_map_relos()
6584 memset(map->init_slots + map->init_slots_sz, 0, in bpf_object__collect_map_relos()
6585 (new_sz - map->init_slots_sz) * host_ptr_sz); in bpf_object__collect_map_relos()
6586 map->init_slots_sz = new_sz; in bpf_object__collect_map_relos()
6588 map->init_slots[moff] = targ_map; in bpf_object__collect_map_relos()
6590 pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n", in bpf_object__collect_map_relos()
6591 i, map->name, moff, name); in bpf_object__collect_map_relos()
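/*
 * For reference, a BPF-side sketch (mirroring the selftests' convention;
 * names are illustrative) of a BTF-defined map-in-map whose initialized
 * slots produce exactly the .maps relocations collected above; each slot
 * ends up in map->init_slots[]:
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} inner1 SEC(".maps"), inner2 SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 2);
	__type(key, int);
	__array(values, struct inner_map);
} outer SEC(".maps") = {
	.values = { [0] = &inner1, [1] = &inner2 },	/* slots 0 and 1 */
};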
6602 if (a->insn_idx != b->insn_idx) in cmp_relocs()
6603 return a->insn_idx < b->insn_idx ? -1 : 1; in cmp_relocs()
6606 if (a->type != b->type) in cmp_relocs()
6607 return a->type < b->type ? -1 : 1; in cmp_relocs()
6616 for (i = 0; i < obj->efile.nr_reloc_sects; i++) { in bpf_object__collect_relos()
6617 GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr; in bpf_object__collect_relos()
6618 Elf_Data *data = obj->efile.reloc_sects[i].data; in bpf_object__collect_relos()
6619 int idx = shdr->sh_info; in bpf_object__collect_relos()
6621 if (shdr->sh_type != SHT_REL) { in bpf_object__collect_relos()
6623 return -LIBBPF_ERRNO__INTERNAL; in bpf_object__collect_relos()
6626 if (idx == obj->efile.st_ops_shndx) in bpf_object__collect_relos()
6628 else if (idx == obj->efile.btf_maps_shndx) in bpf_object__collect_relos()
6636 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__collect_relos()
6637 struct bpf_program *p = &obj->programs[i]; in bpf_object__collect_relos()
6639 if (!p->nr_reloc) in bpf_object__collect_relos()
6642 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs); in bpf_object__collect_relos()
6649 if (BPF_CLASS(insn->code) == BPF_JMP && in insn_is_helper_call()
6650 BPF_OP(insn->code) == BPF_CALL && in insn_is_helper_call()
6651 BPF_SRC(insn->code) == BPF_K && in insn_is_helper_call()
6652 insn->src_reg == 0 && in insn_is_helper_call()
6653 insn->dst_reg == 0) { in insn_is_helper_call()
6654 *func_id = insn->imm; in insn_is_helper_call()
6662 struct bpf_insn *insn = prog->insns; in bpf_object__sanitize_prog()
6666 for (i = 0; i < prog->insns_cnt; i++, insn++) { in bpf_object__sanitize_prog()
6678 insn->imm = BPF_FUNC_probe_read; in bpf_object__sanitize_prog()
6683 insn->imm = BPF_FUNC_probe_read_str; in bpf_object__sanitize_prog()
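/*
 * Sketch (illustrative, uapi-only) of the instruction shape matched by
 * insn_is_helper_call() above; bpf_object__sanitize_prog() rewrites
 * insn->imm on kernels that lack the split probe_read_{user,kernel}
 * helpers:
 */
#include <linux/bpf.h>

static struct bpf_insn example_helper_call(void)
{
	return (struct bpf_insn) {
		.code = BPF_JMP | BPF_CALL,	  /* src_reg == dst_reg == 0 */
		.imm  = BPF_FUNC_probe_read_user, /* helper ID, may be rewritten */
	};
}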
6703 return -EINVAL; in load_program()
6706 load_attr.prog_type = prog->type; in load_program()
6708 if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def && in load_program()
6709 prog->sec_def->is_exp_attach_type_optional) in load_program()
6712 load_attr.expected_attach_type = prog->expected_attach_type; in load_program()
6714 load_attr.name = prog->name; in load_program()
6718 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || in load_program()
6719 prog->type == BPF_PROG_TYPE_LSM) { in load_program()
6720 load_attr.attach_btf_id = prog->attach_btf_id; in load_program()
6721 } else if (prog->type == BPF_PROG_TYPE_TRACING || in load_program()
6722 prog->type == BPF_PROG_TYPE_EXT) { in load_program()
6723 load_attr.attach_prog_fd = prog->attach_prog_fd; in load_program()
6724 load_attr.attach_btf_id = prog->attach_btf_id; in load_program()
6727 load_attr.prog_ifindex = prog->prog_ifindex; in load_program()
6730 btf_fd = bpf_object__btf_fd(prog->obj); in load_program()
6733 load_attr.func_info = prog->func_info; in load_program()
6734 load_attr.func_info_rec_size = prog->func_info_rec_size; in load_program()
6735 load_attr.func_info_cnt = prog->func_info_cnt; in load_program()
6736 load_attr.line_info = prog->line_info; in load_program()
6737 load_attr.line_info_rec_size = prog->line_info_rec_size; in load_program()
6738 load_attr.line_info_cnt = prog->line_info_cnt; in load_program()
6740 load_attr.log_level = prog->log_level; in load_program()
6741 load_attr.prog_flags = prog->prog_flags; in load_program()
6747 return -ENOMEM; in load_program()
6758 if (prog->obj->rodata_map_idx >= 0 && in load_program()
6761 &prog->obj->maps[prog->obj->rodata_map_idx]; in load_program()
6765 pr_warn("prog '%s': failed to bind .rodata map: %s\n", in load_program()
6766 prog->name, cp); in load_program()
6783 ret = errno ? -errno : -LIBBPF_ERRNO__LOAD; in load_program()
6789 ret = -LIBBPF_ERRNO__VERIFY; in load_program()
6790 pr_warn("-- BEGIN DUMP LOG ---\n"); in load_program()
6792 pr_warn("-- END LOG --\n"); in load_program()
6796 ret = -LIBBPF_ERRNO__PROG2BIG; in load_program()
6806 ret = -LIBBPF_ERRNO__PROGTYPE; in load_program()
6822 if (prog->obj->loaded) { in bpf_program__load()
6823 pr_warn("prog '%s': can't load after object was loaded\n", prog->name); in bpf_program__load()
6824 return -EINVAL; in bpf_program__load()
6827 if ((prog->type == BPF_PROG_TYPE_TRACING || in bpf_program__load()
6828 prog->type == BPF_PROG_TYPE_LSM || in bpf_program__load()
6829 prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) { in bpf_program__load()
6833 prog->attach_btf_id = btf_id; in bpf_program__load()
6836 if (prog->instances.nr < 0 || !prog->instances.fds) { in bpf_program__load()
6837 if (prog->preprocessor) { in bpf_program__load()
6839 prog->name); in bpf_program__load()
6840 return -LIBBPF_ERRNO__INTERNAL; in bpf_program__load()
6843 prog->instances.fds = malloc(sizeof(int)); in bpf_program__load()
6844 if (!prog->instances.fds) { in bpf_program__load()
6846 return -ENOMEM; in bpf_program__load()
6848 prog->instances.nr = 1; in bpf_program__load()
6849 prog->instances.fds[0] = -1; in bpf_program__load()
6852 if (!prog->preprocessor) { in bpf_program__load()
6853 if (prog->instances.nr != 1) { in bpf_program__load()
6855 prog->name, prog->instances.nr); in bpf_program__load()
6857 err = load_program(prog, prog->insns, prog->insns_cnt, in bpf_program__load()
6860 prog->instances.fds[0] = fd; in bpf_program__load()
6864 for (i = 0; i < prog->instances.nr; i++) { in bpf_program__load()
6866 bpf_program_prep_t preprocessor = prog->preprocessor; in bpf_program__load()
6869 err = preprocessor(prog, i, prog->insns, in bpf_program__load()
6870 prog->insns_cnt, &result); in bpf_program__load()
6873 i, prog->name); in bpf_program__load()
6879 i, prog->name); in bpf_program__load()
6880 prog->instances.fds[i] = -1; in bpf_program__load()
6882 *result.pfd = -1; in bpf_program__load()
6890 i, prog->name); in bpf_program__load()
6896 prog->instances.fds[i] = fd; in bpf_program__load()
6900 pr_warn("failed to load program '%s'\n", prog->name); in bpf_program__load()
6901 zfree(&prog->insns); in bpf_program__load()
6902 prog->insns_cnt = 0; in bpf_program__load()
6913 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__load_progs()
6914 prog = &obj->programs[i]; in bpf_object__load_progs()
6920 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__load_progs()
6921 prog = &obj->programs[i]; in bpf_object__load_progs()
6924 if (!prog->load) { in bpf_object__load_progs()
6925 pr_debug("prog '%s': skipped loading\n", prog->name); in bpf_object__load_progs()
6928 prog->log_level |= log_level; in bpf_object__load_progs()
6929 err = bpf_program__load(prog, obj->license, obj->kern_version); in bpf_object__load_progs()
6939 __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, in __bpf_object__open() argument
6950 path ? : "(mem buf)"); in __bpf_object__open()
6951 return ERR_PTR(-LIBBPF_ERRNO__LIBELF); in __bpf_object__open()
6955 return ERR_PTR(-EINVAL); in __bpf_object__open()
6960 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", in __bpf_object__open()
6965 path = obj_name; in __bpf_object__open()
6969 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name); in __bpf_object__open()
6975 obj->kconfig = strdup(kconfig); in __bpf_object__open()
6976 if (!obj->kconfig) { in __bpf_object__open()
6977 err = -ENOMEM; in __bpf_object__open()
6994 prog->sec_def = find_sec_def(prog->sec_name); in __bpf_object__open()
6995 if (!prog->sec_def) in __bpf_object__open()
6999 if (prog->sec_def->is_sleepable) in __bpf_object__open()
7000 prog->prog_flags |= BPF_F_SLEEPABLE; in __bpf_object__open()
7001 bpf_program__set_type(prog, prog->sec_def->prog_type); in __bpf_object__open()
7003 prog->sec_def->expected_attach_type); in __bpf_object__open()
7005 if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING || in __bpf_object__open()
7006 prog->sec_def->prog_type == BPF_PROG_TYPE_EXT) in __bpf_object__open()
7007 prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0); in __bpf_object__open()
7024 if (!attr->file) in __bpf_object__open_xattr()
7027 pr_debug("loading %s\n", attr->file); in __bpf_object__open_xattr()
7028 return __bpf_object__open(attr->file, NULL, 0, &opts); in __bpf_object__open_xattr()
7036 struct bpf_object *bpf_object__open(const char *path) in bpf_object__open() argument
7039 .file = path, in bpf_object__open()
7047 bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts) in bpf_object__open_file() argument
7049 if (!path) in bpf_object__open_file()
7050 return ERR_PTR(-EINVAL); in bpf_object__open_file()
7052 pr_debug("loading %s\n", path); in bpf_object__open_file()
7054 return __bpf_object__open(path, NULL, 0, opts); in bpf_object__open_file()
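/*
 * Minimal usage sketch for the open/load path above (object file name is
 * hypothetical):
 */
#include <bpf/libbpf.h>

int open_and_load_example(void)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
			    .object_name = "my_obj");	/* optional override */
	struct bpf_object *obj;
	int err;

	obj = bpf_object__open_file("prog.bpf.o", &opts);
	if (libbpf_get_error(obj))
		return -1;
	err = bpf_object__load(obj);
	bpf_object__close(obj);
	return err;
}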
7062 return ERR_PTR(-EINVAL); in bpf_object__open_mem()
7073 /* wrong default, but backwards-compatible */ in bpf_object__open_buffer()
7077 /* returning NULL is wrong, but backwards-compatible */ in bpf_object__open_buffer()
7089 return -EINVAL; in bpf_object__unload()
7091 for (i = 0; i < obj->nr_maps; i++) { in bpf_object__unload()
7092 zclose(obj->maps[i].fd); in bpf_object__unload()
7093 if (obj->maps[i].st_ops) in bpf_object__unload()
7094 zfree(&obj->maps[i].st_ops->kern_vdata); in bpf_object__unload()
7097 for (i = 0; i < obj->nr_programs; i++) in bpf_object__unload()
7098 bpf_program__unload(&obj->programs[i]); in bpf_object__unload()
7112 return -ENOTSUP; in bpf_object__sanitize_maps()
7115 m->def.map_flags ^= BPF_F_MMAPABLE; in bpf_object__sanitize_maps()
7131 err = -errno; in bpf_object__read_kallsyms_file()
7143 err = -EINVAL; in bpf_object__read_kallsyms_file()
7148 if (!ext || ext->type != EXT_KSYM) in bpf_object__read_kallsyms_file()
7151 if (ext->is_set && ext->ksym.addr != sym_addr) { in bpf_object__read_kallsyms_file()
7153 sym_name, ext->ksym.addr, sym_addr); in bpf_object__read_kallsyms_file()
7154 err = -EINVAL; in bpf_object__read_kallsyms_file()
7157 if (!ext->is_set) { in bpf_object__read_kallsyms_file()
7158 ext->is_set = true; in bpf_object__read_kallsyms_file()
7159 ext->ksym.addr = sym_addr; in bpf_object__read_kallsyms_file()
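/*
 * BPF-side sketch (symbol and program names illustrative) of a typeless
 * __ksym extern: the /proc/kallsyms pass above fills ext->ksym.addr, which
 * is then patched into the ld_imm64 during RELO_EXTERN processing:
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

extern const void vfs_read __ksym;

SEC("kprobe/vfs_read")
int ksym_example(void *ctx)
{
	/* &vfs_read resolves to the kallsyms address at load time */
	return (unsigned long)&vfs_read != 0;
}

char LICENSE[] SEC("license") = "GPL";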
7174 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__resolve_ksyms_btf_id()
7180 ext = &obj->externs[i]; in bpf_object__resolve_ksyms_btf_id()
7181 if (ext->type != EXT_KSYM || !ext->ksym.type_id) in bpf_object__resolve_ksyms_btf_id()
7184 id = btf__find_by_name_kind(obj->btf_vmlinux, ext->name, in bpf_object__resolve_ksyms_btf_id()
7188 ext->name); in bpf_object__resolve_ksyms_btf_id()
7189 return -ESRCH; in bpf_object__resolve_ksyms_btf_id()
7193 local_type_id = ext->ksym.type_id; in bpf_object__resolve_ksyms_btf_id()
7196 targ_var = btf__type_by_id(obj->btf_vmlinux, id); in bpf_object__resolve_ksyms_btf_id()
7197 targ_var_name = btf__name_by_offset(obj->btf_vmlinux, in bpf_object__resolve_ksyms_btf_id()
7198 targ_var->name_off); in bpf_object__resolve_ksyms_btf_id()
7199 targ_type = skip_mods_and_typedefs(obj->btf_vmlinux, in bpf_object__resolve_ksyms_btf_id()
7200 targ_var->type, in bpf_object__resolve_ksyms_btf_id()
7203 ret = bpf_core_types_are_compat(obj->btf, local_type_id, in bpf_object__resolve_ksyms_btf_id()
7204 obj->btf_vmlinux, targ_type_id); in bpf_object__resolve_ksyms_btf_id()
7209 local_type = btf__type_by_id(obj->btf, local_type_id); in bpf_object__resolve_ksyms_btf_id()
7210 local_name = btf__name_by_offset(obj->btf, in bpf_object__resolve_ksyms_btf_id()
7211 local_type->name_off); in bpf_object__resolve_ksyms_btf_id()
7212 targ_name = btf__name_by_offset(obj->btf_vmlinux, in bpf_object__resolve_ksyms_btf_id()
7213 targ_type->name_off); in bpf_object__resolve_ksyms_btf_id()
7216 ext->name, local_type_id, in bpf_object__resolve_ksyms_btf_id()
7219 return -EINVAL; in bpf_object__resolve_ksyms_btf_id()
7222 ext->is_set = true; in bpf_object__resolve_ksyms_btf_id()
7223 ext->ksym.vmlinux_btf_id = id; in bpf_object__resolve_ksyms_btf_id()
7225 ext->name, id, btf_kind_str(targ_var), targ_var_name); in bpf_object__resolve_ksyms_btf_id()
7239 if (obj->nr_extern == 0) in bpf_object__resolve_externs()
7242 if (obj->kconfig_map_idx >= 0) in bpf_object__resolve_externs()
7243 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped; in bpf_object__resolve_externs()
7245 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__resolve_externs()
7246 ext = &obj->externs[i]; in bpf_object__resolve_externs()
7248 if (ext->type == EXT_KCFG && in bpf_object__resolve_externs()
7249 strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) { in bpf_object__resolve_externs()
7250 void *ext_val = kcfg_data + ext->kcfg.data_off; in bpf_object__resolve_externs()
7255 return -EINVAL; in bpf_object__resolve_externs()
7260 pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver); in bpf_object__resolve_externs()
7261 } else if (ext->type == EXT_KCFG && in bpf_object__resolve_externs()
7262 strncmp(ext->name, "CONFIG_", 7) == 0) { in bpf_object__resolve_externs()
7264 } else if (ext->type == EXT_KSYM) { in bpf_object__resolve_externs()
7265 if (ext->ksym.type_id) in bpf_object__resolve_externs()
7270 pr_warn("unrecognized extern '%s'\n", ext->name); in bpf_object__resolve_externs()
7271 return -EINVAL; in bpf_object__resolve_externs()
7277 return -EINVAL; in bpf_object__resolve_externs()
7279 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__resolve_externs()
7280 ext = &obj->externs[i]; in bpf_object__resolve_externs()
7281 if (ext->type == EXT_KCFG && !ext->is_set) { in bpf_object__resolve_externs()
7290 return -EINVAL; in bpf_object__resolve_externs()
7295 return -EINVAL; in bpf_object__resolve_externs()
7300 return -EINVAL; in bpf_object__resolve_externs()
7302 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__resolve_externs()
7303 ext = &obj->externs[i]; in bpf_object__resolve_externs()
7305 if (!ext->is_set && !ext->is_weak) { in bpf_object__resolve_externs()
7306 pr_warn("extern %s (strong) not resolved\n", ext->name); in bpf_object__resolve_externs()
7307 return -ESRCH; in bpf_object__resolve_externs()
7308 } else if (!ext->is_set) { in bpf_object__resolve_externs()
7310 ext->name); in bpf_object__resolve_externs()
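/*
 * BPF-side sketch of the kcfg externs resolved above (extern types are
 * illustrative; libbpf validates them against the actual Kconfig value
 * kinds, and the values land in the auto-created kconfig map):
 */
#include <bpf/bpf_helpers.h>

extern int LINUX_KERNEL_VERSION __kconfig;	 /* always provided */
extern int CONFIG_HZ __kconfig;			 /* integer option */
extern char CONFIG_BPF_SYSCALL __kconfig __weak; /* bool; weak may stay 0 */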
7323 return -EINVAL; in bpf_object__load_xattr()
7324 obj = attr->obj; in bpf_object__load_xattr()
7326 return -EINVAL; in bpf_object__load_xattr()
7328 if (obj->loaded) { in bpf_object__load_xattr()
7329 pr_warn("object '%s': load can't be attempted twice\n", obj->name); in bpf_object__load_xattr()
7330 return -EINVAL; in bpf_object__load_xattr()
7335 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); in bpf_object__load_xattr()
7340 err = err ? : bpf_object__relocate(obj, attr->target_btf_path); in bpf_object__load_xattr()
7341 err = err ? : bpf_object__load_progs(obj, attr->log_level); in bpf_object__load_xattr()
7343 btf__free(obj->btf_vmlinux); in bpf_object__load_xattr()
7344 obj->btf_vmlinux = NULL; in bpf_object__load_xattr()
7346 obj->loaded = true; /* doesn't matter if successfully or not */ in bpf_object__load_xattr()
7353 /* unpin any maps that were auto-pinned during load */ in bpf_object__load_xattr()
7354 for (i = 0; i < obj->nr_maps; i++) in bpf_object__load_xattr()
7355 if (obj->maps[i].pinned && !obj->maps[i].reused) in bpf_object__load_xattr()
7356 bpf_map__unpin(&obj->maps[i], NULL); in bpf_object__load_xattr()
7359 pr_warn("failed to load object '%s'\n", obj->path); in bpf_object__load_xattr()
7372 static int make_parent_dir(const char *path) in make_parent_dir() argument
7378 dname = strdup(path); in make_parent_dir()
7380 return -ENOMEM; in make_parent_dir()
7384 err = -errno; in make_parent_dir()
7388 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); in make_parent_dir()
7389 pr_warn("failed to mkdir %s: %s\n", path, cp); in make_parent_dir()
7394 static int check_path(const char *path) in check_path() argument
7401 if (path == NULL) in check_path()
7402 return -EINVAL; in check_path()
7404 dname = strdup(path); in check_path()
7406 return -ENOMEM; in check_path()
7412 err = -errno; in check_path()
7417 pr_warn("specified path %s is not on BPF FS\n", path); in check_path()
7418 err = -EINVAL; in check_path()
7424 int bpf_program__pin_instance(struct bpf_program *prog, const char *path, in bpf_program__pin_instance() argument
7430 err = make_parent_dir(path); in bpf_program__pin_instance()
7434 err = check_path(path); in bpf_program__pin_instance()
7440 return -EINVAL; in bpf_program__pin_instance()
7443 if (instance < 0 || instance >= prog->instances.nr) { in bpf_program__pin_instance()
7445 instance, prog->name, prog->instances.nr); in bpf_program__pin_instance()
7446 return -EINVAL; in bpf_program__pin_instance()
7449 if (bpf_obj_pin(prog->instances.fds[instance], path)) { in bpf_program__pin_instance()
7450 err = -errno; in bpf_program__pin_instance()
7455 pr_debug("pinned program '%s'\n", path); in bpf_program__pin_instance()
7460 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, in bpf_program__unpin_instance() argument
7465 err = check_path(path); in bpf_program__unpin_instance()
7471 return -EINVAL; in bpf_program__unpin_instance()
7474 if (instance < 0 || instance >= prog->instances.nr) { in bpf_program__unpin_instance()
7476 instance, prog->name, prog->instances.nr); in bpf_program__unpin_instance()
7477 return -EINVAL; in bpf_program__unpin_instance()
7480 err = unlink(path); in bpf_program__unpin_instance()
7482 return -errno; in bpf_program__unpin_instance()
7483 pr_debug("unpinned program '%s'\n", path); in bpf_program__unpin_instance()
7488 int bpf_program__pin(struct bpf_program *prog, const char *path) in bpf_program__pin() argument
7492 err = make_parent_dir(path); in bpf_program__pin()
7496 err = check_path(path); in bpf_program__pin()
7502 return -EINVAL; in bpf_program__pin()
7505 if (prog->instances.nr <= 0) { in bpf_program__pin()
7506 pr_warn("no instances of prog %s to pin\n", prog->name); in bpf_program__pin()
7507 return -EINVAL; in bpf_program__pin()
7510 if (prog->instances.nr == 1) { in bpf_program__pin()
7512 return bpf_program__pin_instance(prog, path, 0); in bpf_program__pin()
7515 for (i = 0; i < prog->instances.nr; i++) { in bpf_program__pin()
7519 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); in bpf_program__pin()
7521 err = -EINVAL; in bpf_program__pin()
7524 err = -ENAMETOOLONG; in bpf_program__pin()
7536 for (i = i - 1; i >= 0; i--) { in bpf_program__pin()
7540 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); in bpf_program__pin()
7549 rmdir(path); in bpf_program__pin()
7554 int bpf_program__unpin(struct bpf_program *prog, const char *path) in bpf_program__unpin() argument
7558 err = check_path(path); in bpf_program__unpin()
7564 return -EINVAL; in bpf_program__unpin()
7567 if (prog->instances.nr <= 0) { in bpf_program__unpin()
7568 pr_warn("no instances of prog %s to pin\n", prog->name); in bpf_program__unpin()
7569 return -EINVAL; in bpf_program__unpin()
7572 if (prog->instances.nr == 1) { in bpf_program__unpin()
7574 return bpf_program__unpin_instance(prog, path, 0); in bpf_program__unpin()
7577 for (i = 0; i < prog->instances.nr; i++) { in bpf_program__unpin()
7581 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); in bpf_program__unpin()
7583 return -EINVAL; in bpf_program__unpin()
7585 return -ENAMETOOLONG; in bpf_program__unpin()
7592 err = rmdir(path); in bpf_program__unpin()
7594 return -errno; in bpf_program__unpin()
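/*
 * Usage sketch for the program pinning API above (path hypothetical;
 * requires a mounted bpffs):
 */
#include <bpf/libbpf.h>

int prog_pin_example(struct bpf_program *prog)
{
	int err = bpf_program__pin(prog, "/sys/fs/bpf/my_prog");

	if (err)
		return err;
	/* ... later, drop the pin(s) again: */
	return bpf_program__unpin(prog, "/sys/fs/bpf/my_prog");
}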
7599 int bpf_map__pin(struct bpf_map *map, const char *path) in bpf_map__pin() argument
7604 if (map == NULL) { in bpf_map__pin()
7605 pr_warn("invalid map pointer\n"); in bpf_map__pin()
7606 return -EINVAL; in bpf_map__pin()
7609 if (map->pin_path) { in bpf_map__pin()
7610 if (path && strcmp(path, map->pin_path)) { in bpf_map__pin()
7611 pr_warn("map '%s' already has pin path '%s' different from '%s'\n", in bpf_map__pin()
7612 bpf_map__name(map), map->pin_path, path); in bpf_map__pin()
7613 return -EINVAL; in bpf_map__pin()
7614 } else if (map->pinned) { in bpf_map__pin()
7615 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n", in bpf_map__pin()
7616 bpf_map__name(map), map->pin_path); in bpf_map__pin()
7620 if (!path) { in bpf_map__pin()
7621 pr_warn("missing a path to pin map '%s' at\n", in bpf_map__pin()
7622 bpf_map__name(map)); in bpf_map__pin()
7623 return -EINVAL; in bpf_map__pin()
7624 } else if (map->pinned) { in bpf_map__pin()
7625 pr_warn("map '%s' already pinned\n", bpf_map__name(map)); in bpf_map__pin()
7626 return -EEXIST; in bpf_map__pin()
7629 map->pin_path = strdup(path); in bpf_map__pin()
7630 if (!map->pin_path) { in bpf_map__pin()
7631 err = -errno; in bpf_map__pin()
7636 err = make_parent_dir(map->pin_path); in bpf_map__pin()
7640 err = check_path(map->pin_path); in bpf_map__pin()
7644 if (bpf_obj_pin(map->fd, map->pin_path)) { in bpf_map__pin()
7645 err = -errno; in bpf_map__pin()
7649 map->pinned = true; in bpf_map__pin()
7650 pr_debug("pinned map '%s'\n", map->pin_path); in bpf_map__pin()
7655 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); in bpf_map__pin()
7656 pr_warn("failed to pin map: %s\n", cp); in bpf_map__pin()
7660 int bpf_map__unpin(struct bpf_map *map, const char *path) in bpf_map__unpin() argument
7664 if (map == NULL) { in bpf_map__unpin()
7665 pr_warn("invalid map pointer\n"); in bpf_map__unpin()
7666 return -EINVAL; in bpf_map__unpin()
7669 if (map->pin_path) { in bpf_map__unpin()
7670 if (path && strcmp(path, map->pin_path)) { in bpf_map__unpin()
7671 pr_warn("map '%s' already has pin path '%s' different from '%s'\n", in bpf_map__unpin()
7672 bpf_map__name(map), map->pin_path, path); in bpf_map__unpin()
7673 return -EINVAL; in bpf_map__unpin()
7675 path = map->pin_path; in bpf_map__unpin()
7676 } else if (!path) { in bpf_map__unpin()
7677 pr_warn("no path to unpin map '%s' from\n", in bpf_map__unpin()
7678 bpf_map__name(map)); in bpf_map__unpin()
7679 return -EINVAL; in bpf_map__unpin()
7682 err = check_path(path); in bpf_map__unpin()
7686 err = unlink(path); in bpf_map__unpin()
7688 return -errno; in bpf_map__unpin()
7690 map->pinned = false; in bpf_map__unpin()
7691 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path); in bpf_map__unpin()
7696 int bpf_map__set_pin_path(struct bpf_map *map, const char *path) in bpf_map__set_pin_path() argument
7700 if (path) { in bpf_map__set_pin_path()
7701 new = strdup(path); in bpf_map__set_pin_path()
7703 return -errno; in bpf_map__set_pin_path()
7706 free(map->pin_path); in bpf_map__set_pin_path()
7707 map->pin_path = new; in bpf_map__set_pin_path()
7711 const char *bpf_map__get_pin_path(const struct bpf_map *map) in bpf_map__get_pin_path() argument
7713 return map->pin_path; in bpf_map__get_pin_path()
7716 bool bpf_map__is_pinned(const struct bpf_map *map) in bpf_map__is_pinned() argument
7718 return map->pinned; in bpf_map__is_pinned()
7723 /* bpffs disallows periods in path names */ in sanitize_pin_path()
7731 int bpf_object__pin_maps(struct bpf_object *obj, const char *path) in bpf_object__pin_maps() argument
7733 struct bpf_map *map; in bpf_object__pin_maps() local
7737 return -ENOENT; in bpf_object__pin_maps()
7739 if (!obj->loaded) { in bpf_object__pin_maps()
7741 return -ENOENT; in bpf_object__pin_maps()
7744 bpf_object__for_each_map(map, obj) { in bpf_object__pin_maps()
7748 if (path) { in bpf_object__pin_maps()
7751 len = snprintf(buf, PATH_MAX, "%s/%s", path, in bpf_object__pin_maps()
7752 bpf_map__name(map)); in bpf_object__pin_maps()
7754 err = -EINVAL; in bpf_object__pin_maps()
7757 err = -ENAMETOOLONG; in bpf_object__pin_maps()
7762 } else if (!map->pin_path) { in bpf_object__pin_maps()
7766 err = bpf_map__pin(map, pin_path); in bpf_object__pin_maps()
7774 while ((map = bpf_map__prev(map, obj))) { in bpf_object__pin_maps()
7775 if (!map->pin_path) in bpf_object__pin_maps()
7778 bpf_map__unpin(map, NULL); in bpf_object__pin_maps()
7784 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) in bpf_object__unpin_maps() argument
7786 struct bpf_map *map; in bpf_object__unpin_maps() local
7790 return -ENOENT; in bpf_object__unpin_maps()
7792 bpf_object__for_each_map(map, obj) { in bpf_object__unpin_maps()
7796 if (path) { in bpf_object__unpin_maps()
7799 len = snprintf(buf, PATH_MAX, "%s/%s", path, in bpf_object__unpin_maps()
7800 bpf_map__name(map)); in bpf_object__unpin_maps()
7802 return -EINVAL; in bpf_object__unpin_maps()
7804 return -ENAMETOOLONG; in bpf_object__unpin_maps()
7807 } else if (!map->pin_path) { in bpf_object__unpin_maps()
7811 err = bpf_map__unpin(map, pin_path); in bpf_object__unpin_maps()
7819 int bpf_object__pin_programs(struct bpf_object *obj, const char *path) in bpf_object__pin_programs() argument
7825 return -ENOENT; in bpf_object__pin_programs()
7827 if (!obj->loaded) { in bpf_object__pin_programs()
7829 return -ENOENT; in bpf_object__pin_programs()
7836 len = snprintf(buf, PATH_MAX, "%s/%s", path, in bpf_object__pin_programs()
7837 prog->pin_name); in bpf_object__pin_programs()
7839 err = -EINVAL; in bpf_object__pin_programs()
7842 err = -ENAMETOOLONG; in bpf_object__pin_programs()
7858 len = snprintf(buf, PATH_MAX, "%s/%s", path, in bpf_object__pin_programs()
7859 prog->pin_name); in bpf_object__pin_programs()
7871 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path) in bpf_object__unpin_programs() argument
7877 return -ENOENT; in bpf_object__unpin_programs()
7883 len = snprintf(buf, PATH_MAX, "%s/%s", path, in bpf_object__unpin_programs()
7884 prog->pin_name); in bpf_object__unpin_programs()
7886 return -EINVAL; in bpf_object__unpin_programs()
7888 return -ENAMETOOLONG; in bpf_object__unpin_programs()
7898 int bpf_object__pin(struct bpf_object *obj, const char *path) in bpf_object__pin() argument
7902 err = bpf_object__pin_maps(obj, path); in bpf_object__pin()
7906 err = bpf_object__pin_programs(obj, path); in bpf_object__pin()
7908 bpf_object__unpin_maps(obj, path); in bpf_object__pin()
7915 static void bpf_map__destroy(struct bpf_map *map) in bpf_map__destroy() argument
7917 if (map->clear_priv) in bpf_map__destroy()
7918 map->clear_priv(map, map->priv); in bpf_map__destroy()
7919 map->priv = NULL; in bpf_map__destroy()
7920 map->clear_priv = NULL; in bpf_map__destroy()
7922 if (map->inner_map) { in bpf_map__destroy()
7923 bpf_map__destroy(map->inner_map); in bpf_map__destroy()
7924 zfree(&map->inner_map); in bpf_map__destroy()
7927 zfree(&map->init_slots); in bpf_map__destroy()
7928 map->init_slots_sz = 0; in bpf_map__destroy()
7930 if (map->mmaped) { in bpf_map__destroy()
7931 munmap(map->mmaped, bpf_map_mmap_sz(map)); in bpf_map__destroy()
7932 map->mmaped = NULL; in bpf_map__destroy()
7935 if (map->st_ops) { in bpf_map__destroy()
7936 zfree(&map->st_ops->data); in bpf_map__destroy()
7937 zfree(&map->st_ops->progs); in bpf_map__destroy()
7938 zfree(&map->st_ops->kern_func_off); in bpf_map__destroy()
7939 zfree(&map->st_ops); in bpf_map__destroy()
7942 zfree(&map->name); in bpf_map__destroy()
7943 zfree(&map->pin_path); in bpf_map__destroy()
7945 if (map->fd >= 0) in bpf_map__destroy()
7946 zclose(map->fd); in bpf_map__destroy()
7956 if (obj->clear_priv) in bpf_object__close()
7957 obj->clear_priv(obj, obj->priv); in bpf_object__close()
7961 btf__free(obj->btf); in bpf_object__close()
7962 btf_ext__free(obj->btf_ext); in bpf_object__close()
7964 for (i = 0; i < obj->nr_maps; i++) in bpf_object__close()
7965 bpf_map__destroy(&obj->maps[i]); in bpf_object__close()
7967 zfree(&obj->kconfig); in bpf_object__close()
7968 zfree(&obj->externs); in bpf_object__close()
7969 obj->nr_extern = 0; in bpf_object__close()
7971 zfree(&obj->maps); in bpf_object__close()
7972 obj->nr_maps = 0; in bpf_object__close()
7974 if (obj->programs && obj->nr_programs) { in bpf_object__close()
7975 for (i = 0; i < obj->nr_programs; i++) in bpf_object__close()
7976 bpf_program__exit(&obj->programs[i]); in bpf_object__close()
7978 zfree(&obj->programs); in bpf_object__close()
7980 list_del(&obj->list); in bpf_object__close()
7997 if (&next->list == &bpf_objects_list) in bpf_object__next()
8005 return obj ? obj->name : ERR_PTR(-EINVAL); in bpf_object__name()
8010 return obj ? obj->kern_version : 0; in bpf_object__kversion()
8015 return obj ? obj->btf : NULL; in bpf_object__btf()
8020 return obj->btf ? btf__fd(obj->btf) : -1; in bpf_object__btf_fd()
8026 if (obj->priv && obj->clear_priv) in bpf_object__set_priv()
8027 obj->clear_priv(obj, obj->priv); in bpf_object__set_priv()
8029 obj->priv = priv; in bpf_object__set_priv()
8030 obj->clear_priv = clear_priv; in bpf_object__set_priv()
8036 return obj ? obj->priv : ERR_PTR(-EINVAL); in bpf_object__priv()
8043 size_t nr_programs = obj->nr_programs; in __bpf_program__iter()
8051 return forward ? &obj->programs[0] : in __bpf_program__iter()
8052 &obj->programs[nr_programs - 1]; in __bpf_program__iter()
8054 if (p->obj != obj) { in __bpf_program__iter()
8059 idx = (p - obj->programs) + (forward ? 1 : -1); in __bpf_program__iter()
8060 if (idx >= obj->nr_programs || idx < 0) in __bpf_program__iter()
8062 return &obj->programs[idx]; in __bpf_program__iter()
8092 if (prog->priv && prog->clear_priv) in bpf_program__set_priv()
8093 prog->clear_priv(prog, prog->priv); in bpf_program__set_priv()
8095 prog->priv = priv; in bpf_program__set_priv()
8096 prog->clear_priv = clear_priv; in bpf_program__set_priv()
8102 return prog ? prog->priv : ERR_PTR(-EINVAL); in bpf_program__priv()
8107 prog->prog_ifindex = ifindex; in bpf_program__set_ifindex()
8112 return prog->name; in bpf_program__name()
8117 return prog->sec_name; in bpf_program__section_name()
8124 title = prog->sec_name; in bpf_program__title()
8129 return ERR_PTR(-ENOMEM); in bpf_program__title()
8138 return prog->load; in bpf_program__autoload()
8143 if (prog->obj->loaded) in bpf_program__set_autoload()
8144 return -EINVAL; in bpf_program__set_autoload()
8146 prog->load = autoload; in bpf_program__set_autoload()
8157 return prog->insns_cnt * BPF_INSN_SZ; in bpf_program__size()
8166 return -EINVAL; in bpf_program__set_prep()
8168 if (prog->instances.nr > 0 || prog->instances.fds) { in bpf_program__set_prep()
8169 pr_warn("Can't set pre-processor after loading\n"); in bpf_program__set_prep()
8170 return -EINVAL; in bpf_program__set_prep()
8176 return -ENOMEM; in bpf_program__set_prep()
8179 /* fill all fd with -1 */ in bpf_program__set_prep()
8180 memset(instances_fds, -1, sizeof(int) * nr_instances); in bpf_program__set_prep()
8182 prog->instances.nr = nr_instances; in bpf_program__set_prep()
8183 prog->instances.fds = instances_fds; in bpf_program__set_prep()
8184 prog->preprocessor = prep; in bpf_program__set_prep()
8193 return -EINVAL; in bpf_program__nth_fd()
8195 if (n >= prog->instances.nr || n < 0) { in bpf_program__nth_fd()
8197 n, prog->name, prog->instances.nr); in bpf_program__nth_fd()
8198 return -EINVAL; in bpf_program__nth_fd()
8201 fd = prog->instances.fds[n]; in bpf_program__nth_fd()
8204 n, prog->name); in bpf_program__nth_fd()
8205 return -ENOENT; in bpf_program__nth_fd()
8213 return prog->type; in bpf_program__get_type()
8218 prog->type = type; in bpf_program__set_type()
8224 return prog ? (prog->type == type) : false; in bpf_program__is_type()
8231 return -EINVAL; \
8258 return prog->expected_attach_type; in bpf_program__get_expected_attach_type()
8264 prog->expected_attach_type = type; in bpf_program__set_expected_attach_type()
8271 .len = sizeof(string) - 1, \
8301 .len = sizeof(sec_pfx) - 1, \
8516 return -EINVAL; in libbpf_prog_type_by_name()
8520 *prog_type = sec_def->prog_type; in libbpf_prog_type_by_name()
8521 *expected_attach_type = sec_def->expected_attach_type; in libbpf_prog_type_by_name()
8532 return -ESRCH; in libbpf_prog_type_by_name()
8538 struct bpf_map *map; in find_struct_ops_map_by_offset() local
8541 for (i = 0; i < obj->nr_maps; i++) { in find_struct_ops_map_by_offset()
8542 map = &obj->maps[i]; in find_struct_ops_map_by_offset()
8543 if (!bpf_map__is_struct_ops(map)) in find_struct_ops_map_by_offset()
8545 if (map->sec_offset <= offset && in find_struct_ops_map_by_offset()
8546 offset - map->sec_offset < map->def.value_size) in find_struct_ops_map_by_offset()
8547 return map; in find_struct_ops_map_by_offset()
8553 /* Collect relocations from ELF and populate the st_ops->progs[] */
8562 struct bpf_map *map; in bpf_object__collect_st_ops_relos() local
8571 symbols = obj->efile.symbols; in bpf_object__collect_st_ops_relos()
8572 btf = obj->btf; in bpf_object__collect_st_ops_relos()
8573 nrels = shdr->sh_size / shdr->sh_entsize; in bpf_object__collect_st_ops_relos()
8577 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_st_ops_relos()
8583 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_st_ops_relos()
8587 map = find_struct_ops_map_by_offset(obj, rel.r_offset); in bpf_object__collect_st_ops_relos()
8588 if (!map) { in bpf_object__collect_st_ops_relos()
8589 pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n", in bpf_object__collect_st_ops_relos()
8591 return -EINVAL; in bpf_object__collect_st_ops_relos()
8594 moff = rel.r_offset - map->sec_offset; in bpf_object__collect_st_ops_relos()
8596 st_ops = map->st_ops; in bpf_object__collect_st_ops_relos()
8597 …pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %z… in bpf_object__collect_st_ops_relos()
8598 map->name, in bpf_object__collect_st_ops_relos()
8602 map->sec_offset, sym.st_name, name); in bpf_object__collect_st_ops_relos()
8605 pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n", in bpf_object__collect_st_ops_relos()
8606 map->name, (size_t)rel.r_offset, shdr_idx); in bpf_object__collect_st_ops_relos()
8607 return -LIBBPF_ERRNO__RELOC; in bpf_object__collect_st_ops_relos()
8611 map->name, (unsigned long long)sym.st_value); in bpf_object__collect_st_ops_relos()
8612 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_st_ops_relos()
8616 member = find_member_by_offset(st_ops->type, moff * 8); in bpf_object__collect_st_ops_relos()
8619 map->name, moff); in bpf_object__collect_st_ops_relos()
8620 return -EINVAL; in bpf_object__collect_st_ops_relos()
8622 member_idx = member - btf_members(st_ops->type); in bpf_object__collect_st_ops_relos()
8623 name = btf__name_by_offset(btf, member->name_off); in bpf_object__collect_st_ops_relos()
8625 if (!resolve_func_ptr(btf, member->type, NULL)) { in bpf_object__collect_st_ops_relos()
8627 map->name, name); in bpf_object__collect_st_ops_relos()
8628 return -EINVAL; in bpf_object__collect_st_ops_relos()
8634 map->name, shdr_idx, name); in bpf_object__collect_st_ops_relos()
8635 return -EINVAL; in bpf_object__collect_st_ops_relos()
8638 if (prog->type == BPF_PROG_TYPE_UNSPEC) { in bpf_object__collect_st_ops_relos()
8641 sec_def = find_sec_def(prog->sec_name); in bpf_object__collect_st_ops_relos()
8643 sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) { in bpf_object__collect_st_ops_relos()
8645 prog->type = sec_def->prog_type; in bpf_object__collect_st_ops_relos()
8649 prog->type = BPF_PROG_TYPE_STRUCT_OPS; in bpf_object__collect_st_ops_relos()
8650 prog->attach_btf_id = st_ops->type_id; in bpf_object__collect_st_ops_relos()
8651 prog->expected_attach_type = member_idx; in bpf_object__collect_st_ops_relos()
8652 } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS || in bpf_object__collect_st_ops_relos()
8653 prog->attach_btf_id != st_ops->type_id || in bpf_object__collect_st_ops_relos()
8654 prog->expected_attach_type != member_idx) { in bpf_object__collect_st_ops_relos()
8657 st_ops->progs[member_idx] = prog; in bpf_object__collect_st_ops_relos()
8664 map->name, prog->name, prog->sec_name, prog->type, in bpf_object__collect_st_ops_relos()
8665 prog->attach_btf_id, prog->expected_attach_type, name); in bpf_object__collect_st_ops_relos()
8666 return -EINVAL; in bpf_object__collect_st_ops_relos()
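/*
 * BPF-side sketch of a struct_ops map whose member relocations the function
 * above resolves into st_ops->progs[] (mirrors the selftests' congestion
 * control examples; assumes vmlinux.h for kernel types, and a real
 * implementation must fill in all mandatory tcp_congestion_ops callbacks):
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("struct_ops/sample_init")
void BPF_PROG(sample_init, struct sock *sk)
{
}

SEC(".struct_ops")
struct tcp_congestion_ops sample_ops = {
	.init = (void *)sample_init,
	.name = "bpf_sample",
};

char LICENSE[] SEC("license") = "GPL";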
8687 return -ENAMETOOLONG; in find_btf_by_prefix_kind()
8723 return -EINVAL; in libbpf_find_vmlinux_btf_id()
8736 int err = -EINVAL; in libbpf_find_prog_btf_id()
8742 return -EINVAL; in libbpf_find_prog_btf_id()
8744 info = &info_linear->info; in libbpf_find_prog_btf_id()
8745 if (!info->btf_id) { in libbpf_find_prog_btf_id()
8749 if (btf__get_from_id(info->btf_id, &btf)) { in libbpf_find_prog_btf_id()
8766 enum bpf_attach_type attach_type = prog->expected_attach_type; in libbpf_find_attach_btf_id()
8767 __u32 attach_prog_fd = prog->attach_prog_fd; in libbpf_find_attach_btf_id()
8768 const char *name = prog->sec_name; in libbpf_find_attach_btf_id()
8772 return -EINVAL; in libbpf_find_attach_btf_id()
8783 err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux, in libbpf_find_attach_btf_id()
8789 return -ESRCH; in libbpf_find_attach_btf_id()
8799 return -EINVAL; in libbpf_attach_type_by_name()
8805 return -EINVAL; in libbpf_attach_type_by_name()
8816 return -EINVAL; in libbpf_attach_type_by_name()
8819 int bpf_map__fd(const struct bpf_map *map) in bpf_map__fd() argument
8821 return map ? map->fd : -EINVAL; in bpf_map__fd()
8824 const struct bpf_map_def *bpf_map__def(const struct bpf_map *map) in bpf_map__def() argument
8826 return map ? &map->def : ERR_PTR(-EINVAL); in bpf_map__def()
8829 const char *bpf_map__name(const struct bpf_map *map) in bpf_map__name() argument
8831 return map ? map->name : NULL; in bpf_map__name()
8834 enum bpf_map_type bpf_map__type(const struct bpf_map *map) in bpf_map__type() argument
8836 return map->def.type; in bpf_map__type()
8839 int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type) in bpf_map__set_type() argument
8841 if (map->fd >= 0) in bpf_map__set_type()
8842 return -EBUSY; in bpf_map__set_type()
8843 map->def.type = type; in bpf_map__set_type()
8847 __u32 bpf_map__map_flags(const struct bpf_map *map) in bpf_map__map_flags() argument
8849 return map->def.map_flags; in bpf_map__map_flags()
8852 int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags) in bpf_map__set_map_flags() argument
8854 if (map->fd >= 0) in bpf_map__set_map_flags()
8855 return -EBUSY; in bpf_map__set_map_flags()
8856 map->def.map_flags = flags; in bpf_map__set_map_flags()
8860 __u32 bpf_map__numa_node(const struct bpf_map *map) in bpf_map__numa_node() argument
8862 return map->numa_node; in bpf_map__numa_node()
8865 int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node) in bpf_map__set_numa_node() argument
8867 if (map->fd >= 0) in bpf_map__set_numa_node()
8868 return -EBUSY; in bpf_map__set_numa_node()
8869 map->numa_node = numa_node; in bpf_map__set_numa_node()
8873 __u32 bpf_map__key_size(const struct bpf_map *map) in bpf_map__key_size() argument
8875 return map->def.key_size; in bpf_map__key_size()
8878 int bpf_map__set_key_size(struct bpf_map *map, __u32 size) in bpf_map__set_key_size() argument
8880 if (map->fd >= 0) in bpf_map__set_key_size()
8881 return -EBUSY; in bpf_map__set_key_size()
8882 map->def.key_size = size; in bpf_map__set_key_size()
8886 __u32 bpf_map__value_size(const struct bpf_map *map) in bpf_map__value_size() argument
8888 return map->def.value_size; in bpf_map__value_size()
8891 int bpf_map__set_value_size(struct bpf_map *map, __u32 size) in bpf_map__set_value_size() argument
8893 if (map->fd >= 0) in bpf_map__set_value_size()
8894 return -EBUSY; in bpf_map__set_value_size()
8895 map->def.value_size = size; in bpf_map__set_value_size()
8899 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map) in bpf_map__btf_key_type_id() argument
8901 return map ? map->btf_key_type_id : 0; in bpf_map__btf_key_type_id()
8904 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map) in bpf_map__btf_value_type_id() argument
8906 return map ? map->btf_value_type_id : 0; in bpf_map__btf_value_type_id()
8909 int bpf_map__set_priv(struct bpf_map *map, void *priv, in bpf_map__set_priv() argument
8912 if (!map) in bpf_map__set_priv()
8913 return -EINVAL; in bpf_map__set_priv()
8915 if (map->priv) { in bpf_map__set_priv()
8916 if (map->clear_priv) in bpf_map__set_priv()
8917 map->clear_priv(map, map->priv); in bpf_map__set_priv()
8920 map->priv = priv; in bpf_map__set_priv()
8921 map->clear_priv = clear_priv; in bpf_map__set_priv()
8925 void *bpf_map__priv(const struct bpf_map *map) in bpf_map__priv() argument
8927 return map ? map->priv : ERR_PTR(-EINVAL); in bpf_map__priv()
8930 int bpf_map__set_initial_value(struct bpf_map *map, in bpf_map__set_initial_value() argument
8933 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG || in bpf_map__set_initial_value()
8934 size != map->def.value_size || map->fd >= 0) in bpf_map__set_initial_value()
8935 return -EINVAL; in bpf_map__set_initial_value()
8937 memcpy(map->mmaped, data, size); in bpf_map__set_initial_value()
8941 bool bpf_map__is_offload_neutral(const struct bpf_map *map) in bpf_map__is_offload_neutral() argument
8943 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; in bpf_map__is_offload_neutral()
8946 bool bpf_map__is_internal(const struct bpf_map *map) in bpf_map__is_internal() argument
8948 return map->libbpf_type != LIBBPF_MAP_UNSPEC; in bpf_map__is_internal()
8951 __u32 bpf_map__ifindex(const struct bpf_map *map) in bpf_map__ifindex() argument
8953 return map->map_ifindex; in bpf_map__ifindex()
8956 int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) in bpf_map__set_ifindex() argument
8958 if (map->fd >= 0) in bpf_map__set_ifindex()
8959 return -EBUSY; in bpf_map__set_ifindex()
8960 map->map_ifindex = ifindex; in bpf_map__set_ifindex()
8964 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd) in bpf_map__set_inner_map_fd() argument
8966 if (!bpf_map_type__is_map_in_map(map->def.type)) { in bpf_map__set_inner_map_fd()
8967 pr_warn("error: unsupported map type\n"); in bpf_map__set_inner_map_fd()
8968 return -EINVAL; in bpf_map__set_inner_map_fd()
8970 if (map->inner_map_fd != -1) { in bpf_map__set_inner_map_fd()
8972 return -EINVAL; in bpf_map__set_inner_map_fd()
8974 map->inner_map_fd = fd; in bpf_map__set_inner_map_fd()
8984 if (!obj || !obj->maps) in __bpf_map__iter()
8987 s = obj->maps; in __bpf_map__iter()
8988 e = obj->maps + obj->nr_maps; in __bpf_map__iter()
8991 pr_warn("error in %s: map handler doesn't belong to object\n", in __bpf_map__iter()
8996 idx = (m - obj->maps) + i; in __bpf_map__iter()
8997 if (idx >= obj->nr_maps || idx < 0) in __bpf_map__iter()
8999 return &obj->maps[idx]; in __bpf_map__iter()
9006 return obj->maps; in bpf_map__next()
9015 if (!obj->nr_maps) in bpf_map__prev()
9017 return obj->maps + obj->nr_maps - 1; in bpf_map__prev()
9020 return __bpf_map__iter(next, obj, -1); in bpf_map__prev()
9029 if (pos->name && !strcmp(pos->name, name)) in bpf_object__find_map_by_name()
9044 return ERR_PTR(-ENOTSUP); in bpf_object__find_map_by_offset()
9071 struct bpf_map *map; in bpf_prog_load_xattr() local
9075 return -EINVAL; in bpf_prog_load_xattr()
9076 if (!attr->file) in bpf_prog_load_xattr()
9077 return -EINVAL; in bpf_prog_load_xattr()
9079 open_attr.file = attr->file; in bpf_prog_load_xattr()
9080 open_attr.prog_type = attr->prog_type; in bpf_prog_load_xattr()
9084 return -ENOENT; in bpf_prog_load_xattr()
9087 enum bpf_attach_type attach_type = attr->expected_attach_type; in bpf_prog_load_xattr()
9090 * attr->prog_type, if specified, as an override to whatever in bpf_prog_load_xattr()
9093 if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) { in bpf_prog_load_xattr()
9094 bpf_program__set_type(prog, attr->prog_type); in bpf_prog_load_xattr()
9104 return -EINVAL; in bpf_prog_load_xattr()
9107 prog->prog_ifindex = attr->ifindex; in bpf_prog_load_xattr()
9108 prog->log_level = attr->log_level; in bpf_prog_load_xattr()
9109 prog->prog_flags |= attr->prog_flags; in bpf_prog_load_xattr()
9114 bpf_object__for_each_map(map, obj) { in bpf_prog_load_xattr()
9115 if (!bpf_map__is_offload_neutral(map)) in bpf_prog_load_xattr()
9116 map->map_ifindex = attr->ifindex; in bpf_prog_load_xattr()
9122 return -ENOENT; in bpf_prog_load_xattr()
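/*
 * Usage sketch for the convenience loader above (file name hypothetical):
 */
#include <bpf/libbpf.h>

int prog_load_example(void)
{
	struct bpf_prog_load_attr attr = {
		.file = "prog.bpf.o",
		.prog_type = BPF_PROG_TYPE_UNSPEC, /* infer from SEC() names */
	};
	struct bpf_object *obj;
	int prog_fd;

	if (bpf_prog_load_xattr(&attr, &obj, &prog_fd))
		return -1;
	return prog_fd;	/* fd of the first program in the object */
}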
9140 int fd; /* hook FD, -1 if not applicable */
9162 link->disconnected = true; in bpf_link__disconnect()
9172 if (!link->disconnected && link->detach) in bpf_link__destroy()
9173 err = link->detach(link); in bpf_link__destroy()
9174 if (link->destroy) in bpf_link__destroy()
9175 link->destroy(link); in bpf_link__destroy()
9176 if (link->pin_path) in bpf_link__destroy()
9177 free(link->pin_path); in bpf_link__destroy()
9185 return link->fd; in bpf_link__fd()
9190 return link->pin_path; in bpf_link__pin_path()
9195 return close(link->fd); in bpf_link__detach_fd()
9198 struct bpf_link *bpf_link__open(const char *path) in bpf_link__open() argument
9203 fd = bpf_obj_get(path); in bpf_link__open()
9205 fd = -errno; in bpf_link__open()
9206 pr_warn("failed to open link at %s: %d\n", path, fd); in bpf_link__open()
9213 return ERR_PTR(-ENOMEM); in bpf_link__open()
9215 link->detach = &bpf_link__detach_fd; in bpf_link__open()
9216 link->fd = fd; in bpf_link__open()
9218 link->pin_path = strdup(path); in bpf_link__open()
9219 if (!link->pin_path) { in bpf_link__open()
9221 return ERR_PTR(-ENOMEM); in bpf_link__open()
9229 return bpf_link_detach(link->fd) ? -errno : 0; in bpf_link__detach()
9232 int bpf_link__pin(struct bpf_link *link, const char *path) in bpf_link__pin() argument
9236 if (link->pin_path) in bpf_link__pin()
9237 return -EBUSY; in bpf_link__pin()
9238 err = make_parent_dir(path); in bpf_link__pin()
9241 err = check_path(path); in bpf_link__pin()
9245 link->pin_path = strdup(path); in bpf_link__pin()
9246 if (!link->pin_path) in bpf_link__pin()
9247 return -ENOMEM; in bpf_link__pin()
9249 if (bpf_obj_pin(link->fd, link->pin_path)) { in bpf_link__pin()
9250 err = -errno; in bpf_link__pin()
9251 zfree(&link->pin_path); in bpf_link__pin()
9255 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path); in bpf_link__pin()
9263 if (!link->pin_path) in bpf_link__unpin()
9264 return -EINVAL; in bpf_link__unpin()
9266 err = unlink(link->pin_path); in bpf_link__unpin()
9268 return -errno; in bpf_link__unpin()
9270 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path); in bpf_link__unpin()
9271 zfree(&link->pin_path); in bpf_link__unpin()
9279 err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0); in bpf_link__detach_perf_event()
9281 err = -errno; in bpf_link__detach_perf_event()
9283 close(link->fd); in bpf_link__detach_perf_event()
9296 prog->name, pfd); in bpf_program__attach_perf_event()
9297 return ERR_PTR(-EINVAL); in bpf_program__attach_perf_event()
9302 prog->name); in bpf_program__attach_perf_event()
9303 return ERR_PTR(-EINVAL); in bpf_program__attach_perf_event()
9308 return ERR_PTR(-ENOMEM); in bpf_program__attach_perf_event()
9309 link->detach = &bpf_link__detach_perf_event; in bpf_program__attach_perf_event()
9310 link->fd = pfd; in bpf_program__attach_perf_event()
9313 err = -errno; in bpf_program__attach_perf_event()
9316 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); in bpf_program__attach_perf_event()
9317 if (err == -EPROTO) in bpf_program__attach_perf_event()
9319 prog->name, pfd); in bpf_program__attach_perf_event()
9323 err = -errno; in bpf_program__attach_perf_event()
9326 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); in bpf_program__attach_perf_event()
9333 * this function is expected to parse an integer in the range of [0, 2^31-1] from
9345 err = -errno; in parse_uint_from_file()
9352 err = err == EOF ? -EIO : -errno; in parse_uint_from_file()
9424 pid < 0 ? -1 : pid /* pid */, in perf_event_open_probe()
9425 pid == -1 ? 0 : -1 /* cpu */, in perf_event_open_probe()
9426 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); in perf_event_open_probe()
9428 err = -errno; in perf_event_open_probe()
9446 0 /* offset */, -1 /* pid */); in bpf_program__attach_kprobe()
9449 prog->name, retprobe ? "kretprobe" : "kprobe", func_name, in bpf_program__attach_kprobe()
9458 prog->name, retprobe ? "kretprobe" : "kprobe", func_name, in bpf_program__attach_kprobe()
9471 func_name = prog->sec_name + sec->len; in attach_kprobe()
9472 retprobe = strcmp(sec->sec, "kretprobe/") == 0; in attach_kprobe()
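/*
 * Usage sketch: attaching a "kprobe/..." program by hand (kernel function
 * name illustrative):
 */
#include <bpf/libbpf.h>

int kprobe_attach_example(struct bpf_program *prog)
{
	struct bpf_link *link;

	link = bpf_program__attach_kprobe(prog, false /* !retprobe */,
					  "vfs_read");
	if (libbpf_get_error(link))
		return -1;
	/* ... detach and free when done: */
	return bpf_link__destroy(link);
}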
9490 prog->name, retprobe ? "uretprobe" : "uprobe", in bpf_program__attach_uprobe()
9500 prog->name, retprobe ? "uretprobe" : "uprobe", in bpf_program__attach_uprobe()
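/* A sketch of the two probe attach APIs above; "do_unlinkat", the libc path,
 * and the 0x1234 offset are illustrative placeholders.
 */
#include <bpf/libbpf.h>

static int attach_probes_sketch(struct bpf_program *kp, struct bpf_program *up)
{
	struct bpf_link *klink, *ulink;

	/* Fire on entry to a kernel function. */
	klink = bpf_program__attach_kprobe(kp, false /* retprobe */,
					   "do_unlinkat");
	if (libbpf_get_error(klink))
		return (int)libbpf_get_error(klink);

	/* Fire on return from a user-space function in any process. */
	ulink = bpf_program__attach_uprobe(up, true /* retprobe */,
					   -1 /* any pid */,
					   "/usr/lib/libc.so.6",
					   0x1234 /* func offset */);
	if (libbpf_get_error(ulink)) {
		bpf_link__destroy(klink);
		return (int)libbpf_get_error(ulink);
	}
	return 0;
}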
9518 return -errno; in determine_tracepoint_id()
9520 pr_debug("tracepoint %s/%s path is too long\n", in determine_tracepoint_id()
9522 return -E2BIG; in determine_tracepoint_id()
9546 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */, in perf_event_open_tracepoint()
9547 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); in perf_event_open_tracepoint()
9549 err = -errno; in perf_event_open_tracepoint()
9569 prog->name, tp_category, tp_name, in bpf_program__attach_tracepoint()
9578 prog->name, tp_category, tp_name, in bpf_program__attach_tracepoint()
9591 sec_name = strdup(prog->sec_name); in attach_tp()
9593 return ERR_PTR(-ENOMEM); in attach_tp()
9596 tp_cat = sec_name + sec->len; in attach_tp()
9599 link = ERR_PTR(-EINVAL); in attach_tp()
9620 pr_warn("prog '%s': can't attach before loaded\n", prog->name); in bpf_program__attach_raw_tracepoint()
9621 return ERR_PTR(-EINVAL); in bpf_program__attach_raw_tracepoint()
9626 return ERR_PTR(-ENOMEM); in bpf_program__attach_raw_tracepoint()
9627 link->detach = &bpf_link__detach_fd; in bpf_program__attach_raw_tracepoint()
9631 pfd = -errno; in bpf_program__attach_raw_tracepoint()
9634 prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); in bpf_program__attach_raw_tracepoint()
9637 link->fd = pfd; in bpf_program__attach_raw_tracepoint()
9644 const char *tp_name = prog->sec_name + sec->len; in attach_raw_tp()
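/* A sketch of the two tracepoint flavors above. The category/name pair maps
 * to /sys/kernel/debug/tracing/events/<category>/<name>, which is how
 * determine_tracepoint_id() resolves the event ID; raw tracepoints bypass
 * the perf layer and take just the tracepoint name.
 */
#include <bpf/libbpf.h>

static int attach_tps_sketch(struct bpf_program *tp, struct bpf_program *raw)
{
	struct bpf_link *l1, *l2;

	l1 = bpf_program__attach_tracepoint(tp, "syscalls", "sys_enter_openat");
	if (libbpf_get_error(l1))
		return (int)libbpf_get_error(l1);

	l2 = bpf_program__attach_raw_tracepoint(raw, "sched_switch");
	if (libbpf_get_error(l2)) {
		bpf_link__destroy(l1);
		return (int)libbpf_get_error(l2);
	}
	return 0;
}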
9658 pr_warn("prog '%s': can't attach before loaded\n", prog->name); in bpf_program__attach_btf_id()
9659 return ERR_PTR(-EINVAL); in bpf_program__attach_btf_id()
9664 return ERR_PTR(-ENOMEM); in bpf_program__attach_btf_id()
9665 link->detach = &bpf_link__detach_fd; in bpf_program__attach_btf_id()
9669 pfd = -errno; in bpf_program__attach_btf_id()
9672 prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); in bpf_program__attach_btf_id()
9675 link->fd = pfd; in bpf_program__attach_btf_id()
9720 pr_warn("prog '%s': can't attach before loaded\n", prog->name); in bpf_program__attach_fd()
9721 return ERR_PTR(-EINVAL); in bpf_program__attach_fd()
9726 return ERR_PTR(-ENOMEM); in bpf_program__attach_fd()
9727 link->detach = &bpf_link__detach_fd; in bpf_program__attach_fd()
9732 link_fd = -errno; in bpf_program__attach_fd()
9735 prog->name, target_name, in bpf_program__attach_fd()
9739 link->fd = link_fd; in bpf_program__attach_fd()
9769 prog->name); in bpf_program__attach_freplace()
9770 return ERR_PTR(-EINVAL); in bpf_program__attach_freplace()
9773 if (prog->type != BPF_PROG_TYPE_EXT) { in bpf_program__attach_freplace()
9775 prog->name); in bpf_program__attach_freplace()
9776 return ERR_PTR(-EINVAL); in bpf_program__attach_freplace()
9804 return ERR_PTR(-EINVAL); in bpf_program__attach_iter()
9811 pr_warn("prog '%s': can't attach before loaded\n", prog->name); in bpf_program__attach_iter()
9812 return ERR_PTR(-EINVAL); in bpf_program__attach_iter()
9817 return ERR_PTR(-ENOMEM); in bpf_program__attach_iter()
9818 link->detach = &bpf_link__detach_fd; in bpf_program__attach_iter()
9823 link_fd = -errno; in bpf_program__attach_iter()
9826 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); in bpf_program__attach_iter()
9829 link->fd = link_fd; in bpf_program__attach_iter()
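/* A hedged sketch for iterator programs: attach with default opts, then turn
 * the link into a readable seq_file FD via bpf_iter_create() from bpf.h.
 */
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int read_iter_sketch(struct bpf_program *prog)
{
	struct bpf_link *link = bpf_program__attach_iter(prog, NULL);
	char buf[4096];
	ssize_t n;
	int iter_fd;

	if (libbpf_get_error(link))
		return (int)libbpf_get_error(link);
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0) {
		bpf_link__destroy(link);
		return iter_fd;
	}
	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
		; /* each read() returns the iterator's formatted output */
	close(iter_fd);
	bpf_link__destroy(link);
	return (int)n;
}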
9837 sec_def = find_sec_def(prog->sec_name); in bpf_program__attach()
9838 if (!sec_def || !sec_def->attach_fn) in bpf_program__attach()
9839 return ERR_PTR(-ESRCH); in bpf_program__attach()
9841 return sec_def->attach_fn(sec_def, prog); in bpf_program__attach()
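/* bpf_program__attach() dispatches on the program's SEC() name. A sketch of
 * the common fallback pattern; -ESRCH means the section has no auto-attach
 * handler and a type-specific attach call is required (the kprobe fallback
 * and its function name here are illustrative).
 */
#include <errno.h>
#include <bpf/libbpf.h>

static struct bpf_link *attach_generic_sketch(struct bpf_program *prog)
{
	struct bpf_link *link = bpf_program__attach(prog);

	if (libbpf_get_error(link) == -ESRCH)
		return bpf_program__attach_kprobe(prog, false, "do_unlinkat");
	return link;
}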
9848 if (bpf_map_delete_elem(link->fd, &zero)) in bpf_link__detach_struct_ops()
9849 return -errno; in bpf_link__detach_struct_ops()
9854 struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map) in bpf_map__attach_struct_ops() argument
9861 if (!bpf_map__is_struct_ops(map) || map->fd == -1) in bpf_map__attach_struct_ops()
9862 return ERR_PTR(-EINVAL); in bpf_map__attach_struct_ops()
9866 return ERR_PTR(-EINVAL); in bpf_map__attach_struct_ops()
9868 st_ops = map->st_ops; in bpf_map__attach_struct_ops()
9869 for (i = 0; i < btf_vlen(st_ops->type); i++) { in bpf_map__attach_struct_ops()
9870 struct bpf_program *prog = st_ops->progs[i]; in bpf_map__attach_struct_ops()
9878 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i]; in bpf_map__attach_struct_ops()
9882 err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0); in bpf_map__attach_struct_ops()
9884 err = -errno; in bpf_map__attach_struct_ops()
9889 link->detach = bpf_link__detach_struct_ops; in bpf_map__attach_struct_ops()
9890 link->fd = map->fd; in bpf_map__attach_struct_ops()
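/* A hedged sketch for struct_ops maps (e.g. a custom TCP congestion
 * control); "dctcp" names a hypothetical struct_ops map from the object.
 */
#include <bpf/libbpf.h>

static struct bpf_link *register_struct_ops_sketch(struct bpf_object *obj)
{
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "dctcp");

	if (!map)
		return NULL;
	/* Updating slot 0 with kern_vdata registers the ops with the kernel
	 * subsystem; destroying the link deletes the element and thereby
	 * unregisters them (see bpf_link__detach_struct_ops() above). */
	return bpf_map__attach_struct_ops(map);
}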
9902 __u64 data_tail = header->data_tail; in bpf_perf_event_read_simple()
9909 ehdr = base + (data_tail & (mmap_size - 1)); in bpf_perf_event_read_simple()
9910 ehdr_size = ehdr->size; in bpf_perf_event_read_simple()
9914 size_t len_first = base + mmap_size - copy_start; in bpf_perf_event_read_simple()
9915 size_t len_secnd = ehdr_size - len_first; in bpf_perf_event_read_simple()
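/* When a record straddles the end of the mmap'ed ring, it is reassembled
 * from two pieces. Worked example: with mmap_size = 4096, a 24-byte record
 * whose masked tail offset is 4088 contributes len_first = 8 bytes from the
 * end of the buffer and len_secnd = 16 bytes from its start.
 */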
9949 /* sample_cb and lost_cb are higher-level common-case callbacks */
9980 int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
9988 if (cpu_buf->base && in perf_buffer__free_cpu_buf()
9989 munmap(cpu_buf->base, pb->mmap_size + pb->page_size)) in perf_buffer__free_cpu_buf()
9990 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu); in perf_buffer__free_cpu_buf()
9991 if (cpu_buf->fd >= 0) { in perf_buffer__free_cpu_buf()
9992 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0); in perf_buffer__free_cpu_buf()
9993 close(cpu_buf->fd); in perf_buffer__free_cpu_buf()
9995 free(cpu_buf->buf); in perf_buffer__free_cpu_buf()
10005 if (pb->cpu_bufs) { in perf_buffer__free()
10006 for (i = 0; i < pb->cpu_cnt; i++) { in perf_buffer__free()
10007 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; in perf_buffer__free()
10012 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key); in perf_buffer__free()
10015 free(pb->cpu_bufs); in perf_buffer__free()
10017 if (pb->epoll_fd >= 0) in perf_buffer__free()
10018 close(pb->epoll_fd); in perf_buffer__free()
10019 free(pb->events); in perf_buffer__free()
10033 return ERR_PTR(-ENOMEM); in perf_buffer__open_cpu_buf()
10035 cpu_buf->pb = pb; in perf_buffer__open_cpu_buf()
10036 cpu_buf->cpu = cpu; in perf_buffer__open_cpu_buf()
10037 cpu_buf->map_key = map_key; in perf_buffer__open_cpu_buf()
10039 cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu, in perf_buffer__open_cpu_buf()
10040 -1, PERF_FLAG_FD_CLOEXEC); in perf_buffer__open_cpu_buf()
10041 if (cpu_buf->fd < 0) { in perf_buffer__open_cpu_buf()
10042 err = -errno; in perf_buffer__open_cpu_buf()
10048 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size, in perf_buffer__open_cpu_buf()
10050 cpu_buf->fd, 0); in perf_buffer__open_cpu_buf()
10051 if (cpu_buf->base == MAP_FAILED) { in perf_buffer__open_cpu_buf()
10052 cpu_buf->base = NULL; in perf_buffer__open_cpu_buf()
10053 err = -errno; in perf_buffer__open_cpu_buf()
10059 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) { in perf_buffer__open_cpu_buf()
10060 err = -errno; in perf_buffer__open_cpu_buf()
10089 p.sample_cb = opts ? opts->sample_cb : NULL; in perf_buffer__new()
10090 p.lost_cb = opts ? opts->lost_cb : NULL; in perf_buffer__new()
10091 p.ctx = opts ? opts->ctx : NULL; in perf_buffer__new()
10102 p.attr = opts->attr; in perf_buffer__new_raw()
10103 p.event_cb = opts->event_cb; in perf_buffer__new_raw()
10104 p.ctx = opts->ctx; in perf_buffer__new_raw()
10105 p.cpu_cnt = opts->cpu_cnt; in perf_buffer__new_raw()
10106 p.cpus = opts->cpus; in perf_buffer__new_raw()
10107 p.map_keys = opts->map_keys; in perf_buffer__new_raw()
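/* A sketch of typical setup against a BPF_MAP_TYPE_PERF_EVENT_ARRAY map,
 * using the opts-based perf_buffer__new() signature of this libbpf version;
 * the callback names and the 8-page per-CPU ring size are illustrative.
 */
#include <bpf/libbpf.h>

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* data is what the BPF side emitted via bpf_perf_event_output();
	 * it is only valid for the duration of this callback. */
}

static void on_lost(void *ctx, int cpu, __u64 cnt)
{
	/* cnt samples were dropped on this CPU because its ring was full. */
}

static struct perf_buffer *open_pb_sketch(int map_fd)
{
	struct perf_buffer_opts opts = {
		.sample_cb = on_sample,
		.lost_cb = on_lost,
		.ctx = NULL,
	};

	return perf_buffer__new(map_fd, 8 /* pages per CPU ring */, &opts);
}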
10116 struct bpf_map_info map; in __perf_buffer__new() local
10123 if (page_cnt & (page_cnt - 1)) { in __perf_buffer__new()
10126 return ERR_PTR(-EINVAL); in __perf_buffer__new()
10129 /* best-effort sanity checks */ in __perf_buffer__new()
10130 memset(&map, 0, sizeof(map)); in __perf_buffer__new()
10131 map_info_len = sizeof(map); in __perf_buffer__new()
10132 err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len); in __perf_buffer__new()
10134 err = -errno; in __perf_buffer__new()
10136 * -EBADFD, -EFAULT, or -E2BIG on real error in __perf_buffer__new()
10138 if (err != -EINVAL) { in __perf_buffer__new()
10139 pr_warn("failed to get map info for map FD %d: %s\n", in __perf_buffer__new()
10143 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n", in __perf_buffer__new()
10146 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { in __perf_buffer__new()
10147 pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n", in __perf_buffer__new()
10148 map.name); in __perf_buffer__new()
10149 return ERR_PTR(-EINVAL); in __perf_buffer__new()
10155 return ERR_PTR(-ENOMEM); in __perf_buffer__new()
10157 pb->event_cb = p->event_cb; in __perf_buffer__new()
10158 pb->sample_cb = p->sample_cb; in __perf_buffer__new()
10159 pb->lost_cb = p->lost_cb; in __perf_buffer__new()
10160 pb->ctx = p->ctx; in __perf_buffer__new()
10162 pb->page_size = getpagesize(); in __perf_buffer__new()
10163 pb->mmap_size = pb->page_size * page_cnt; in __perf_buffer__new()
10164 pb->map_fd = map_fd; in __perf_buffer__new()
10166 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC); in __perf_buffer__new()
10167 if (pb->epoll_fd < 0) { in __perf_buffer__new()
10168 err = -errno; in __perf_buffer__new()
10174 if (p->cpu_cnt > 0) { in __perf_buffer__new()
10175 pb->cpu_cnt = p->cpu_cnt; in __perf_buffer__new()
10177 pb->cpu_cnt = libbpf_num_possible_cpus(); in __perf_buffer__new()
10178 if (pb->cpu_cnt < 0) { in __perf_buffer__new()
10179 err = pb->cpu_cnt; in __perf_buffer__new()
10182 if (map.max_entries && map.max_entries < pb->cpu_cnt) in __perf_buffer__new()
10183 pb->cpu_cnt = map.max_entries; in __perf_buffer__new()
10186 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events)); in __perf_buffer__new()
10187 if (!pb->events) { in __perf_buffer__new()
10188 err = -ENOMEM; in __perf_buffer__new()
10192 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs)); in __perf_buffer__new()
10193 if (!pb->cpu_bufs) { in __perf_buffer__new()
10194 err = -ENOMEM; in __perf_buffer__new()
10205 for (i = 0, j = 0; i < pb->cpu_cnt; i++) { in __perf_buffer__new()
10209 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i; in __perf_buffer__new()
10210 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i; in __perf_buffer__new()
10215 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu])) in __perf_buffer__new()
10218 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key); in __perf_buffer__new()
10224 pb->cpu_bufs[j] = cpu_buf; in __perf_buffer__new()
10226 err = bpf_map_update_elem(pb->map_fd, &map_key, in __perf_buffer__new()
10227 &cpu_buf->fd, 0); in __perf_buffer__new()
10229 err = -errno; in __perf_buffer__new()
10230 pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n", in __perf_buffer__new()
10231 cpu, map_key, cpu_buf->fd, in __perf_buffer__new()
10236 pb->events[j].events = EPOLLIN; in __perf_buffer__new()
10237 pb->events[j].data.ptr = cpu_buf; in __perf_buffer__new()
10238 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd, in __perf_buffer__new()
10239 &pb->events[j]) < 0) { in __perf_buffer__new()
10240 err = -errno; in __perf_buffer__new()
10242 cpu, cpu_buf->fd, in __perf_buffer__new()
10248 pb->cpu_cnt = j; in __perf_buffer__new()
10277 struct perf_buffer *pb = cpu_buf->pb; in perf_buffer__process_record()
10281 if (pb->event_cb) in perf_buffer__process_record()
10282 return pb->event_cb(pb->ctx, cpu_buf->cpu, e); in perf_buffer__process_record()
10284 switch (e->type) { in perf_buffer__process_record()
10288 if (pb->sample_cb) in perf_buffer__process_record()
10289 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size); in perf_buffer__process_record()
10295 if (pb->lost_cb) in perf_buffer__process_record()
10296 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost); in perf_buffer__process_record()
10300 pr_warn("unknown perf sample type %d\n", e->type); in perf_buffer__process_record()
10311 ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size, in perf_buffer__process_records()
10312 pb->page_size, &cpu_buf->buf, in perf_buffer__process_records()
10313 &cpu_buf->buf_size, in perf_buffer__process_records()
10322 return pb->epoll_fd; in perf_buffer__epoll_fd()
10329 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms); in perf_buffer__poll()
10331 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr; in perf_buffer__poll()
10339 return cnt < 0 ? -errno : cnt; in perf_buffer__poll()
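/* A minimal consume loop: perf_buffer__poll() invokes the registered
 * callbacks for every ready ring and returns the number of rings that had
 * data, or a negative errno.
 */
#include <errno.h>
#include <bpf/libbpf.h>

static int poll_loop_sketch(struct perf_buffer *pb, volatile int *exiting)
{
	int n;

	while (!*exiting) {
		n = perf_buffer__poll(pb, 100 /* timeout, ms */);
		if (n < 0 && n != -EINTR)
			return n;
	}
	return 0;
}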
10342 /* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
10347 return pb->cpu_cnt; in perf_buffer__buffer_cnt()
10352 * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
10359 if (buf_idx >= pb->cpu_cnt) in perf_buffer__buffer_fd()
10360 return -EINVAL; in perf_buffer__buffer_fd()
10362 cpu_buf = pb->cpu_bufs[buf_idx]; in perf_buffer__buffer_fd()
10364 return -ENOENT; in perf_buffer__buffer_fd()
10366 return cpu_buf->fd; in perf_buffer__buffer_fd()
10371 * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
10374 * - 0 on success;
10375 * - <0 on failure.
10381 if (buf_idx >= pb->cpu_cnt) in perf_buffer__consume_buffer()
10382 return -EINVAL; in perf_buffer__consume_buffer()
10384 cpu_buf = pb->cpu_bufs[buf_idx]; in perf_buffer__consume_buffer()
10386 return -ENOENT; in perf_buffer__consume_buffer()
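/* A sketch of plugging a single ring into an external event loop with the
 * per-buffer accessors above, instead of perf_buffer__poll().
 */
#include <bpf/libbpf.h>

static int drain_one_ring_sketch(struct perf_buffer *pb, size_t idx)
{
	int fd = perf_buffer__buffer_fd(pb, idx);

	if (fd < 0)
		return fd;
	/* ... register fd with the caller's own epoll/poll set; once it
	 * becomes readable, drain exactly that ring: */
	return perf_buffer__consume_buffer(pb, idx);
}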
10395 for (i = 0; i < pb->cpu_cnt; i++) { in perf_buffer__consume()
10396 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; in perf_buffer__consume()
10414 * < 0: fixed size of -size_offset
10422 -1,
10427 -1,
10432 -(int)sizeof(__u32),
10437 -(int)sizeof(__u64),
10442 -(int)sizeof(__u32),
10462 -(int)sizeof(__u8) * BPF_TAG_SIZE,
10474 return -(int)offset; in bpf_prog_info_read_offset_u32()
10484 return -(int)offset; in bpf_prog_info_read_offset_u64()
10516 return ERR_PTR(-EINVAL); in bpf_program__get_prog_info_linear()
10522 return ERR_PTR(-EFAULT); in bpf_program__get_prog_info_linear()
10534 if (info_len < desc->array_offset + sizeof(__u32) || in bpf_program__get_prog_info_linear()
10535 info_len < desc->count_offset + sizeof(__u32) || in bpf_program__get_prog_info_linear()
10536 (desc->size_offset > 0 && info_len < desc->size_offset)) in bpf_program__get_prog_info_linear()
10544 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset); in bpf_program__get_prog_info_linear()
10545 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset); in bpf_program__get_prog_info_linear()
10554 return ERR_PTR(-ENOMEM); in bpf_program__get_prog_info_linear()
10556 /* step 4: fill data to info_linear->info */ in bpf_program__get_prog_info_linear()
10557 info_linear->arrays = arrays; in bpf_program__get_prog_info_linear()
10558 memset(&info_linear->info, 0, sizeof(info)); in bpf_program__get_prog_info_linear()
10559 ptr = info_linear->data; in bpf_program__get_prog_info_linear()
10569 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset); in bpf_program__get_prog_info_linear()
10570 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset); in bpf_program__get_prog_info_linear()
10571 bpf_prog_info_set_offset_u32(&info_linear->info, in bpf_program__get_prog_info_linear()
10572 desc->count_offset, count); in bpf_program__get_prog_info_linear()
10573 bpf_prog_info_set_offset_u32(&info_linear->info, in bpf_program__get_prog_info_linear()
10574 desc->size_offset, size); in bpf_program__get_prog_info_linear()
10575 bpf_prog_info_set_offset_u64(&info_linear->info, in bpf_program__get_prog_info_linear()
10576 desc->array_offset, in bpf_program__get_prog_info_linear()
10582 err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len); in bpf_program__get_prog_info_linear()
10586 return ERR_PTR(-EFAULT); in bpf_program__get_prog_info_linear()
10598 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset); in bpf_program__get_prog_info_linear()
10599 v2 = bpf_prog_info_read_offset_u32(&info_linear->info, in bpf_program__get_prog_info_linear()
10600 desc->count_offset); in bpf_program__get_prog_info_linear()
10604 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset); in bpf_program__get_prog_info_linear()
10605 v2 = bpf_prog_info_read_offset_u32(&info_linear->info, in bpf_program__get_prog_info_linear()
10606 desc->size_offset); in bpf_program__get_prog_info_linear()
10612 info_linear->info_len = sizeof(struct bpf_prog_info); in bpf_program__get_prog_info_linear()
10613 info_linear->data_len = data_len; in bpf_program__get_prog_info_linear()
10626 if ((info_linear->arrays & (1UL << i)) == 0) in bpf_program__bpil_addr_to_offs()
10630 addr = bpf_prog_info_read_offset_u64(&info_linear->info, in bpf_program__bpil_addr_to_offs()
10631 desc->array_offset); in bpf_program__bpil_addr_to_offs()
10632 offs = addr - ptr_to_u64(info_linear->data); in bpf_program__bpil_addr_to_offs()
10633 bpf_prog_info_set_offset_u64(&info_linear->info, in bpf_program__bpil_addr_to_offs()
10634 desc->array_offset, offs); in bpf_program__bpil_addr_to_offs()
10646 if ((info_linear->arrays & (1UL << i)) == 0) in bpf_program__bpil_offs_to_addr()
10650 offs = bpf_prog_info_read_offset_u64(&info_linear->info, in bpf_program__bpil_offs_to_addr()
10651 desc->array_offset); in bpf_program__bpil_offs_to_addr()
10652 addr = offs + ptr_to_u64(info_linear->data); in bpf_program__bpil_offs_to_addr()
10653 bpf_prog_info_set_offset_u64(&info_linear->info, in bpf_program__bpil_offs_to_addr()
10654 desc->array_offset, addr); in bpf_program__bpil_offs_to_addr()
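/* A hedged sketch of the bpf_prog_info_linear helpers above, requesting just
 * the JITed instruction array for a program FD.
 */
#include <stdlib.h>
#include <bpf/libbpf.h>

static void inspect_jited_sketch(int prog_fd)
{
	__u64 arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
	struct bpf_prog_info_linear *info;

	info = bpf_program__get_prog_info_linear(prog_fd, arrays);
	if (libbpf_get_error(info))
		return;
	/* info->info.jited_prog_insns points into info->data; convert
	 * pointers to offsets with bpf_program__bpil_addr_to_offs() before
	 * serializing the blob. */
	free(info);
}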
10665 return -EINVAL; in bpf_program__set_attach_target()
10672 prog->expected_attach_type); in bpf_program__set_attach_target()
10677 prog->attach_btf_id = btf_id; in bpf_program__set_attach_target()
10678 prog->attach_prog_fd = attach_prog_fd; in bpf_program__set_attach_target()
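/* A sketch of retargeting an EXT (freplace) program; "test_pkt_access" is an
 * illustrative function name in the already-loaded target program.
 */
#include <bpf/libbpf.h>

static struct bpf_link *freplace_sketch(struct bpf_program *ext_prog,
					int target_fd)
{
	int err;

	/* Must run before bpf_object__load(): it records the BTF ID the
	 * verifier will check the replacement's signature against. */
	err = bpf_program__set_attach_target(ext_prog, target_fd,
					     "test_pkt_access");
	if (err)
		return NULL;
	/* ... load the object, then: */
	return bpf_program__attach_freplace(ext_prog, target_fd,
					    "test_pkt_access");
}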
10684 int err = 0, n, len, start, end = -1; in parse_cpu_mask_str()
10690 /* Each substring separated by ',' has the format \d+-\d+ or \d+ */ in parse_cpu_mask_str()
10696 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len); in parse_cpu_mask_str()
10699 err = -EINVAL; in parse_cpu_mask_str()
10707 err = -EINVAL; in parse_cpu_mask_str()
10712 err = -ENOMEM; in parse_cpu_mask_str()
10716 memset(tmp + *mask_sz, 0, start - *mask_sz); in parse_cpu_mask_str()
10717 memset(tmp + start, 1, end - start + 1); in parse_cpu_mask_str()
10723 return -EINVAL; in parse_cpu_mask_str()
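/* Worked example of the accepted syntax: "0-2,5" produces
 * *mask = {1, 1, 1, 0, 0, 1} and *mask_sz = 6, i.e. one byte per CPU index
 * up to the highest CPU mentioned.
 */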
10739 err = -errno; in parse_cpu_mask_file()
10746 err = len ? -errno : -EINVAL; in parse_cpu_mask_file()
10752 return -E2BIG; in parse_cpu_mask_file()
10789 .object_name = s->name, in bpf_object__open_skeleton()
10794 /* Attempt to preserve opts->object_name, unless overridden by user in bpf_object__open_skeleton()
10797 * prefix as their own map name prefix. When skeleton is generated, in bpf_object__open_skeleton()
10802 if (!opts->object_name) in bpf_object__open_skeleton()
10803 skel_opts.object_name = s->name; in bpf_object__open_skeleton()
10806 obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts); in bpf_object__open_skeleton()
10809 s->name, PTR_ERR(obj)); in bpf_object__open_skeleton()
10813 *s->obj = obj; in bpf_object__open_skeleton()
10815 for (i = 0; i < s->map_cnt; i++) { in bpf_object__open_skeleton()
10816 struct bpf_map **map = s->maps[i].map; in bpf_object__open_skeleton() local
10817 const char *name = s->maps[i].name; in bpf_object__open_skeleton()
10818 void **mmaped = s->maps[i].mmaped; in bpf_object__open_skeleton()
10820 *map = bpf_object__find_map_by_name(obj, name); in bpf_object__open_skeleton()
10821 if (!*map) { in bpf_object__open_skeleton()
10822 pr_warn("failed to find skeleton map '%s'\n", name); in bpf_object__open_skeleton()
10823 return -ESRCH; in bpf_object__open_skeleton()
10826 /* externs shouldn't be pre-setup from user code */ in bpf_object__open_skeleton()
10827 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG) in bpf_object__open_skeleton()
10828 *mmaped = (*map)->mmaped; in bpf_object__open_skeleton()
10831 for (i = 0; i < s->prog_cnt; i++) { in bpf_object__open_skeleton()
10832 struct bpf_program **prog = s->progs[i].prog; in bpf_object__open_skeleton()
10833 const char *name = s->progs[i].name; in bpf_object__open_skeleton()
10838 return -ESRCH; in bpf_object__open_skeleton()
10849 err = bpf_object__load(*s->obj); in bpf_object__load_skeleton()
10851 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err); in bpf_object__load_skeleton()
10855 for (i = 0; i < s->map_cnt; i++) { in bpf_object__load_skeleton()
10856 struct bpf_map *map = *s->maps[i].map; in bpf_object__load_skeleton() local
10857 size_t mmap_sz = bpf_map_mmap_sz(map); in bpf_object__load_skeleton()
10858 int prot, map_fd = bpf_map__fd(map); in bpf_object__load_skeleton()
10859 void **mmaped = s->maps[i].mmaped; in bpf_object__load_skeleton()
10864 if (!(map->def.map_flags & BPF_F_MMAPABLE)) { in bpf_object__load_skeleton()
10869 if (map->def.map_flags & BPF_F_RDONLY_PROG) in bpf_object__load_skeleton()
10874 /* Remap anonymous mmap()-ed "map initialization image" as in bpf_object__load_skeleton()
10875 * a BPF map-backed mmap()-ed memory, but preserving the same in bpf_object__load_skeleton()
10884 *mmaped = mmap(map->mmaped, mmap_sz, prot, in bpf_object__load_skeleton()
10887 err = -errno; in bpf_object__load_skeleton()
10889 pr_warn("failed to re-mmap() map '%s': %d\n", in bpf_object__load_skeleton()
10890 bpf_map__name(map), err); in bpf_object__load_skeleton()
10902 for (i = 0; i < s->prog_cnt; i++) { in bpf_object__attach_skeleton()
10903 struct bpf_program *prog = *s->progs[i].prog; in bpf_object__attach_skeleton()
10904 struct bpf_link **link = s->progs[i].link; in bpf_object__attach_skeleton()
10907 if (!prog->load) in bpf_object__attach_skeleton()
10910 sec_def = find_sec_def(prog->sec_name); in bpf_object__attach_skeleton()
10911 if (!sec_def || !sec_def->attach_fn) in bpf_object__attach_skeleton()
10914 *link = sec_def->attach_fn(sec_def, prog); in bpf_object__attach_skeleton()
10916 pr_warn("failed to auto-attach program '%s': %ld\n", in bpf_object__attach_skeleton()
10929 for (i = 0; i < s->prog_cnt; i++) { in bpf_object__detach_skeleton()
10930 struct bpf_link **link = s->progs[i].link; in bpf_object__detach_skeleton()
10942 if (s->progs) in bpf_object__destroy_skeleton()
10944 if (s->obj) in bpf_object__destroy_skeleton()
10945 bpf_object__close(*s->obj); in bpf_object__destroy_skeleton()
10946 free(s->maps); in bpf_object__destroy_skeleton()
10947 free(s->progs); in bpf_object__destroy_skeleton()
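/* The four skeleton entry points above are normally driven by
 * bpftool-generated wrappers. A hedged sketch, assuming a skeleton generated
 * from a hypothetical myprog.bpf.c (so the header and the myprog_bpf__*
 * names are illustrative):
 */
#include "myprog.skel.h"	/* hypothetical bpftool-generated header */

static int skeleton_lifecycle_sketch(void)
{
	struct myprog_bpf *skel;
	int err;

	skel = myprog_bpf__open();		/* bpf_object__open_skeleton() */
	if (!skel)
		return -1;
	err = myprog_bpf__load(skel);		/* bpf_object__load_skeleton() */
	if (!err)
		err = myprog_bpf__attach(skel);	/* bpf_object__attach_skeleton() */
	myprog_bpf__destroy(skel);		/* detach + close + free */
	return err;
}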