Lines matching "storage" (full-word matches)
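All of the matches below come from the kernel's cgroup BPF local-storage implementation (presumably kernel/bpf/local_storage.c, judging by the function names), which backs the BPF_MAP_TYPE_CGROUP_STORAGE and BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE map types. Every storage item is identified by the cgroup it belongs to plus the program attach type; the UAPI key below is what bpf_cgroup_storage_key_cmp() compares in the lookup and insert fragments that follow. (Maps created with a bare __u64 key use only cgroup_inode_id, which is why cgroup_storage_get_next_key() further down writes the next key back in two different ways.)

/* include/uapi/linux/bpf.h */
struct bpf_cgroup_storage_key {
	__u64	cgroup_inode_id;	/* cgroup inode id */
	__u32	attach_type;		/* program attach type */
};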
79 struct bpf_cgroup_storage *storage; in cgroup_storage_lookup() local
81 storage = container_of(node, struct bpf_cgroup_storage, node); in cgroup_storage_lookup()
83 switch (bpf_cgroup_storage_key_cmp(map, key, &storage->key)) { in cgroup_storage_lookup()
93 return storage; in cgroup_storage_lookup()
104 struct bpf_cgroup_storage *storage) in cgroup_storage_insert() argument
115 switch (bpf_cgroup_storage_key_cmp(map, &storage->key, &this->key)) { in cgroup_storage_insert()
127 rb_link_node(&storage->node, parent, new); in cgroup_storage_insert()
128 rb_insert_color(&storage->node, root); in cgroup_storage_insert()
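cgroup_storage_lookup() and cgroup_storage_insert() are a conventional kernel rbtree walk over the map's tree of storage items, ordered by bpf_cgroup_storage_key_cmp(). Below is a sketch of the insert pattern the fragments above instantiate; field and label names are inferred from the fragments, not copied from the file.

/* Sketch: descend the per-map tree with the key comparator, then splice the
 * new node in at the leaf where the walk ended. */
static int cgroup_storage_insert_sketch(struct bpf_cgroup_storage_map *map,
					struct bpf_cgroup_storage *storage)
{
	struct rb_root *root = &map->root;	/* per-map tree (field name assumed) */
	struct rb_node **new = &root->rb_node, *parent = NULL;

	while (*new) {
		struct bpf_cgroup_storage *this;

		this = container_of(*new, struct bpf_cgroup_storage, node);
		parent = *new;

		switch (bpf_cgroup_storage_key_cmp(map, &storage->key, &this->key)) {
		case -1:
			new = &(*new)->rb_left;
			break;
		case 1:
			new = &(*new)->rb_right;
			break;
		default:
			return -EEXIST;		/* key already present */
		}
	}

	rb_link_node(&storage->node, parent, new);	/* attach at the leaf */
	rb_insert_color(&storage->node, root);		/* rebalance the tree */
	return 0;
}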
136 struct bpf_cgroup_storage *storage; in cgroup_storage_lookup_elem() local
138 storage = cgroup_storage_lookup(map, key, false); in cgroup_storage_lookup_elem()
139 if (!storage) in cgroup_storage_lookup_elem()
142 return &READ_ONCE(storage->buf)->data[0]; in cgroup_storage_lookup_elem()
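cgroup_storage_lookup_elem() can hand back &storage->buf->data[0] directly because the shared (non-per-CPU) payload is a flexible array sitting right behind an RCU head; this is the buffer that the update and free paths below swap out and defer-free.

/* include/linux/bpf-cgroup.h (same kernel era as this listing) */
struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};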
148 struct bpf_cgroup_storage *storage; in cgroup_storage_update_elem() local
158 storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map, in cgroup_storage_update_elem()
160 if (!storage) in cgroup_storage_update_elem()
164 copy_map_value_locked(map, storage->buf->data, value, false); in cgroup_storage_update_elem()
178 new = xchg(&storage->buf, new); in cgroup_storage_update_elem()
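cgroup_storage_update_elem() has two paths: with BPF_F_LOCK the value is copied in place under the map value's spin lock (copy_map_value_locked()); otherwise a fresh buffer is built off to the side, published with xchg(), and the displaced buffer is freed only after an RCU grace period so lock-free readers of the old pointer stay safe. A sketch of that second path, reconstructed from the fragments rather than quoted verbatim:

	struct bpf_storage_buffer *new;

	/* build the replacement buffer: header plus map->value_size bytes of data */
	new = kmalloc_node(sizeof(*new) + map->value_size,
			   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
			   map->numa_node);
	if (!new)
		return -ENOMEM;

	memcpy(&new->data[0], value, map->value_size);
	check_and_init_map_lock(map, new->data);

	new = xchg(&storage->buf, new);		/* publish; old buffer pointer comes back */
	kfree_rcu(new, rcu);			/* free the old buffer after readers drain */
	return 0;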
188 struct bpf_cgroup_storage *storage; in bpf_percpu_cgroup_storage_copy() local
193 storage = cgroup_storage_lookup(map, key, false); in bpf_percpu_cgroup_storage_copy()
194 if (!storage) { in bpf_percpu_cgroup_storage_copy()
206 per_cpu_ptr(storage->percpu_buf, cpu), size); in bpf_percpu_cgroup_storage_copy()
217 struct bpf_cgroup_storage *storage; in bpf_percpu_cgroup_storage_update() local
225 storage = cgroup_storage_lookup(map, key, false); in bpf_percpu_cgroup_storage_update()
226 if (!storage) { in bpf_percpu_cgroup_storage_update()
239 bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu), in bpf_percpu_cgroup_storage_update()
251 struct bpf_cgroup_storage *storage; in cgroup_storage_get_next_key() local
259 storage = cgroup_storage_lookup(map, key, true); in cgroup_storage_get_next_key()
260 if (!storage) in cgroup_storage_get_next_key()
263 storage = list_next_entry(storage, list_map); in cgroup_storage_get_next_key()
264 if (!storage) in cgroup_storage_get_next_key()
267 storage = list_first_entry(&map->list, in cgroup_storage_get_next_key()
275 *next = storage->key; in cgroup_storage_get_next_key()
278 *next = storage->key.cgroup_inode_id; in cgroup_storage_get_next_key()
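cgroup_storage_get_next_key() walks the map's list of storage items and reports the next key either as the full struct or as the bare cgroup_inode_id, matching the key size the map was created with. From user space this is the usual iterate-and-lookup pattern; the walker below is a hypothetical libbpf example (map_fd and the __u64 value layout are assumptions, not taken from the listing).

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Hypothetical: dump every (cgroup, attach_type) slot of a
 * BPF_MAP_TYPE_CGROUP_STORAGE map whose value is a single __u64. */
static void dump_cgroup_storage(int map_fd)
{
	struct bpf_cgroup_storage_key cur, next;
	void *prev = NULL;		/* NULL asks for the first key */
	__u64 value;

	while (!bpf_map_get_next_key(map_fd, prev, &next)) {
		if (!bpf_map_lookup_elem(map_fd, &next, &value))
			printf("cgroup %llu attach %u -> %llu\n",
			       (unsigned long long)next.cgroup_inode_id,
			       next.attach_type,
			       (unsigned long long)value);
		cur = next;
		prev = &cur;
	}
}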
339 struct bpf_cgroup_storage *storage, *stmp; in cgroup_storage_map_free() local
343 list_for_each_entry_safe(storage, stmp, storages, list_map) { in cgroup_storage_map_free()
344 bpf_cgroup_storage_unlink(storage); in cgroup_storage_map_free()
345 bpf_cgroup_storage_free(storage); in cgroup_storage_map_free()
423 struct bpf_cgroup_storage *storage; in cgroup_storage_seq_show_elem() local
427 storage = cgroup_storage_lookup(map_to_storage(map), key, false); in cgroup_storage_seq_show_elem()
428 if (!storage) { in cgroup_storage_seq_show_elem()
438 &READ_ONCE(storage->buf)->data[0], m); in cgroup_storage_seq_show_elem()
445 per_cpu_ptr(storage->percpu_buf, cpu), in cgroup_storage_seq_show_elem()
500 struct bpf_cgroup_storage *storage; in bpf_cgroup_storage_alloc() local
515 storage = kmalloc_node(sizeof(struct bpf_cgroup_storage), in bpf_cgroup_storage_alloc()
517 if (!storage) in bpf_cgroup_storage_alloc()
523 storage->buf = kmalloc_node(size, flags, map->numa_node); in bpf_cgroup_storage_alloc()
524 if (!storage->buf) in bpf_cgroup_storage_alloc()
526 check_and_init_map_lock(map, storage->buf->data); in bpf_cgroup_storage_alloc()
528 storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags); in bpf_cgroup_storage_alloc()
529 if (!storage->percpu_buf) in bpf_cgroup_storage_alloc()
533 storage->map = (struct bpf_cgroup_storage_map *)map; in bpf_cgroup_storage_alloc()
535 return storage; in bpf_cgroup_storage_alloc()
539 kfree(storage); in bpf_cgroup_storage_alloc()
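bpf_cgroup_storage_alloc() creates one of these per (map, cgroup, attach type) and fills either the shared buffer or the per-CPU area depending on the map type. The same object carries the rbtree node, the two list heads and the RCU head that the link/unlink/free fragments below operate on; its definition, from include/linux/bpf-cgroup.h in the same kernel era as the listing:

struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;	/* BPF_MAP_TYPE_CGROUP_STORAGE */
		void __percpu *percpu_buf;	/* BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE */
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list_map;		/* on the map's list of storages */
	struct list_head list_cg;		/* on the owning cgroup's bpf.storages */
	struct rb_node node;			/* in the map's rbtree */
	struct rcu_head rcu;
};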
545 struct bpf_cgroup_storage *storage = in free_shared_cgroup_storage_rcu() local
548 kfree(storage->buf); in free_shared_cgroup_storage_rcu()
549 kfree(storage); in free_shared_cgroup_storage_rcu()
554 struct bpf_cgroup_storage *storage = in free_percpu_cgroup_storage_rcu() local
557 free_percpu(storage->percpu_buf); in free_percpu_cgroup_storage_rcu()
558 kfree(storage); in free_percpu_cgroup_storage_rcu()
561 void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage) in bpf_cgroup_storage_free() argument
567 if (!storage) in bpf_cgroup_storage_free()
570 map = &storage->map->map; in bpf_cgroup_storage_free()
577 call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu); in bpf_cgroup_storage_free()
579 call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu); in bpf_cgroup_storage_free()
582 void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, in bpf_cgroup_storage_link() argument
588 if (!storage) in bpf_cgroup_storage_link()
591 storage->key.attach_type = type; in bpf_cgroup_storage_link()
592 storage->key.cgroup_inode_id = cgroup_id(cgroup); in bpf_cgroup_storage_link()
594 map = storage->map; in bpf_cgroup_storage_link()
597 WARN_ON(cgroup_storage_insert(map, storage)); in bpf_cgroup_storage_link()
598 list_add(&storage->list_map, &map->list); in bpf_cgroup_storage_link()
599 list_add(&storage->list_cg, &cgroup->bpf.storages); in bpf_cgroup_storage_link()
603 void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage) in bpf_cgroup_storage_unlink() argument
608 if (!storage) in bpf_cgroup_storage_unlink()
611 map = storage->map; in bpf_cgroup_storage_unlink()
615 rb_erase(&storage->node, root); in bpf_cgroup_storage_unlink()
617 list_del(&storage->list_map); in bpf_cgroup_storage_unlink()
618 list_del(&storage->list_cg); in bpf_cgroup_storage_unlink()
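For completeness, here is what this machinery looks like from the other side. A BPF program never sees the rbtree or the RCU bookkeeping above; it declares a cgroup storage map and asks for its slot with the bpf_get_local_storage() helper, which returns the data area that bpf_cgroup_storage_alloc()/bpf_cgroup_storage_link() prepared for the (cgroup, attach type) the program runs under. A minimal, hypothetical consumer follows; the map name and the __u64 value layout are illustrative only.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical consumer of cgroup local storage: counts egress packets per
 * (cgroup, attach type). The slot it dereferences is the storage->buf data
 * area that the allocation and link code above sets up. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u64);
} pkt_counter SEC(".maps");

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	__u64 *count = bpf_get_local_storage(&pkt_counter, 0);

	__sync_fetch_and_add(count, 1);
	return 1;	/* 1 == allow the packet */
}

char LICENSE[] SEC("license") = "GPL";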