bpf: Add map and need_defer parameters to .map_fd_put_ptr()

[ Upstream commit 20c20bd11a0702ce4dc9300c3da58acf551d9725 ]

map is the pointer to the outer map, and need_defer needs some explanation.
need_defer tells the implementation to defer the reference release of
the passed element and ensure that the element is still alive before
the bpf program, which may manipulate it, exits.

The following three cases will invoke map_fd_put_ptr() and different
need_defer values will be passed to these callers:

1) release the reference of the old element in the map during map update
   or map deletion. The release must be deferred, otherwise the bpf
   program may incur use-after-free problem, so need_defer needs to be
   true.
2) release the reference of the to-be-added element in the error path of
   map update. The to-be-added element is not visible to any bpf
   program, so it is OK to pass false for need_defer parameter.
3) release the references of all elements in the map during map release.
   Any bpf program which has access to the map must have been exited and
   released, so need_defer=false will be OK.

These two parameters will be used by the following patches to fix the
potential use-after-free problem for map-in-map.

Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20231204140425.1480317-3-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
(cherry picked from commit 5aa1e7d3f6d0db96c7139677d9e898bbbd6a7dcf)
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Change-Id: Ifb87b3a6a590d0deab4aaa4cf5510753a42ef9ce
Hou Tao 1 year ago committed by Simon1511
parent 523e31de9a
commit 2a13a419d1
  1. 6
      include/linux/bpf.h
  2. 12
      kernel/bpf/arraymap.c
  3. 6
      kernel/bpf/hashtab.c
  4. 2
      kernel/bpf/map_in_map.c
  5. 2
      kernel/bpf/map_in_map.h

@@ -39,7 +39,11 @@ struct bpf_map_ops {
/* funcs called by prog_array and perf_event_array map */
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
int fd);
void (*map_fd_put_ptr)(void *ptr);
/* If need_defer is true, the implementation should guarantee that
* the to-be-put element is still alive before the bpf program, which
* may manipulate it, exists.
*/
void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
u32 (*map_fd_sys_lookup_elem)(void *ptr);
};

@@ -420,7 +420,7 @@ int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
old_ptr = xchg(array->ptrs + index, new_ptr);
if (old_ptr)
map->ops->map_fd_put_ptr(old_ptr);
map->ops->map_fd_put_ptr(map, old_ptr, true);
return 0;
}
@@ -436,7 +436,7 @@ static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
old_ptr = xchg(array->ptrs + index, NULL);
if (old_ptr) {
map->ops->map_fd_put_ptr(old_ptr);
map->ops->map_fd_put_ptr(map, old_ptr, true);
return 0;
} else {
return -ENOENT;
@@ -460,8 +460,9 @@ static void *prog_fd_array_get_ptr(struct bpf_map *map,
return prog;
}
static void prog_fd_array_put_ptr(void *ptr)
static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
/* bpf_prog is freed after one RCU or tasks trace grace period */
bpf_prog_put(ptr);
}
@@ -547,8 +548,9 @@ err_out:
return ee;
}
static void perf_event_fd_array_put_ptr(void *ptr)
static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
/* bpf_perf_event is freed after one RCU grace period */
bpf_event_entry_free_rcu(ptr);
}
@@ -587,7 +589,7 @@ static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
return cgroup_get_from_fd(fd);
}
static void cgroup_fd_array_put_ptr(void *ptr)
static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
/* cgroup_put free cgrp after a rcu grace period */
cgroup_put(ptr);

@@ -656,7 +656,7 @@ static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
if (map->ops->map_fd_put_ptr) {
ptr = fd_htab_map_get_ptr(map, l);
map->ops->map_fd_put_ptr(ptr);
map->ops->map_fd_put_ptr(map, ptr, true);
}
}
@@ -1289,7 +1289,7 @@ static void fd_htab_map_free(struct bpf_map *map)
hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
void *ptr = fd_htab_map_get_ptr(map, l);
map->ops->map_fd_put_ptr(ptr);
map->ops->map_fd_put_ptr(map, ptr, false);
}
}
@@ -1330,7 +1330,7 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
ret = htab_map_update_elem(map, key, &ptr, map_flags);
if (ret)
map->ops->map_fd_put_ptr(ptr);
map->ops->map_fd_put_ptr(map, ptr, false);
return ret;
}

@@ -101,7 +101,7 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
return inner_map;
}
void bpf_map_fd_put_ptr(void *ptr)
void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
/* ptr->ops->map_free() has to go through one
* rcu grace period by itself.

@@ -18,7 +18,7 @@ bool bpf_map_meta_equal(const struct bpf_map *meta0,
const struct bpf_map *meta1);
void *bpf_map_fd_get_ptr(struct bpf_map *map, struct file *map_file,
int ufd);
void bpf_map_fd_put_ptr(void *ptr);
void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer);
u32 bpf_map_fd_sys_lookup_elem(void *ptr);
#endif

Loading…
Cancel
Save