mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-01-24 09:13:20 -05:00
Merge branch 'bpf-Fix-for-BPF-devmap-percpu-allocation-splat'
Daniel Borkmann says:

====================
bpf: Fix for BPF devmap percpu allocation splat

The set fixes a splat in devmap percpu allocation when we alloc the
flush bitmap. Patch 1 is a prerequisite for the fix in patch 2; patch 1
is rather small, so if this could be routed via -net, for example, with
Tejun's Ack, that would be good. Patch 3 gets rid of remaining
PCPU_MIN_UNIT_SIZE checks, which are percpu allocator internals and
should not be used.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
4bbb508348
4 changed files with 14 additions and 12 deletions
|
@ -98,7 +98,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
|
|||
array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
|
||||
|
||||
if (array_size >= U32_MAX - PAGE_SIZE ||
|
||||
elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
|
||||
bpf_array_alloc_percpu(array)) {
|
||||
bpf_map_area_free(array);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
|
|
@ -111,8 +111,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
|
|||
err = -ENOMEM;
|
||||
|
||||
/* A per cpu bitfield with a bit per possible net device */
|
||||
dtab->flush_needed = __alloc_percpu(dev_map_bitmap_size(attr),
|
||||
__alignof__(unsigned long));
|
||||
dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
|
||||
__alignof__(unsigned long),
|
||||
GFP_KERNEL | __GFP_NOWARN);
|
||||
if (!dtab->flush_needed)
|
||||
goto free_dtab;
|
||||
|
||||
|
|
|
@ -317,10 +317,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
|
|||
*/
|
||||
goto free_htab;
|
||||
|
||||
if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
|
||||
/* make sure the size for pcpu_alloc() is reasonable */
|
||||
goto free_htab;
|
||||
|
||||
htab->elem_size = sizeof(struct htab_elem) +
|
||||
round_up(htab->map.key_size, 8);
|
||||
if (percpu)
|
||||
|
|
15
mm/percpu.c
15
mm/percpu.c
|
@ -1329,7 +1329,9 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
|
|||
* @gfp: allocation flags
|
||||
*
|
||||
* Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
|
||||
* contain %GFP_KERNEL, the allocation is atomic.
|
||||
* contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
|
||||
* then no warning will be triggered on invalid or failed allocation
|
||||
* requests.
|
||||
*
|
||||
* RETURNS:
|
||||
* Percpu pointer to the allocated area on success, NULL on failure.
|
||||
|
@ -1337,10 +1339,11 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
|
|||
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
|
||||
gfp_t gfp)
|
||||
{
|
||||
bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
|
||||
bool do_warn = !(gfp & __GFP_NOWARN);
|
||||
static int warn_limit = 10;
|
||||
struct pcpu_chunk *chunk;
|
||||
const char *err;
|
||||
bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
|
||||
int slot, off, cpu, ret;
|
||||
unsigned long flags;
|
||||
void __percpu *ptr;
|
||||
|
@ -1361,7 +1364,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
|
|||
|
||||
if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
|
||||
!is_power_of_2(align))) {
|
||||
WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
|
||||
WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
|
||||
size, align);
|
||||
return NULL;
|
||||
}
|
||||
|
@ -1482,7 +1485,7 @@ fail_unlock:
|
|||
fail:
|
||||
trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
|
||||
|
||||
if (!is_atomic && warn_limit) {
|
||||
if (!is_atomic && do_warn && warn_limit) {
|
||||
pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
|
||||
size, align, is_atomic, err);
|
||||
dump_stack();
|
||||
|
@ -1507,7 +1510,9 @@ fail:
|
|||
*
|
||||
* Allocate zero-filled percpu area of @size bytes aligned at @align. If
|
||||
* @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
|
||||
* be called from any context but is a lot more likely to fail.
|
||||
* be called from any context but is a lot more likely to fail. If @gfp
|
||||
* has __GFP_NOWARN then no warning will be triggered on invalid or failed
|
||||
* allocation requests.
|
||||
*
|
||||
* RETURNS:
|
||||
* Percpu pointer to the allocated area on success, NULL on failure.
|
||||
|
|
Loading…
Add table
Reference in a new issue