queue_stack_maps.c
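
This file implements the kernel side of the BPF queue and stack map types
(BPF_MAP_TYPE_QUEUE and BPF_MAP_TYPE_STACK): keyless FIFO/LIFO buffers of
fixed-size values. As a rough orientation, a queue map might be driven from
user space as in the sketch below (not part of this file; it assumes libbpf
v0.7 or newer and sufficient privileges to create BPF maps, and the map name
"example_q" and the sizes are arbitrary):

    #include <stdio.h>
    #include <bpf/bpf.h>	/* libbpf syscall wrappers */

    int main(void)
    {
    	__u32 in = 42, out = 0;
    	/* key_size must be 0: queue/stack maps are keyless */
    	int fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, "example_q",
    				0 /* key_size */, sizeof(__u32),
    				16 /* max_entries */, NULL);

    	if (fd < 0)
    		return 1;
    	bpf_map_update_elem(fd, NULL, &in, BPF_ANY);	/* push */
    	bpf_map_lookup_and_delete_elem(fd, NULL, &out);	/* pop */
    	printf("popped %u\n", out);
    	return 0;
    }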

  • queue_stack_maps.c 7.08 KiB
    // SPDX-License-Identifier: GPL-2.0
    /*
     * queue_stack_maps.c: BPF queue and stack maps
     *
     * Copyright (c) 2018 Politecnico di Torino
     */
    #include <linux/bpf.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/btf_ids.h>
    #include "percpu_freelist.h"
    
    #define QUEUE_STACK_CREATE_FLAG_MASK \
    	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
    
    struct bpf_queue_stack {
    	struct bpf_map map;
    	raw_spinlock_t lock;
    	u32 head, tail;
    	u32 size; /* max_entries + 1 */
    
    	char elements[] __aligned(8);
    };
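
    /*
     * head == tail means empty; advancing head onto tail means full. One
     * slot is kept unused (size == max_entries + 1) so the two states are
     * distinguishable: e.g. max_entries = 3 gives size = 4, and after
     * three pushes the next head position wraps onto tail, so the map
     * reports full after exactly max_entries elements.
     */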
    
    static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
    {
    	return container_of(map, struct bpf_queue_stack, map);
    }
    
    static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
    {
    	return qs->head == qs->tail;
    }
    
    static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
    {
    	u32 head = qs->head + 1;
    
    	if (unlikely(head >= qs->size))
    		head = 0;
    
    	return head == qs->tail;
    }
    
    /* Called from syscall */
    static int queue_stack_map_alloc_check(union bpf_attr *attr)
    {
    	/* check sanity of attributes */
    	if (attr->max_entries == 0 || attr->key_size != 0 ||
    	    attr->value_size == 0 ||
    	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
    	    !bpf_map_flags_access_ok(attr->map_flags))
    		return -EINVAL;
    
    	if (attr->value_size > KMALLOC_MAX_SIZE)
    		/* if value_size is bigger, user space won't be able to
    		 * access the elements
    		 */
    		return -E2BIG;
    
    	return 0;
    }
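
    /*
     * Illustration only (not part of this file): from a BPF program these
     * maps are driven through the keyless push/pop/peek helpers, e.g.
     * assuming a queue map named "q" is declared in the program:
     *
     *	__u32 v = 1;
     *	bpf_map_push_elem(&q, &v, BPF_ANY);
     *	bpf_map_pop_elem(&q, &v);
     *	bpf_map_peek_elem(&q, &v);
     */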
    
    static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
    {
    	int numa_node = bpf_map_attr_numa_node(attr);
    	struct bpf_queue_stack *qs;
    	u64 size, queue_size;
    
    	/* one extra slot distinguishes full from empty; u64 arithmetic
    	 * avoids overflow when max_entries == U32_MAX
    	 */
    	size = (u64) attr->max_entries + 1;