/* linux/kernel/bpf/bpf_lru_list.h */
   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/* Copyright (c) 2016 Facebook
   3 */
   4#ifndef __BPF_LRU_LIST_H_
   5#define __BPF_LRU_LIST_H_
   6
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/spinlock_types.h>
   9
/* Number of global list types (active/inactive/free) in bpf_lru_list::lists */
#define NR_BPF_LRU_LIST_T       (3)
/* Only the first two list types (active/inactive) have counters */
#define NR_BPF_LRU_LIST_COUNT   (2)
/* Number of per-cpu local list types (free/pending) in bpf_lru_locallist::lists */
#define NR_BPF_LRU_LOCAL_LIST_T (2)
/* Local list enum values start right after the global ones; subtract this
 * offset to index bpf_lru_locallist::lists.
 */
#define BPF_LOCAL_LIST_T_OFFSET NR_BPF_LRU_LIST_T
  14
/* Which list a bpf_lru_node currently sits on.  The first NR_BPF_LRU_LIST_T
 * values index the global bpf_lru_list::lists array; the remaining two index
 * the per-cpu bpf_lru_locallist::lists array after subtracting
 * BPF_LOCAL_LIST_T_OFFSET.  Do not reorder: the numeric values are used as
 * array indices.
 */
enum bpf_lru_list_type {
        BPF_LRU_LIST_T_ACTIVE,
        BPF_LRU_LIST_T_INACTIVE,
        BPF_LRU_LIST_T_FREE,
        BPF_LRU_LOCAL_LIST_T_FREE,
        BPF_LRU_LOCAL_LIST_T_PENDING,
};
  22
/* Per-element LRU bookkeeping, embedded in each element managed by the LRU
 * (see the node_offset argument of bpf_lru_populate()).
 */
struct bpf_lru_node {
        struct list_head list;  /* linkage into one of the LRU lists */
        u16 cpu;                /* owning cpu — presumably the cpu whose local list holds it; confirm in bpf_lru_list.c */
        u8 type;                /* current enum bpf_lru_list_type */
        u8 ref;                 /* approximate "recently accessed" flag, set by bpf_lru_node_set_ref() */
};
  29
/* A set of LRU lists: either the single global list of a common LRU, or one
 * instance per cpu when the LRU runs in percpu mode (struct bpf_lru::percpu).
 */
struct bpf_lru_list {
        /* indexed by BPF_LRU_LIST_T_{ACTIVE,INACTIVE,FREE} */
        struct list_head lists[NR_BPF_LRU_LIST_T];
        /* element counts; sized NR_BPF_LRU_LIST_COUNT (2), so presumably
         * only the active and inactive lists are counted
         */
        unsigned int counts[NR_BPF_LRU_LIST_COUNT];
        /* The next inactive list rotation starts from here */
        struct list_head *next_inactive_rotation;

        /* protects the lists/counts above; placed on its own cache line
         * to avoid false sharing with the hot list heads
         */
        raw_spinlock_t lock ____cacheline_aligned_in_smp;
};
  38
/* Per-cpu local lists of a common LRU; reduces contention on the shared
 * bpf_lru_list by serving allocations locally first.
 */
struct bpf_lru_locallist {
        /* indexed by (enum bpf_lru_list_type) - BPF_LOCAL_LIST_T_OFFSET,
         * i.e. the LOCAL_LIST_T_{FREE,PENDING} types
         */
        struct list_head lists[NR_BPF_LRU_LOCAL_LIST_T];
        u16 next_steal;         /* NOTE(review): looks like the next cpu to steal free nodes from — confirm in bpf_lru_list.c */
        raw_spinlock_t lock;    /* protects the local lists */
};
  44
/* LRU shared by all cpus: one global list set plus per-cpu local lists
 * that front it.
 */
struct bpf_common_lru {
        struct bpf_lru_list lru_list;                   /* the shared global lists */
        struct bpf_lru_locallist __percpu *local_list;  /* per-cpu free/pending lists */
};
  49
/* Callback used to unlink the element owning @node from its hash table when
 * the LRU reclaims it.  @arg is the opaque bpf_lru::del_arg.  Presumably
 * returns true when the element was removed — confirm against callers in
 * bpf_lru_list.c / hashtab.c.
 */
typedef bool (*del_from_htab_func)(void *arg, struct bpf_lru_node *node);
  51
/* Top-level LRU object.  The union member in use is selected by ::percpu
 * (set from the percpu argument of bpf_lru_init()).
 */
struct bpf_lru {
        union {
                struct bpf_common_lru common_lru;       /* !percpu: shared LRU + local lists */
                struct bpf_lru_list __percpu *percpu_lru;  /* percpu: independent LRU per cpu */
        };
        del_from_htab_func del_from_htab;       /* reclaim callback (see typedef above) */
        void *del_arg;                          /* opaque argument passed to del_from_htab */
        unsigned int hash_offset;               /* stored from bpf_lru_init(); presumably offset of the hash within each element — confirm */
        unsigned int nr_scans;                  /* NOTE(review): looks like nodes examined per shrink pass — confirm in bpf_lru_list.c */
        bool percpu;                            /* selects the union member above */
};
  63
  64static inline void bpf_lru_node_set_ref(struct bpf_lru_node *node)
  65{
  66        /* ref is an approximation on access frequency.  It does not
  67         * have to be very accurate.  Hence, no protection is used.
  68         */
  69        if (!node->ref)
  70                node->ref = 1;
  71}
  72
/* Initialize @lru.  @percpu selects per-cpu LRU mode; @hash_offset and the
 * @del_from_htab/@delete_arg pair are stored in struct bpf_lru for later use.
 * Presumably returns 0 on success and a negative errno on failure — confirm
 * against bpf_lru_list.c.
 */
int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
                 del_from_htab_func del_from_htab, void *delete_arg);
/* Seed the LRU with @nr_elems elements of @elem_size bytes starting at @buf;
 * @node_offset is the byte offset of the embedded bpf_lru_node in each
 * element.
 */
void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
                      u32 elem_size, u32 nr_elems);
/* Tear down state allocated by bpf_lru_init() */
void bpf_lru_destroy(struct bpf_lru *lru);
/* Obtain a free node for the element hashed to @hash.  NOTE(review): likely
 * shrinks/evicts when no free node is available and may return NULL —
 * confirm in bpf_lru_list.c.
 */
struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash);
/* Return @node to the LRU's free list(s) */
void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node);
/* Promote @node — presumably onto the active list; confirm in bpf_lru_list.c */
void bpf_lru_promote(struct bpf_lru *lru, struct bpf_lru_node *node);
  81
  82#endif
  83