/* linux/kernel/bpf/bpf_lru_list.h
 *
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
   7#ifndef __BPF_LRU_LIST_H_
   8#define __BPF_LRU_LIST_H_
   9
  10#include <linux/list.h>
  11#include <linux/spinlock_types.h>
  12
  13#define NR_BPF_LRU_LIST_T       (3)
  14#define NR_BPF_LRU_LIST_COUNT   (2)
  15#define NR_BPF_LRU_LOCAL_LIST_T (2)
  16#define BPF_LOCAL_LIST_T_OFFSET NR_BPF_LRU_LIST_T
  17
  18enum bpf_lru_list_type {
  19        BPF_LRU_LIST_T_ACTIVE,
  20        BPF_LRU_LIST_T_INACTIVE,
  21        BPF_LRU_LIST_T_FREE,
  22        BPF_LRU_LOCAL_LIST_T_FREE,
  23        BPF_LRU_LOCAL_LIST_T_PENDING,
  24};
  25
  26struct bpf_lru_node {
  27        struct list_head list;
  28        u16 cpu;
  29        u8 type;
  30        u8 ref;
  31};
  32
  33struct bpf_lru_list {
  34        struct list_head lists[NR_BPF_LRU_LIST_T];
  35        unsigned int counts[NR_BPF_LRU_LIST_COUNT];
  36        /* The next inactive list rotation starts from here */
  37        struct list_head *next_inactive_rotation;
  38
  39        raw_spinlock_t lock ____cacheline_aligned_in_smp;
  40};
  41
  42struct bpf_lru_locallist {
  43        struct list_head lists[NR_BPF_LRU_LOCAL_LIST_T];
  44        u16 next_steal;
  45        raw_spinlock_t lock;
  46};
  47
  48struct bpf_common_lru {
  49        struct bpf_lru_list lru_list;
  50        struct bpf_lru_locallist __percpu *local_list;
  51};
  52
  53typedef bool (*del_from_htab_func)(void *arg, struct bpf_lru_node *node);
  54
  55struct bpf_lru {
  56        union {
  57                struct bpf_common_lru common_lru;
  58                struct bpf_lru_list __percpu *percpu_lru;
  59        };
  60        del_from_htab_func del_from_htab;
  61        void *del_arg;
  62        unsigned int hash_offset;
  63        unsigned int nr_scans;
  64        bool percpu;
  65};
  66
  67static inline void bpf_lru_node_set_ref(struct bpf_lru_node *node)
  68{
  69        /* ref is an approximation on access frequency.  It does not
  70         * have to be very accurate.  Hence, no protection is used.
  71         */
  72        if (!node->ref)
  73                node->ref = 1;
  74}
  75
  76int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
  77                 del_from_htab_func del_from_htab, void *delete_arg);
  78void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
  79                      u32 elem_size, u32 nr_elems);
  80void bpf_lru_destroy(struct bpf_lru *lru);
  81struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash);
  82void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node);
  83void bpf_lru_promote(struct bpf_lru *lru, struct bpf_lru_node *node);
  84
  85#endif
  86