1
2
3
4
5
6
7#ifndef _LRU_LIST_H
8#define _LRU_LIST_H
9
10#include <linux/list.h>
11#include <linux/nodemask.h>
12#include <linux/shrinker.h>
13
14struct mem_cgroup;
15
16
/*
 * Status returned by a list_lru walk callback (list_lru_walk_cb) to tell
 * the walker what it did with the item.
 */
enum lru_status {
	LRU_REMOVED,		/* item removed from the list */
	LRU_REMOVED_RETRY,	/* item removed, but the lru lock was
				 * dropped and reacquired — TODO confirm
				 * against the walker implementation */

	LRU_ROTATE,		/* item moved to give it another pass */
	LRU_SKIP,		/* item skipped, leave it where it is */
	LRU_RETRY,		/* item not freeable now; retry — presumably
				 * may restart the scan, verify in walker */

};
26
/* A single LRU list together with its item count. */
struct list_lru_one {
	struct list_head list;

	/* NOTE(review): signed — may transiently go negative during
	 * concurrent updates; confirm against mm/list_lru.c. */
	long nr_items;
};
32
/*
 * Per-memcg array of LRU lists.  Allocated with a variable number of
 * entries; [0] is the GNU zero-length-array idiom for trailing data
 * (resized by memcg_update_all_list_lrus() — TODO confirm indexing is
 * by memcg kmem id).
 */
struct list_lru_memcg {

	struct list_lru_one *lru[0];
};
37
/* Per-NUMA-node part of a list_lru. */
struct list_lru_node {

	/* protects all lists on this node, including the per-cgroup ones */
	spinlock_t lock;

	/* global (non-cgroup) list for this node */
	struct list_lru_one lru;
#ifdef CONFIG_MEMCG_KMEM

	/* per-cgroup lists for memcg-aware lrus — presumably NULL otherwise,
	 * verify against __list_lru_init() */
	struct list_lru_memcg *memcg_lrus;
#endif
} ____cacheline_aligned_in_smp;
48
/* An LRU list spread over all NUMA nodes (and optionally all memcgs). */
struct list_lru {
	struct list_lru_node *node;	/* one entry per NUMA node */
#ifdef CONFIG_MEMCG_KMEM
	struct list_head list;		/* NOTE(review): presumably links this
					 * lru on a global list of memcg-aware
					 * lrus — confirm in mm/list_lru.c */
#endif
};
55
/* Tear down an lru set up with one of the init helpers below. */
void list_lru_destroy(struct list_lru *lru);
/*
 * Core initializer.  @memcg_aware enables per-cgroup lists; @key, if
 * non-NULL, is the lockdep class key for the per-node spinlock.
 * Returns 0 on success, negative errno otherwise — TODO confirm.
 */
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key);

/* Convenience wrappers around __list_lru_init(). */
#define list_lru_init(lru) __list_lru_init((lru), false, NULL)
#define list_lru_init_key(lru, key) __list_lru_init((lru), false, (key))
#define list_lru_init_memcg(lru) __list_lru_init((lru), true, NULL)

/* Resize / drain the per-memcg arrays of every memcg-aware lru;
 * called from memcg code — TODO confirm callers. */
int memcg_update_all_list_lrus(int num_memcgs);
void memcg_drain_all_list_lrus(int src_idx, int dst_idx);
66
/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * NOTE(review): semantics inferred from the upstream list_lru API —
 * confirm against mm/list_lru.c.  If the element is already part of a
 * list, this is expected to return doing nothing, so callers may update
 * lazily without tracking membership.  Locking is handled internally via
 * the per-node spinlock.
 *
 * Return value: true if the list was updated, false otherwise.
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);
/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * NOTE(review): semantics inferred from the upstream list_lru API —
 * confirm against mm/list_lru.c.  Expected to be a no-op when the item
 * is not on a list, mirroring list_lru_add() above.
 *
 * Return value: true if the list was updated, false otherwise.
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);
97
/**
 * list_lru_count_one: number of objects held by @lru for one node+memcg
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 * @memcg: the cgroup to count from.
 *
 * NOTE(review): presumably an approximate, lock-free snapshot suitable
 * for shrinker heuristics — confirm against the implementation.
 * list_lru_count_node() below counts a whole node across all cgroups.
 */
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);
111
112static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
113 struct shrink_control *sc)
114{
115 return list_lru_count_one(lru, sc->nid, sc->memcg);
116}
117
118static inline unsigned long list_lru_count(struct list_lru *lru)
119{
120 long count = 0;
121 int nid;
122
123 for_each_node_state(nid, N_NORMAL_MEMORY)
124 count += list_lru_count_node(lru, nid);
125
126 return count;
127}
128
/*
 * Helpers for walk callbacks: unlink @item from @list (and, for the
 * _move variant, splice it onto @head).  NOTE(review): the lru lock is
 * presumably held by the walker when these run — confirm.
 */
void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head);

/*
 * Callback invoked for each scanned item; returns an lru_status telling
 * the walker what was done with the item.  @lock is the lru spinlock,
 * @cb_arg is the caller's opaque cookie.
 */
typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
		struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
135
/**
 * list_lru_walk_one: walk one node+memcg list, isolating freeable items
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback deciding what to do with each scanned item.
 * @cb_arg: opaque cookie passed through to @isolate.
 * @nr_to_walk: in/out budget of items to scan; decremented as the walk
 *	proceeds — TODO confirm exact accounting in mm/list_lru.c.
 *
 * NOTE(review): semantics inferred from the upstream list_lru API.
 * The walker presumably holds the lru lock around @isolate calls and
 * honors the LRU_* return codes declared above.
 *
 * Return value: the number of objects effectively removed from the LRU.
 *
 * list_lru_walk_node() below is the same, over a whole node.
 */
unsigned long list_lru_walk_one(struct list_lru *lru,
				int nid, struct mem_cgroup *memcg,
				list_lru_walk_cb isolate, void *cb_arg,
				unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk);
165
166static inline unsigned long
167list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
168 list_lru_walk_cb isolate, void *cb_arg)
169{
170 return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
171 &sc->nr_to_scan);
172}
173
174static inline unsigned long
175list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
176 void *cb_arg, unsigned long nr_to_walk)
177{
178 long isolated = 0;
179 int nid;
180
181 for_each_node_state(nid, N_NORMAL_MEMORY) {
182 isolated += list_lru_walk_node(lru, nid, isolate,
183 cb_arg, &nr_to_walk);
184 if (nr_to_walk <= 0)
185 break;
186 }
187 return isolated;
188}
189#endif
190