#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/jffs2.h>
#include "nodelist.h"
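
/* Slab caches for the fixed-size JFFS2 structures allocated below.
 * They are created once at module init by jffs2_create_slab_caches(). */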
static struct kmem_cache *full_dnode_slab;
static struct kmem_cache *raw_dirent_slab;
static struct kmem_cache *raw_inode_slab;
static struct kmem_cache *tmp_dnode_info_slab;
static struct kmem_cache *raw_node_ref_slab;
static struct kmem_cache *node_frag_slab;
static struct kmem_cache *inode_cache_slab;
#ifdef CONFIG_JFFS2_FS_XATTR
static struct kmem_cache *xattr_datum_cache;
static struct kmem_cache *xattr_ref_cache;
#endif
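
/*
 * Create all of the slab caches used by JFFS2.  If any allocation fails,
 * the caches created so far are destroyed again and -ENOMEM is returned.
 */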
int __init jffs2_create_slab_caches(void)
{
	full_dnode_slab = kmem_cache_create("jffs2_full_dnode",
					    sizeof(struct jffs2_full_dnode),
					    0, 0, NULL);
	if (!full_dnode_slab)
		goto err;

	raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent",
					    sizeof(struct jffs2_raw_dirent),
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (!raw_dirent_slab)
		goto err;

	raw_inode_slab = kmem_cache_create("jffs2_raw_inode",
					   sizeof(struct jffs2_raw_inode),
					   0, SLAB_HWCACHE_ALIGN, NULL);
	if (!raw_inode_slab)
		goto err;

	tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode",
						sizeof(struct jffs2_tmp_dnode_info),
						0, 0, NULL);
	if (!tmp_dnode_info_slab)
		goto err;

	raw_node_ref_slab = kmem_cache_create("jffs2_refblock",
					      sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1),
					      0, 0, NULL);
	if (!raw_node_ref_slab)
		goto err;

	node_frag_slab = kmem_cache_create("jffs2_node_frag",
					   sizeof(struct jffs2_node_frag),
					   0, 0, NULL);
	if (!node_frag_slab)
		goto err;

	inode_cache_slab = kmem_cache_create("jffs2_inode_cache",
					     sizeof(struct jffs2_inode_cache),
					     0, 0, NULL);
	if (!inode_cache_slab)
		goto err;

#ifdef CONFIG_JFFS2_FS_XATTR
	xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum",
					      sizeof(struct jffs2_xattr_datum),
					      0, 0, NULL);
	if (!xattr_datum_cache)
		goto err;

	xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref",
					    sizeof(struct jffs2_xattr_ref),
					    0, 0, NULL);
	if (!xattr_ref_cache)
		goto err;
#endif

	return 0;
 err:
	jffs2_destroy_slab_caches();
	return -ENOMEM;
}

void jffs2_destroy_slab_caches(void)
{
	kmem_cache_destroy(full_dnode_slab);
	kmem_cache_destroy(raw_dirent_slab);
	kmem_cache_destroy(raw_inode_slab);
	kmem_cache_destroy(tmp_dnode_info_slab);
	kmem_cache_destroy(raw_node_ref_slab);
	kmem_cache_destroy(node_frag_slab);
	kmem_cache_destroy(inode_cache_slab);
#ifdef CONFIG_JFFS2_FS_XATTR
	kmem_cache_destroy(xattr_datum_cache);
	kmem_cache_destroy(xattr_ref_cache);
#endif
}
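
/* Full dirents are not allocated from a slab cache: the entry name is
 * stored inline after the structure, so the allocation size varies and
 * kmalloc() is used instead. */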
struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize)
{
	struct jffs2_full_dirent *ret;
	ret = kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_full_dirent(struct jffs2_full_dirent *x)
{
	dbg_memalloc("%p\n", x);
	kfree(x);
}

struct jffs2_full_dnode *jffs2_alloc_full_dnode(void)
{
	struct jffs2_full_dnode *ret;
	ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_full_dnode(struct jffs2_full_dnode *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(full_dnode_slab, x);
}

struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void)
{
	struct jffs2_raw_dirent *ret;
	ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(raw_dirent_slab, x);
}

struct jffs2_raw_inode *jffs2_alloc_raw_inode(void)
{
	struct jffs2_raw_inode *ret;
	ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_raw_inode(struct jffs2_raw_inode *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(raw_inode_slab, x);
}

struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void)
{
	struct jffs2_tmp_dnode_info *ret;
	ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(tmp_dnode_info_slab, x);
}
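
/* A refblock is a single slab object holding REFS_PER_BLOCK raw node refs
 * plus one terminating entry.  The first REFS_PER_BLOCK entries are marked
 * REF_EMPTY_NODE; the final entry is marked REF_LINK_NODE and its
 * next_in_ino pointer chains on to the next refblock, if any. */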
static struct jffs2_raw_node_ref *jffs2_alloc_refblock(void)
{
	struct jffs2_raw_node_ref *ret;

	ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
	if (ret) {
		int i;
		for (i = 0; i < REFS_PER_BLOCK; i++) {
			ret[i].flash_offset = REF_EMPTY_NODE;
			ret[i].next_in_ino = NULL;
		}
		ret[i].flash_offset = REF_LINK_NODE;
		ret[i].next_in_ino = NULL;
	}
	return ret;
}
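
/* Ensure that at least @nr empty raw node refs are available on @jeb,
 * walking the existing chain of refblocks and allocating and linking new
 * ones as needed.  Returns 0 on success or -ENOMEM on allocation failure. */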
int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb, int nr)
{
	struct jffs2_raw_node_ref **p, *ref;
	int i = nr;

	dbg_memalloc("%d\n", nr);

	p = &jeb->last_node;
	ref = *p;

	dbg_memalloc("Reserving %d refs for block @0x%08x\n", nr, jeb->offset);

	/* If jeb->last_node is really a valid node then skip over it */
	if (ref && ref->flash_offset != REF_EMPTY_NODE)
		ref++;

	while (i) {
		if (!ref) {
			dbg_memalloc("Allocating new refblock linked from %p\n", p);
			ref = *p = jffs2_alloc_refblock();
			if (!ref)
				return -ENOMEM;
		}
		if (ref->flash_offset == REF_LINK_NODE) {
			/* Follow the link entry on to the next refblock */
			p = &ref->next_in_ino;
			ref = *p;
			continue;
		}
		i--;
		ref++;
	}
	jeb->allocated_refs = nr;

	dbg_memalloc("Reserved %d refs for block @0x%08x, last_node is %p (%08x,%p)\n",
		     nr, jeb->offset, jeb->last_node, jeb->last_node->flash_offset,
		     jeb->last_node->next_in_ino);

	return 0;
}

void jffs2_free_refblock(struct jffs2_raw_node_ref *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(raw_node_ref_slab, x);
}

struct jffs2_node_frag *jffs2_alloc_node_frag(void)
{
	struct jffs2_node_frag *ret;
	ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_node_frag(struct jffs2_node_frag *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(node_frag_slab, x);
}

struct jffs2_inode_cache *jffs2_alloc_inode_cache(void)
{
	struct jffs2_inode_cache *ret;
	ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL);
	dbg_memalloc("%p\n", ret);
	return ret;
}

void jffs2_free_inode_cache(struct jffs2_inode_cache *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(inode_cache_slab, x);
}

#ifdef CONFIG_JFFS2_FS_XATTR
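/* The xattr allocators hand back zeroed objects with the node class set
 * and the ->node pointer initialised to point back at the structure
 * itself. */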
struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void)
{
	struct jffs2_xattr_datum *xd;
	xd = kmem_cache_zalloc(xattr_datum_cache, GFP_KERNEL);
	dbg_memalloc("%p\n", xd);
	if (!xd)
		return NULL;

	xd->class = RAWNODE_CLASS_XATTR_DATUM;
	xd->node = (void *)xd;
	INIT_LIST_HEAD(&xd->xindex);
	return xd;
}

void jffs2_free_xattr_datum(struct jffs2_xattr_datum *xd)
{
	dbg_memalloc("%p\n", xd);
	kmem_cache_free(xattr_datum_cache, xd);
}

struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void)
{
	struct jffs2_xattr_ref *ref;
	ref = kmem_cache_zalloc(xattr_ref_cache, GFP_KERNEL);
	dbg_memalloc("%p\n", ref);
	if (!ref)
		return NULL;

	ref->class = RAWNODE_CLASS_XATTR_REF;
	ref->node = (void *)ref;
	return ref;
}

void jffs2_free_xattr_ref(struct jffs2_xattr_ref *ref)
{
	dbg_memalloc("%p\n", ref);
	kmem_cache_free(xattr_ref_cache, ref);
}
#endif