1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35#include <linux/list.h>
36#include <linux/spinlock.h>
37#include <linux/atomic.h>
38#include <linux/export.h>
39#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
40#include <linux/delay.h>
41#endif
42
43#include "tmem.h"
44
45
/*
 * Sentinel values stamped into pool/obj/objnode structures via
 * SET_SENTINEL() and checked via ASSERT_SENTINEL() (see tmem.h) to
 * detect corruption and use-after-free when sentinel debugging is on.
 */
#define POOL_SENTINEL 0x87658765
#define OBJ_SENTINEL 0x12345678
#define OBJNODE_SENTINEL 0xfedcba09
49
50
51
52
53
/* Host-provided callbacks (obj/objnode allocation and freeing);
 * copied in once by the host via tmem_register_hostops(). */
static struct tmem_hostops tmem_hostops;

static void tmem_objnode_tree_init(void);

/**
 * tmem_register_hostops() - register the host's callback table.
 * @m: ops table; copied by value, so the caller's copy may go away.
 *
 * Also performs the one-time initialization of the objnode radix-tree
 * height-to-max-index table.
 */
void tmem_register_hostops(struct tmem_hostops *m)
{
	tmem_objnode_tree_init();
	tmem_hostops = *m;
}
63
64
65
66
67
/* Page-addressable-memory (pam) callbacks; copied in once by the host. */
static struct tmem_pamops tmem_pamops;

/**
 * tmem_register_pamops() - register the pam callback table.
 * @m: ops table; copied by value, so the caller's copy may go away.
 */
void tmem_register_pamops(struct tmem_pamops *m)
{
	tmem_pamops = *m;
}
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93static struct tmem_obj *__tmem_obj_find(struct tmem_hashbucket *hb,
94 struct tmem_oid *oidp,
95 struct rb_node **parent,
96 struct rb_node ***link)
97{
98 struct rb_node *_parent = NULL, **rbnode;
99 struct tmem_obj *obj = NULL;
100
101 rbnode = &hb->obj_rb_root.rb_node;
102 while (*rbnode) {
103 BUG_ON(RB_EMPTY_NODE(*rbnode));
104 _parent = *rbnode;
105 obj = rb_entry(*rbnode, struct tmem_obj,
106 rb_tree_node);
107 switch (tmem_oid_compare(oidp, &obj->oid)) {
108 case 0:
109 goto out;
110 case -1:
111 rbnode = &(*rbnode)->rb_left;
112 break;
113 case 1:
114 rbnode = &(*rbnode)->rb_right;
115 break;
116 }
117 }
118
119 if (parent)
120 *parent = _parent;
121 if (link)
122 *link = rbnode;
123 obj = NULL;
124out:
125 return obj;
126}
127
/* Look up an object by oid in a hashbucket; NULL if absent.
 * Caller must hold hb->lock. */
static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
					struct tmem_oid *oidp)
{
	return __tmem_obj_find(hb, oidp, NULL, NULL);
}
133
134static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *, bool);
135
136
/*
 * Tear down an object whose pampds are all gone and remove it from the
 * hashbucket's rbtree.  Caller must hold hb->lock.  The obj's memory
 * itself is returned to the host by the caller via tmem_hostops.obj_free.
 */
static void tmem_obj_free(struct tmem_obj *obj, struct tmem_hashbucket *hb)
{
	struct tmem_pool *pool;

	BUG_ON(obj == NULL);
	ASSERT_SENTINEL(obj, OBJ);
	BUG_ON(obj->pampd_count > 0);	/* all pampds must be gone already */
	pool = obj->pool;
	BUG_ON(pool == NULL);
	if (obj->objnode_tree_root != NULL)	/* may be "stump" tree */
		tmem_pampd_destroy_all_in_obj(obj, false);
	BUG_ON(obj->objnode_tree_root != NULL);
	BUG_ON((long)obj->objnode_count != 0);
	atomic_dec(&pool->obj_count);
	BUG_ON(atomic_read(&pool->obj_count) < 0);
	INVERT_SENTINEL(obj, OBJ);	/* poison sentinel to catch reuse */
	obj->pool = NULL;
	tmem_oid_set_invalid(&obj->oid);
	rb_erase(&obj->rb_tree_node, &hb->obj_rb_root);
}
157
158
159
160
/*
 * Initialize an object just obtained from tmem_hostops.obj_alloc and
 * insert it into the hashbucket's rbtree.  Caller must hold hb->lock.
 * BUGs if an object with the same oid already exists in the bucket.
 */
static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
					struct tmem_pool *pool,
					struct tmem_oid *oidp)
{
	struct rb_root *root = &hb->obj_rb_root;
	struct rb_node **new = NULL, *parent = NULL;

	BUG_ON(pool == NULL);
	atomic_inc(&pool->obj_count);
	obj->objnode_tree_height = 0;
	obj->objnode_tree_root = NULL;
	obj->pool = pool;
	obj->oid = *oidp;
	obj->objnode_count = 0;
	obj->pampd_count = 0;
#ifdef CONFIG_RAMSTER
	if (tmem_pamops.new_obj != NULL)	/* optional pam hook */
		(*tmem_pamops.new_obj)(obj);
#endif
	SET_SENTINEL(obj, OBJ);

	/* A hit here means a duplicate oid -- a caller bug.  On a miss,
	 * parent/new receive the rbtree insertion point. */
	if (__tmem_obj_find(hb, oidp, &parent, &new))
		BUG();

	rb_link_node(&obj->rb_tree_node, parent, new);
	rb_insert_color(&obj->rb_tree_node, root);
}
188
189
190
191
192
193
194
195
196
197
198static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
199{
200 struct rb_node *rbnode;
201 struct tmem_obj *obj;
202 struct tmem_hashbucket *hb = &pool->hashbucket[0];
203 int i;
204
205 BUG_ON(pool == NULL);
206 for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
207 spin_lock(&hb->lock);
208 rbnode = rb_first(&hb->obj_rb_root);
209 while (rbnode != NULL) {
210 obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
211 rbnode = rb_next(rbnode);
212 tmem_pampd_destroy_all_in_obj(obj, true);
213 tmem_obj_free(obj, hb);
214 (*tmem_hostops.obj_free)(obj, pool);
215 }
216 spin_unlock(&hb->lock);
217 }
218 if (destroy)
219 list_del(&pool->pool_list);
220}
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
/* One step of a recorded root-to-leaf walk (used during deletion). */
struct tmem_objnode_tree_path {
	struct tmem_objnode *objnode;
	int offset;
};

/* tmem_objnode_tree_h2max[h] == largest index a tree of height h holds */
static unsigned long tmem_objnode_tree_h2max[OBJNODE_TREE_MAX_PATH + 1];

/*
 * Precompute the maximum index representable at each tree height:
 * height h covers h * OBJNODE_TREE_MAP_SHIFT index bits.
 */
static void tmem_objnode_tree_init(void)
{
	unsigned int ht, tmp;

	for (ht = 0; ht < ARRAY_SIZE(tmem_objnode_tree_h2max); ht++) {
		tmp = ht * OBJNODE_TREE_MAP_SHIFT;
		if (tmp >= OBJNODE_TREE_INDEX_BITS)
			tmem_objnode_tree_h2max[ht] = ~0UL;
		else
			/* two-step shift: a single shift by
			 * (INDEX_BITS - tmp) could be a full-width
			 * shift (undefined) when tmp == 0 */
			tmem_objnode_tree_h2max[ht] =
			    (~0UL >> (OBJNODE_TREE_INDEX_BITS - tmp - 1)) >> 1;
	}
}
257
258static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
259{
260 struct tmem_objnode *objnode;
261
262 ASSERT_SENTINEL(obj, OBJ);
263 BUG_ON(obj->pool == NULL);
264 ASSERT_SENTINEL(obj->pool, POOL);
265 objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
266 if (unlikely(objnode == NULL))
267 goto out;
268 objnode->obj = obj;
269 SET_SENTINEL(objnode, OBJNODE);
270 memset(&objnode->slots, 0, sizeof(objnode->slots));
271 objnode->slots_in_use = 0;
272 obj->objnode_count++;
273out:
274 return objnode;
275}
276
/* Return an empty objnode to the host allocator, updating counts.
 * All child slots must already have been cleared. */
static void tmem_objnode_free(struct tmem_objnode *objnode)
{
	struct tmem_pool *pool;
	int i;

	BUG_ON(objnode == NULL);
	for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++)
		BUG_ON(objnode->slots[i] != NULL);
	ASSERT_SENTINEL(objnode, OBJNODE);
	INVERT_SENTINEL(objnode, OBJNODE);	/* poison to catch reuse */
	BUG_ON(objnode->obj == NULL);
	ASSERT_SENTINEL(objnode->obj, OBJ);
	pool = objnode->obj->pool;
	BUG_ON(pool == NULL);
	ASSERT_SENTINEL(pool, POOL);
	objnode->obj->objnode_count--;
	objnode->obj = NULL;
	(*tmem_hostops.objnode_free)(objnode, pool);
}
296
297
298
299
/*
 * Walk @obj's radix tree to the slot for @index.  Returns the address
 * of that slot (which may itself contain NULL if the path is not fully
 * populated), or NULL only when @index exceeds what the current tree
 * height can address.  Callers must check both the return value and
 * the pointed-to slot.
 */
static void **__tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
{
	unsigned int height, shift;
	struct tmem_objnode **slot = NULL;

	BUG_ON(obj == NULL);
	ASSERT_SENTINEL(obj, OBJ);
	BUG_ON(obj->pool == NULL);
	ASSERT_SENTINEL(obj->pool, POOL);

	height = obj->objnode_tree_height;
	if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height])
		goto out;	/* index beyond the tree's reach */
	if (height == 0 && obj->objnode_tree_root) {
		/* height-0 tree: the root IS the single pampd (index 0) */
		slot = &obj->objnode_tree_root;
		goto out;
	}
	shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
	slot = &obj->objnode_tree_root;
	while (height > 0) {
		if (*slot == NULL)
			goto out;	/* empty interior slot on the path */
		/* descend one level using this level's
		 * OBJNODE_TREE_MAP_SHIFT-wide field of @index */
		slot = (struct tmem_objnode **)
			((*slot)->slots +
			 ((index >> shift) & OBJNODE_TREE_MAP_MASK));
		shift -= OBJNODE_TREE_MAP_SHIFT;
		height--;
	}
out:
	return slot != NULL ? (void **)slot : NULL;
}
331
332static void *tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
333{
334 struct tmem_objnode **slot;
335
336 slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
337 return slot != NULL ? *slot : NULL;
338}
339
340#ifdef CONFIG_RAMSTER
341static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
342 void *new_pampd, bool no_free)
343{
344 struct tmem_objnode **slot;
345 void *ret = NULL;
346
347 slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
348 if ((slot != NULL) && (*slot != NULL)) {
349 void *old_pampd = *(void **)slot;
350 *(void **)slot = new_pampd;
351 if (!no_free)
352 (*tmem_pamops.free)(old_pampd, obj->pool,
353 NULL, 0, false);
354 ret = new_pampd;
355 }
356 return ret;
357}
358#endif
359
360static int tmem_pampd_add_to_obj(struct tmem_obj *obj, uint32_t index,
361 void *pampd)
362{
363 int ret = 0;
364 struct tmem_objnode *objnode = NULL, *newnode, *slot;
365 unsigned int height, shift;
366 int offset = 0;
367
368
369 if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height]) {
370 height = obj->objnode_tree_height + 1;
371 if (index > tmem_objnode_tree_h2max[height])
372 while (index > tmem_objnode_tree_h2max[height])
373 height++;
374 if (obj->objnode_tree_root == NULL) {
375 obj->objnode_tree_height = height;
376 goto insert;
377 }
378 do {
379 newnode = tmem_objnode_alloc(obj);
380 if (!newnode) {
381 ret = -ENOMEM;
382 goto out;
383 }
384 newnode->slots[0] = obj->objnode_tree_root;
385 newnode->slots_in_use = 1;
386 obj->objnode_tree_root = newnode;
387 obj->objnode_tree_height++;
388 } while (height > obj->objnode_tree_height);
389 }
390insert:
391 slot = obj->objnode_tree_root;
392 height = obj->objnode_tree_height;
393 shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
394 while (height > 0) {
395 if (slot == NULL) {
396
397 slot = tmem_objnode_alloc(obj);
398 if (!slot) {
399 ret = -ENOMEM;
400 goto out;
401 }
402 if (objnode) {
403
404 objnode->slots[offset] = slot;
405 objnode->slots_in_use++;
406 } else
407 obj->objnode_tree_root = slot;
408 }
409
410 offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
411 objnode = slot;
412 slot = objnode->slots[offset];
413 shift -= OBJNODE_TREE_MAP_SHIFT;
414 height--;
415 }
416 BUG_ON(slot != NULL);
417 if (objnode) {
418 objnode->slots_in_use++;
419 objnode->slots[offset] = pampd;
420 } else
421 obj->objnode_tree_root = pampd;
422 obj->pampd_count++;
423out:
424 return ret;
425}
426
/*
 * Remove and return the pampd at @index from @obj's radix tree, freeing
 * any objnodes left empty along the path and shrinking the tree's
 * height while the root has a single leftmost child.  Returns NULL if
 * nothing was stored at @index.  Caller must hold the hashbucket lock.
 */
static void *tmem_pampd_delete_from_obj(struct tmem_obj *obj, uint32_t index)
{
	/* records the root-to-leaf walk so newly-empty ancestors can be
	 * pruned bottom-up after the leaf slot is cleared */
	struct tmem_objnode_tree_path path[OBJNODE_TREE_MAX_PATH + 1];
	struct tmem_objnode_tree_path *pathp = path;
	struct tmem_objnode *slot = NULL;
	unsigned int height, shift;
	int offset;

	BUG_ON(obj == NULL);
	ASSERT_SENTINEL(obj, OBJ);
	BUG_ON(obj->pool == NULL);
	ASSERT_SENTINEL(obj->pool, POOL);
	height = obj->objnode_tree_height;
	if (index > tmem_objnode_tree_h2max[height])
		goto out;	/* index beyond tree: nothing to delete */
	slot = obj->objnode_tree_root;
	if (height == 0 && obj->objnode_tree_root) {
		/* height-0 tree: the root is the pampd itself */
		obj->objnode_tree_root = NULL;
		goto out;
	}
	shift = (height - 1) * OBJNODE_TREE_MAP_SHIFT;
	pathp->objnode = NULL;	/* sentinel terminating the prune loop */
	do {
		if (slot == NULL)
			goto out;	/* path not populated */
		pathp++;
		offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
		pathp->offset = offset;
		pathp->objnode = slot;
		slot = slot->slots[offset];
		shift -= OBJNODE_TREE_MAP_SHIFT;
		height--;
	} while (height > 0);
	if (slot == NULL)
		goto out;	/* leaf slot empty: pampd not present */
	/* clear the leaf slot, then walk back up freeing empty nodes */
	while (pathp->objnode) {
		pathp->objnode->slots[pathp->offset] = NULL;
		pathp->objnode->slots_in_use--;
		if (pathp->objnode->slots_in_use) {
			if (pathp->objnode == obj->objnode_tree_root) {
				/* shrink height while the root's only
				 * occupied slot is slot 0 */
				while (obj->objnode_tree_height > 0 &&
				    obj->objnode_tree_root->slots_in_use == 1 &&
				    obj->objnode_tree_root->slots[0]) {
					struct tmem_objnode *to_free =
						obj->objnode_tree_root;

					obj->objnode_tree_root =
							to_free->slots[0];
					obj->objnode_tree_height--;
					to_free->slots[0] = NULL;
					to_free->slots_in_use = 0;
					tmem_objnode_free(to_free);
				}
			}
			goto out;
		}
		tmem_objnode_free(pathp->objnode); /* 0 slots used, free it */
		pathp--;
	}
	/* the entire path was freed: tree is now empty */
	obj->objnode_tree_height = 0;
	obj->objnode_tree_root = NULL;

out:
	if (slot != NULL)
		obj->pampd_count--;	/* a pampd was actually removed */
	BUG_ON(obj->pampd_count < 0);
	return slot;
}
495
496
/*
 * Recursively free, via tmem_pamops.free, every pampd below @objnode
 * and free all child objnodes.  @ht is @objnode's height above the
 * leaves: at ht == 1 the slots hold pampds directly.  @objnode itself
 * is NOT freed here; callers do that after this returns.
 */
static void tmem_objnode_node_destroy(struct tmem_obj *obj,
					struct tmem_objnode *objnode,
					unsigned int ht)
{
	int i;

	if (ht == 0)
		return;
	for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++) {
		if (objnode->slots[i]) {
			if (ht == 1) {
				/* leaf level: slot holds a pampd */
				obj->pampd_count--;
				(*tmem_pamops.free)(objnode->slots[i],
						obj->pool, NULL, 0, true);
				objnode->slots[i] = NULL;
				continue;
			}
			tmem_objnode_node_destroy(obj, objnode->slots[i], ht-1);
			tmem_objnode_free(objnode->slots[i]);
			objnode->slots[i] = NULL;
		}
	}
}
520
/*
 * Free every pampd and objnode in @obj's radix tree, leaving it empty
 * (root NULL, height 0).  @pool_destroy is forwarded to the pam's
 * optional free_obj hook.
 */
static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
					bool pool_destroy)
{
	if (obj->objnode_tree_root == NULL)
		return;
	if (obj->objnode_tree_height == 0) {
		/* height-0 tree: the root is the lone pampd */
		obj->pampd_count--;
		(*tmem_pamops.free)(obj->objnode_tree_root,
					obj->pool, NULL, 0, true);
	} else {
		tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
					obj->objnode_tree_height);
		tmem_objnode_free(obj->objnode_tree_root);
		obj->objnode_tree_height = 0;
	}
	obj->objnode_tree_root = NULL;
#ifdef CONFIG_RAMSTER
	if (tmem_pamops.free_obj != NULL)	/* optional pam hook */
		(*tmem_pamops.free_obj)(obj->pool, obj, pool_destroy);
#endif
}
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
/*
 * "Put" a page: associate @pampd_to_use with the handle (pool,oid,index).
 * If a pampd already exists at the handle, this is a "dup put": the old
 * pampd is deleted and freed first, then the new one takes its place.
 * Returns 0 on success, -ENOMEM if an obj or objnode could not be
 * allocated (any obj created/emptied by this call is torn down again).
 */
int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
		bool raw, void *pampd_to_use)
{
	struct tmem_obj *obj = NULL, *objfound = NULL, *objnew = NULL;
	void *pampd = NULL, *pampd_del = NULL;
	int ret = -ENOMEM;
	struct tmem_hashbucket *hb;

	hb = &pool->hashbucket[tmem_oid_hash(oidp)];
	spin_lock(&hb->lock);
	obj = objfound = tmem_obj_find(hb, oidp);
	if (obj != NULL) {
		pampd = tmem_pampd_lookup_in_obj(objfound, index);
		if (pampd != NULL) {
			/* dup put: flush the existing pampd first */
			pampd_del = tmem_pampd_delete_from_obj(obj, index);
			BUG_ON(pampd_del != pampd);
			(*tmem_pamops.free)(pampd, pool, oidp, index, true);
			if (obj->pampd_count == 0) {
				/* obj is now empty: treat it like a fresh
				 * obj so it's freed if the insert fails */
				objnew = obj;
				objfound = NULL;
			}
			pampd = NULL;
		}
	} else {
		obj = objnew = (*tmem_hostops.obj_alloc)(pool);
		if (unlikely(obj == NULL)) {
			ret = -ENOMEM;
			goto out;
		}
		tmem_obj_init(obj, hb, pool, oidp);
	}
	BUG_ON(obj == NULL);
	/* exactly one of objnew/objfound must equal obj */
	BUG_ON(((objnew != obj) && (objfound != obj)) || (objnew == objfound));
	pampd = pampd_to_use;
	BUG_ON(pampd_to_use == NULL);
	ret = tmem_pampd_add_to_obj(obj, index, pampd);
	if (unlikely(ret == -ENOMEM))
		/* the insert may have left a partially-built tree */
		goto delete_and_free;
	(*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
	goto out;

delete_and_free:
	(void)tmem_pampd_delete_from_obj(obj, index);
	if (pampd)
		(*tmem_pamops.free)(pampd, pool, NULL, 0, true);
	if (objnew) {
		tmem_obj_free(objnew, hb);
		(*tmem_hostops.obj_free)(objnew, pool);
	}
out:
	spin_unlock(&hb->lock);
	return ret;
}
616
617#ifdef CONFIG_RAMSTER
618
619
620
621
622
623
624
625
626
627void *tmem_localify_get_pampd(struct tmem_pool *pool, struct tmem_oid *oidp,
628 uint32_t index, struct tmem_obj **ret_obj,
629 void **saved_hb)
630{
631 struct tmem_hashbucket *hb;
632 struct tmem_obj *obj = NULL;
633 void *pampd = NULL;
634
635 hb = &pool->hashbucket[tmem_oid_hash(oidp)];
636 spin_lock(&hb->lock);
637 obj = tmem_obj_find(hb, oidp);
638 if (likely(obj != NULL))
639 pampd = tmem_pampd_lookup_in_obj(obj, index);
640 *ret_obj = obj;
641 *saved_hb = (void *)hb;
642
643 return pampd;
644}
645EXPORT_SYMBOL_GPL(tmem_localify_get_pampd);
646
647void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
648 void *pampd, void *saved_hb, bool delete)
649{
650 struct tmem_hashbucket *hb = (struct tmem_hashbucket *)saved_hb;
651
652 BUG_ON(!spin_is_locked(&hb->lock));
653 if (pampd != NULL) {
654 BUG_ON(obj == NULL);
655 (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
656 (*tmem_pamops.create_finish)(pampd, is_ephemeral(obj->pool));
657 } else if (delete) {
658 BUG_ON(obj == NULL);
659 (void)tmem_pampd_delete_from_obj(obj, index);
660 }
661 spin_unlock(&hb->lock);
662}
663EXPORT_SYMBOL_GPL(tmem_localify_finish);
664
665
666
667
/*
 * CONFIG_RAMSTER: bring the remote pampd at *@ppampd back to this node.
 *
 * NOTE: called with hb->lock held, but DROPS the lock (the repatriate
 * op may sleep) and returns WITHOUT it; the caller must not touch
 * obj/ppampd afterwards, and retries the whole lookup on -EAGAIN.
 */
static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
				struct tmem_pool *pool, struct tmem_oid *oidp,
				uint32_t index, bool free, char *data)
{
	void *old_pampd = *ppampd, *new_pampd = NULL;
	bool intransit = false;
	int ret = 0;

	/* for persistent pools, preload a local pampd to receive the data */
	if (!is_ephemeral(pool))
		new_pampd = (*tmem_pamops.repatriate_preload)(
				old_pampd, pool, oidp, index, &intransit);
	if (intransit)
		ret = -EAGAIN;	/* transfer already in progress; retry */
	else if (new_pampd != NULL)
		*ppampd = new_pampd;
	/* lock must be released before the (possibly sleeping) transfer */
	spin_unlock(&hb->lock);
	if (!intransit)
		ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
						oidp, index, free, data);
	if (ret == -EAGAIN) {
		/* throttle the caller's retry loop */
		usleep_range(10, 1000);
	} else if (ret == -ENOTCONN || ret == -EHOSTDOWN) {
		ret = -1;
	} else if (ret != 0 && ret != -ENOENT) {
		ret = -1;
	}
	/* note hb->lock is no longer held on return */
	return ret;
}
699
700
701
702
703
704
/*
 * CONFIG_RAMSTER: replace the pampd at (pool,oid,index) with
 * @new_pampd, freeing the old pampd, then notify the pam layer via its
 * replace_in_obj hook.  Returns -1 if no object exists for @oidp,
 * otherwise the hook's return value.
 */
int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
		uint32_t index, void *new_pampd)
{
	struct tmem_obj *obj;
	int ret = -1;
	struct tmem_hashbucket *hb;

	hb = &pool->hashbucket[tmem_oid_hash(oidp)];
	spin_lock(&hb->lock);
	obj = tmem_obj_find(hb, oidp);
	if (obj == NULL)
		goto out;
	/* new_pampd becomes NULL if no pampd was present at @index */
	new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);

	BUG_ON(tmem_pamops.replace_in_obj == NULL);
	ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
out:
	spin_unlock(&hb->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tmem_replace);
726#endif
727
728
729
730
731
732
733
734
735
736
737
738
/*
 * "Get" a page: copy the data at handle (pool,oid,index) into @data.
 * The get is exclusive (the pampd, and the obj if thereby emptied, are
 * freed) when @get_and_free == 1, or when @get_and_free == 0 and the
 * pool is ephemeral.  Returns 0 on success, -1 on failure.
 *
 * With CONFIG_RAMSTER a remote pampd is repatriated first; since
 * tmem_repatriate() drops the hashbucket lock, the entire lookup is
 * retried from scratch for as long as it reports -EAGAIN.
 */
int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
		char *data, size_t *sizep, bool raw, int get_and_free)
{
	struct tmem_obj *obj;
	void *pampd = NULL;
	bool ephemeral = is_ephemeral(pool);
	int ret = -1;
	struct tmem_hashbucket *hb;
	bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral);
	bool lock_held = false;
	void **ppampd;

	do {
		hb = &pool->hashbucket[tmem_oid_hash(oidp)];
		spin_lock(&hb->lock);
		lock_held = true;
		obj = tmem_obj_find(hb, oidp);
		if (obj == NULL)
			goto out;
		ppampd = __tmem_pampd_lookup_in_obj(obj, index);
		if (ppampd == NULL)
			goto out;
#ifdef CONFIG_RAMSTER
		if ((tmem_pamops.is_remote != NULL) &&
		     tmem_pamops.is_remote(*ppampd)) {
			ret = tmem_repatriate(ppampd, hb, pool, oidp,
						index, free, data);
			/* tmem_repatriate() released hb->lock */
			lock_held = false;
			*sizep = PAGE_SIZE;
			if (ret != -EAGAIN)
				goto out;
		}
#endif
	} while (ret == -EAGAIN);
	if (free)
		pampd = tmem_pampd_delete_from_obj(obj, index);
	else
		pampd = tmem_pampd_lookup_in_obj(obj, index);
	if (pampd == NULL)
		goto out;
	if (free) {
		if (obj->pampd_count == 0) {
			/* exclusive get emptied the obj: free it too */
			tmem_obj_free(obj, hb);
			(*tmem_hostops.obj_free)(obj, pool);
			obj = NULL;
		}
	}
	if (free)
		ret = (*tmem_pamops.get_data_and_free)(
				data, sizep, raw, pampd, pool, oidp, index);
	else
		ret = (*tmem_pamops.get_data)(
				data, sizep, raw, pampd, pool, oidp, index);
	if (ret < 0)
		goto out;
	ret = 0;
out:
	if (lock_held)
		spin_unlock(&hb->lock);
	return ret;
}
801
802
803
804
805
806
807int tmem_flush_page(struct tmem_pool *pool,
808 struct tmem_oid *oidp, uint32_t index)
809{
810 struct tmem_obj *obj;
811 void *pampd;
812 int ret = -1;
813 struct tmem_hashbucket *hb;
814
815 hb = &pool->hashbucket[tmem_oid_hash(oidp)];
816 spin_lock(&hb->lock);
817 obj = tmem_obj_find(hb, oidp);
818 if (obj == NULL)
819 goto out;
820 pampd = tmem_pampd_delete_from_obj(obj, index);
821 if (pampd == NULL)
822 goto out;
823 (*tmem_pamops.free)(pampd, pool, oidp, index, true);
824 if (obj->pampd_count == 0) {
825 tmem_obj_free(obj, hb);
826 (*tmem_hostops.obj_free)(obj, pool);
827 }
828 ret = 0;
829
830out:
831 spin_unlock(&hb->lock);
832 return ret;
833}
834
835
836
837
838int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
839{
840 struct tmem_obj *obj;
841 struct tmem_hashbucket *hb;
842 int ret = -1;
843
844 hb = &pool->hashbucket[tmem_oid_hash(oidp)];
845 spin_lock(&hb->lock);
846 obj = tmem_obj_find(hb, oidp);
847 if (obj == NULL)
848 goto out;
849 tmem_pampd_destroy_all_in_obj(obj, false);
850 tmem_obj_free(obj, hb);
851 (*tmem_hostops.obj_free)(obj, pool);
852 ret = 0;
853
854out:
855 spin_unlock(&hb->lock);
856 return ret;
857}
858
859
860
861
862
863int tmem_destroy_pool(struct tmem_pool *pool)
864{
865 int ret = -1;
866
867 if (pool == NULL)
868 goto out;
869 tmem_pool_flush(pool, 1);
870 ret = 0;
871out:
872 return ret;
873}
874
/* All pools created via tmem_new_pool(), linked by pool->pool_list. */
static LIST_HEAD(tmem_global_pool_list);
876
877
878
879
880
881void tmem_new_pool(struct tmem_pool *pool, uint32_t flags)
882{
883 int persistent = flags & TMEM_POOL_PERSIST;
884 int shared = flags & TMEM_POOL_SHARED;
885 struct tmem_hashbucket *hb = &pool->hashbucket[0];
886 int i;
887
888 for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
889 hb->obj_rb_root = RB_ROOT;
890 spin_lock_init(&hb->lock);
891 }
892 INIT_LIST_HEAD(&pool->pool_list);
893 atomic_set(&pool->obj_count, 0);
894 SET_SENTINEL(pool, POOL);
895 list_add_tail(&pool->pool_list, &tmem_global_pool_list);
896 pool->persistent = persistent;
897 pool->shared = shared;
898}
899