/* linux/tools/testing/radix-tree/linux.c */
   1#include <stdlib.h>
   2#include <string.h>
   3#include <malloc.h>
   4#include <pthread.h>
   5#include <unistd.h>
   6#include <assert.h>
   7
   8#include <linux/gfp.h>
   9#include <linux/poison.h>
  10#include <linux/slab.h>
  11#include <linux/radix-tree.h>
  12#include <urcu/uatomic.h>
  13
/* Global accounting and debug state shared across the test harness. */
int nr_allocated;	/* number of currently outstanding allocations */
int preempt_count;	/* emulated kernel preempt_count (not touched in this file) */
int kmalloc_verbose;	/* non-zero: log every allocation/free to stdout */
int test_verbose;	/* test-suite verbosity flag (not read in this file) */
  18
/*
 * Minimal userspace stand-in for a kernel slab cache.  Freed objects are
 * recycled on a singly linked free list threaded through the objects
 * themselves, using struct radix_tree_node's ->parent pointer as the link
 * (see kmem_cache_alloc()/kmem_cache_free()).
 */
struct kmem_cache {
	pthread_mutex_t lock;	/* protects nr_objs and objs */
	int size;		/* object size in bytes */
	int nr_objs;		/* number of objects currently on the free list */
	void *objs;		/* head of the free list */
	void (*ctor)(void *);	/* optional constructor, run once per fresh object */
};
  26
  27void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
  28{
  29        struct radix_tree_node *node;
  30
  31        if (flags & __GFP_NOWARN)
  32                return NULL;
  33
  34        pthread_mutex_lock(&cachep->lock);
  35        if (cachep->nr_objs) {
  36                cachep->nr_objs--;
  37                node = cachep->objs;
  38                cachep->objs = node->parent;
  39                pthread_mutex_unlock(&cachep->lock);
  40                node->parent = NULL;
  41        } else {
  42                pthread_mutex_unlock(&cachep->lock);
  43                node = malloc(cachep->size);
  44                if (cachep->ctor)
  45                        cachep->ctor(node);
  46        }
  47
  48        uatomic_inc(&nr_allocated);
  49        if (kmalloc_verbose)
  50                printf("Allocating %p from slab\n", node);
  51        return node;
  52}
  53
  54void kmem_cache_free(struct kmem_cache *cachep, void *objp)
  55{
  56        assert(objp);
  57        uatomic_dec(&nr_allocated);
  58        if (kmalloc_verbose)
  59                printf("Freeing %p to slab\n", objp);
  60        pthread_mutex_lock(&cachep->lock);
  61        if (cachep->nr_objs > 10) {
  62                memset(objp, POISON_FREE, cachep->size);
  63                free(objp);
  64        } else {
  65                struct radix_tree_node *node = objp;
  66                cachep->nr_objs++;
  67                node->parent = cachep->objs;
  68                cachep->objs = node;
  69        }
  70        pthread_mutex_unlock(&cachep->lock);
  71}
  72
  73void *kmalloc(size_t size, gfp_t gfp)
  74{
  75        void *ret = malloc(size);
  76        uatomic_inc(&nr_allocated);
  77        if (kmalloc_verbose)
  78                printf("Allocating %p from malloc\n", ret);
  79        return ret;
  80}
  81
  82void kfree(void *p)
  83{
  84        if (!p)
  85                return;
  86        uatomic_dec(&nr_allocated);
  87        if (kmalloc_verbose)
  88                printf("Freeing %p to malloc\n", p);
  89        free(p);
  90}
  91
  92struct kmem_cache *
  93kmem_cache_create(const char *name, size_t size, size_t offset,
  94        unsigned long flags, void (*ctor)(void *))
  95{
  96        struct kmem_cache *ret = malloc(sizeof(*ret));
  97
  98        pthread_mutex_init(&ret->lock, NULL);
  99        ret->size = size;
 100        ret->nr_objs = 0;
 101        ret->objs = NULL;
 102        ret->ctor = ctor;
 103        return ret;
 104}
 105