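/*
 * ptr_ring test: plugs the kernel's include/linux/ptr_ring.h into the
 * userspace ringtest harness (main.h).  The macros and helpers below are
 * minimal stand-ins for the kernel primitives that ptr_ring.h expects
 * (kmalloc/kfree, spinlocks, alignment helpers), so the header can be
 * compiled and exercised unmodified in userspace.
 */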
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <malloc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>

#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x) (__builtin_expect(!!(x), 0))
#define likely(x) (__builtin_expect(!!(x), 1))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
#define SIZE_MAX (~(size_t)0)
#define KMALLOC_MAX_SIZE SIZE_MAX
#define BUG_ON(x) assert(!(x)) /* kernel BUG_ON() fires when the condition is true */

typedef pthread_spinlock_t spinlock_t;

typedef int gfp_t;
#define __GFP_ZERO 0x1

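/*
 * Trivial kmalloc()/kfree() replacements built on memalign()/free().
 * Only the pieces of the kernel allocation API that the ptr_ring code
 * needs are provided; __GFP_ZERO is the only gfp flag that is honoured.
 */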
static void *kmalloc(unsigned size, gfp_t gfp)
{
	void *p = memalign(64, size);
	if (!p)
		return p;

	if (gfp & __GFP_ZERO)
		memset(p, 0, size);
	return p;
}

static inline void *kzalloc(unsigned size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return kmalloc(n * size, flags);
}

static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

static void kfree(void *p)
{
	if (p)
		free(p);
}

#define kvmalloc_array kmalloc_array
#define kvfree kfree

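/*
 * spinlock_t maps straight onto a pthread spinlock.  There are no softirqs
 * or interrupts to mask in userspace, so the _bh/_irq/_irqsave variants are
 * plain lock/unlock and the flags argument is ignored.
 */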
static void spin_lock_init(spinlock_t *lock)
{
	int r = pthread_spin_init(lock, 0);
	assert(!r);
}

static void spin_lock(spinlock_t *lock)
{
	int ret = pthread_spin_lock(lock);
	assert(!ret);
}

static void spin_unlock(spinlock_t *lock)
{
	int ret = pthread_spin_unlock(lock);
	assert(!ret);
}

static void spin_lock_bh(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_bh(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irq(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_irq(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
	spin_lock(lock);
}

static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
	spin_unlock(lock);
}

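/* With the kernel API stubbed out above, include the real ptr_ring code. */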
#include "../../../include/linux/ptr_ring.h"

static unsigned long long headcnt, tailcnt;
static struct ptr_ring array ____cacheline_aligned_in_smp;

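/*
 * alloc_ring(): called by the harness to set up the ring.  ring_size and
 * param appear to be supplied by main.h; a non-zero param overrides the
 * ring's default consumer batch size.
 */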
void alloc_ring(void)
{
	int ret = ptr_ring_init(&array, ring_size, 0);
	assert(!ret);

	if (param)
		array.batch = param;
}

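/*
 * Producer side: __ptr_ring_produce() returns 0 on success and a negative
 * errno when the ring is full; successful submissions are counted in headcnt.
 */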
int add_inbuf(unsigned len, void *buf, void *datap)
{
	int ret;

	ret = __ptr_ring_produce(&array, buf);
	if (ret >= 0) {
		ret = 0;
		headcnt++;
	}

	return ret;
}

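/*
 * ptr_ring gives the producer no direct way to see whether a given buffer
 * has been consumed, so completions are approximated with counters:
 * get_buf() hands back a dummy buffer whenever entries are outstanding
 * (tailcnt != headcnt) and the ring is not full, and returns NULL otherwise.
 */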
void *get_buf(unsigned *lenp, void **bufp)
{
	void *datap;

	if (tailcnt == headcnt || __ptr_ring_full(&array))
		datap = NULL;
	else {
		datap = "Buffer\n";
		++tailcnt;
	}

	return datap;
}

bool used_empty()
{
	return (tailcnt == headcnt || __ptr_ring_full(&array));
}

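/*
 * Call/kick suppression is not exercised by this test; abort loudly if the
 * harness ever invokes one of these hooks.
 */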
void disable_call()
{
	assert(0);
}

bool enable_call()
{
	assert(0);
}

void kick_available(void)
{
	assert(0);
}

void disable_kick()
{
	assert(0);
}

bool enable_kick()
{
	assert(0);
}

bool avail_empty()
{
	return __ptr_ring_empty(&array);
}

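/*
 * Consumer side: __ptr_ring_consume() returns the queued pointer, or NULL
 * when the ring is empty; use_buf() reports success accordingly.
 */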
bool use_buf(unsigned *lenp, void **bufp)
{
	void *ptr;

	ptr = __ptr_ring_consume(&array);

	return ptr;
}

void call_used(void)
{
	assert(0);
}