#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <malloc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>

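/*
 * Kernel-style helpers expected by ptr_ring.h: cache-line size and
 * alignment, branch-prediction hints, and allocation size limits.
 */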
#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x) (__builtin_expect(!!(x), 0))
#define likely(x) (__builtin_expect(!!(x), 1))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
#define SIZE_MAX (~(size_t)0)
#define KMALLOC_MAX_SIZE SIZE_MAX

typedef pthread_spinlock_t spinlock_t;

typedef int gfp_t;
#define __GFP_ZERO 0x1

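/*
 * Minimal userspace stand-ins for the kernel allocation API so that the
 * kernel's ptr_ring.h can be built here.  Allocations are cache-line
 * aligned via memalign(); __GFP_ZERO is the only gfp flag honoured.
 */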
static void *kmalloc(unsigned size, gfp_t gfp)
{
	void *p = memalign(64, size);
	if (!p)
		return p;

	if (gfp & __GFP_ZERO)
		memset(p, 0, size);
	return p;
}

static inline void *kzalloc(unsigned size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return kmalloc(n * size, flags);
}

static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

static void kfree(void *p)
{
	if (p)
		free(p);
}

#define kvmalloc_array kmalloc_array
#define kvfree kfree

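/*
 * spinlock_t is mapped onto a pthread spinlock.  The _bh/_irq/_irqsave
 * variants only differ inside the kernel (softirq/interrupt disabling);
 * in userspace they all reduce to plain lock/unlock and the flags
 * argument is ignored.
 */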
static void spin_lock_init(spinlock_t *lock)
{
	int r = pthread_spin_init(lock, 0);
	assert(!r);
}

static void spin_lock(spinlock_t *lock)
{
	int ret = pthread_spin_lock(lock);
	assert(!ret);
}

static void spin_unlock(spinlock_t *lock)
{
	int ret = pthread_spin_unlock(lock);
	assert(!ret);
}

static void spin_lock_bh(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_bh(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irq(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_irq(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
	spin_lock(lock);
}

static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
	spin_unlock(lock);
}

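/*
 * The allocation, spinlock and alignment shims above must be visible
 * before the kernel header is pulled in, since ptr_ring.h uses them
 * directly.
 */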
#include "../../../include/linux/ptr_ring.h"

static unsigned long long headcnt, tailcnt;
static struct ptr_ring array ____cacheline_aligned_in_smp;

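/*
 * Ring setup.  ring_size and param are expected to come from the test
 * harness (declared in main.h); a non-zero param overrides the ring's
 * consumer batch size.
 */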
void alloc_ring(void)
{
	int ret = ptr_ring_init(&array, ring_size, 0);
	assert(!ret);

	if (param)
		array.batch = param;
}

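/*
 * Producer (guest) side: add_inbuf() queues one buffer into the ring;
 * headcnt counts successful submissions.
 */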
int add_inbuf(unsigned len, void *buf, void *datap)
{
	int ret;

	ret = __ptr_ring_produce(&array, buf);
	if (ret >= 0) {
		ret = 0;
		headcnt++;
	}

	return ret;
}

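/*
 * ptr_ring gives the producer no way to ask whether a particular buffer
 * has been consumed, so completions are approximated with the
 * headcnt/tailcnt counters: get_buf() reports a completion whenever at
 * least one produced buffer is outstanding and the ring is not full.
 */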
void *get_buf(unsigned *lenp, void **bufp)
{
	void *datap;

	if (tailcnt == headcnt || __ptr_ring_full(&array))
		datap = NULL;
	else {
		datap = "Buffer\n";
		++tailcnt;
	}

	return datap;
}

bool used_empty()
{
	return (tailcnt == headcnt || __ptr_ring_full(&array));
}

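/*
 * Producer-side notification hooks.  They abort via assert(0): this
 * backend is presumably meant to run in polling mode only, where the
 * harness never invokes them.
 */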
void disable_call()
{
	assert(0);
}

bool enable_call()
{
	assert(0);
}

void kick_available(void)
{
	assert(0);
}

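/*
 * Consumer (host) side.  The kick-related notification hooks below are
 * stubbed out in the same way.
 */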
void disable_kick()
{
	assert(0);
}

bool enable_kick()
{
	assert(0);
}

bool avail_empty()
{
	return __ptr_ring_empty(&array);
}

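/*
 * use_buf() consumes one entry; the pointer-to-bool conversion yields
 * false when the ring is empty.
 */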
bool use_buf(unsigned *lenp, void **bufp)
{
	void *ptr;

	ptr = __ptr_ring_consume(&array);

	return ptr;
}

void call_used(void)
{
	assert(0);
}