1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include "ivtv-driver.h"
23#include "ivtv-queue.h"
24
25int ivtv_buf_copy_from_user(struct ivtv_stream *s, struct ivtv_buffer *buf, const char __user *src, int copybytes)
26{
27 if (s->buf_size - buf->bytesused < copybytes)
28 copybytes = s->buf_size - buf->bytesused;
29 if (copy_from_user(buf->buf + buf->bytesused, src, copybytes)) {
30 return -EFAULT;
31 }
32 buf->bytesused += copybytes;
33 return copybytes;
34}
35
36void ivtv_buf_swap(struct ivtv_buffer *buf)
37{
38 int i;
39
40 for (i = 0; i < buf->bytesused; i += 4)
41 swab32s((u32 *)(buf->buf + i));
42}
43
44void ivtv_queue_init(struct ivtv_queue *q)
45{
46 INIT_LIST_HEAD(&q->list);
47 q->buffers = 0;
48 q->length = 0;
49 q->bytesused = 0;
50}
51
52void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q)
53{
54 unsigned long flags;
55
56
57 if (q == &s->q_free) {
58 buf->bytesused = 0;
59 buf->readpos = 0;
60 buf->b_flags = 0;
61 buf->dma_xfer_cnt = 0;
62 }
63 spin_lock_irqsave(&s->qlock, flags);
64 list_add_tail(&buf->list, &q->list);
65 q->buffers++;
66 q->length += s->buf_size;
67 q->bytesused += buf->bytesused - buf->readpos;
68 spin_unlock_irqrestore(&s->qlock, flags);
69}
70
71struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q)
72{
73 struct ivtv_buffer *buf = NULL;
74 unsigned long flags;
75
76 spin_lock_irqsave(&s->qlock, flags);
77 if (!list_empty(&q->list)) {
78 buf = list_entry(q->list.next, struct ivtv_buffer, list);
79 list_del_init(q->list.next);
80 q->buffers--;
81 q->length -= s->buf_size;
82 q->bytesused -= buf->bytesused - buf->readpos;
83 }
84 spin_unlock_irqrestore(&s->qlock, flags);
85 return buf;
86}
87
88static void ivtv_queue_move_buf(struct ivtv_stream *s, struct ivtv_queue *from,
89 struct ivtv_queue *to, int clear)
90{
91 struct ivtv_buffer *buf = list_entry(from->list.next, struct ivtv_buffer, list);
92
93 list_move_tail(from->list.next, &to->list);
94 from->buffers--;
95 from->length -= s->buf_size;
96 from->bytesused -= buf->bytesused - buf->readpos;
97
98 if (clear)
99 buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
100 to->buffers++;
101 to->length += s->buf_size;
102 to->bytesused += buf->bytesused - buf->readpos;
103}
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
/* Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'.
 * If 'needed_bytes' == 0, then move all buffers from 'from' into 'to'.
 * If 'steal' != NULL, buffers may also be taken from that queue, but only
 * when 'from' is the free queue (stolen buffers are cleared on the way).
 *
 * When 'from' is q_free (or needed_bytes == 0) the comparison is against
 * total buffer length; otherwise it is against the bytesused count. For the
 * 'steal' queue the total buffer length is always used.
 *
 * Returns -ENOMEM if not enough bytes are available, otherwise the number
 * of buffers stolen from 'steal' (0 if none). Runs entirely under s->qlock. */
int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_queue *steal,
		struct ivtv_queue *to, int needed_bytes)
{
	unsigned long flags;
	int rc = 0;
	int from_free = from == &s->q_free;
	int to_free = to == &s->q_free;
	int bytes_available, bytes_steal;

	spin_lock_irqsave(&s->qlock, flags);
	if (needed_bytes == 0) {
		/* "move everything": treat 'from' like the free queue and ask
		   for its full length so the loop below drains it completely */
		from_free = 1;
		needed_bytes = from->length;
	}

	bytes_available = from_free ? from->length : from->bytesused;
	bytes_steal = (from_free && steal) ? steal->length : 0;

	if (bytes_available + bytes_steal < needed_bytes) {
		spin_unlock_irqrestore(&s->qlock, flags);
		return -ENOMEM;
	}
	while (steal && bytes_available < needed_bytes) {
		struct ivtv_buffer *buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
		u16 dma_xfer_cnt = buf->dma_xfer_cnt;

		/* Move buffers from the tail of the 'steal' queue to the tail
		   of the 'from' queue. All buffers sharing the tail buffer's
		   dma_xfer_cnt value are taken together — NOTE(review): this
		   appears intended to avoid splitting data that spans multiple
		   buffers of one DMA transfer; confirm against the DMA code. */
		while (dma_xfer_cnt == buf->dma_xfer_cnt) {
			list_move_tail(steal->list.prev, &from->list);
			rc++;	/* count of stolen buffers, returned to caller */
			steal->buffers--;
			steal->length -= s->buf_size;
			steal->bytesused -= buf->bytesused - buf->readpos;
			buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
			from->buffers++;
			from->length += s->buf_size;
			bytes_available += s->buf_size;
			if (list_empty(&steal->list))
				break;
			buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
		}
	}
	if (from_free) {
		/* moving free buffers: count progress by length, clear each */
		u32 old_length = to->length;

		while (to->length - old_length < needed_bytes) {
			ivtv_queue_move_buf(s, from, to, 1);
		}
	}
	else {
		/* moving data: count progress by bytesused, clear only if the
		   destination is the free queue */
		u32 old_bytesused = to->bytesused;

		while (to->bytesused - old_bytesused < needed_bytes) {
			ivtv_queue_move_buf(s, from, to, to_free);
		}
	}
	spin_unlock_irqrestore(&s->qlock, flags);
	return rc;
}
184
185void ivtv_flush_queues(struct ivtv_stream *s)
186{
187 ivtv_queue_move(s, &s->q_io, NULL, &s->q_free, 0);
188 ivtv_queue_move(s, &s->q_full, NULL, &s->q_free, 0);
189 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
190 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
191}
192
193int ivtv_stream_alloc(struct ivtv_stream *s)
194{
195 struct ivtv *itv = s->itv;
196 int SGsize = sizeof(struct ivtv_sg_host_element) * s->buffers;
197 int i;
198
199 if (s->buffers == 0)
200 return 0;
201
202 IVTV_DEBUG_INFO("Allocate %s%s stream: %d x %d buffers (%dkB total)\n",
203 s->dma != PCI_DMA_NONE ? "DMA " : "",
204 s->name, s->buffers, s->buf_size, s->buffers * s->buf_size / 1024);
205
206 s->sg_pending = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
207 if (s->sg_pending == NULL) {
208 IVTV_ERR("Could not allocate sg_pending for %s stream\n", s->name);
209 return -ENOMEM;
210 }
211 s->sg_pending_size = 0;
212
213 s->sg_processing = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
214 if (s->sg_processing == NULL) {
215 IVTV_ERR("Could not allocate sg_processing for %s stream\n", s->name);
216 kfree(s->sg_pending);
217 s->sg_pending = NULL;
218 return -ENOMEM;
219 }
220 s->sg_processing_size = 0;
221
222 s->sg_dma = kzalloc(sizeof(struct ivtv_sg_element),
223 GFP_KERNEL|__GFP_NOWARN);
224 if (s->sg_dma == NULL) {
225 IVTV_ERR("Could not allocate sg_dma for %s stream\n", s->name);
226 kfree(s->sg_pending);
227 s->sg_pending = NULL;
228 kfree(s->sg_processing);
229 s->sg_processing = NULL;
230 return -ENOMEM;
231 }
232 if (ivtv_might_use_dma(s)) {
233 s->sg_handle = pci_map_single(itv->pdev, s->sg_dma,
234 sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
235 ivtv_stream_sync_for_cpu(s);
236 }
237
238
239 for (i = 0; i < s->buffers; i++) {
240 struct ivtv_buffer *buf = kzalloc(sizeof(struct ivtv_buffer),
241 GFP_KERNEL|__GFP_NOWARN);
242
243 if (buf == NULL)
244 break;
245 buf->buf = kmalloc(s->buf_size + 256, GFP_KERNEL|__GFP_NOWARN);
246 if (buf->buf == NULL) {
247 kfree(buf);
248 break;
249 }
250 INIT_LIST_HEAD(&buf->list);
251 if (ivtv_might_use_dma(s)) {
252 buf->dma_handle = pci_map_single(s->itv->pdev,
253 buf->buf, s->buf_size + 256, s->dma);
254 ivtv_buf_sync_for_cpu(s, buf);
255 }
256 ivtv_enqueue(s, buf, &s->q_free);
257 }
258 if (i == s->buffers)
259 return 0;
260 IVTV_ERR("Couldn't allocate buffers for %s stream\n", s->name);
261 ivtv_stream_free(s);
262 return -ENOMEM;
263}
264
265void ivtv_stream_free(struct ivtv_stream *s)
266{
267 struct ivtv_buffer *buf;
268
269
270 ivtv_flush_queues(s);
271
272
273 while ((buf = ivtv_dequeue(s, &s->q_free))) {
274 if (ivtv_might_use_dma(s))
275 pci_unmap_single(s->itv->pdev, buf->dma_handle,
276 s->buf_size + 256, s->dma);
277 kfree(buf->buf);
278 kfree(buf);
279 }
280
281
282 if (s->sg_dma != NULL) {
283 if (s->sg_handle != IVTV_DMA_UNMAPPED) {
284 pci_unmap_single(s->itv->pdev, s->sg_handle,
285 sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
286 s->sg_handle = IVTV_DMA_UNMAPPED;
287 }
288 kfree(s->sg_pending);
289 kfree(s->sg_processing);
290 kfree(s->sg_dma);
291 s->sg_pending = NULL;
292 s->sg_processing = NULL;
293 s->sg_dma = NULL;
294 s->sg_pending_size = 0;
295 s->sg_processing_size = 0;
296 }
297}
298