#include "ivtv-driver.h"
#include "ivtv-queue.h"

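/*
 * Copy up to 'copybytes' bytes from userspace into 'buf', capped to the
 * space remaining in the buffer. Returns the number of bytes copied, or
 * -EFAULT if the userspace copy failed.
 */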
int ivtv_buf_copy_from_user(struct ivtv_stream *s, struct ivtv_buffer *buf, const char __user *src, int copybytes)
{
	if (s->buf_size - buf->bytesused < copybytes)
		copybytes = s->buf_size - buf->bytesused;
	if (copy_from_user(buf->buf + buf->bytesused, src, copybytes))
		return -EFAULT;
	buf->bytesused += copybytes;
	return copybytes;
}

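/* Byte-swap the buffer contents, 32 bits at a time. */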
void ivtv_buf_swap(struct ivtv_buffer *buf)
{
	int i;

	for (i = 0; i < buf->bytesused; i += 4)
		swab32s((u32 *)(buf->buf + i));
}

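/* Initialize a queue to the empty state. */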
void ivtv_queue_init(struct ivtv_queue *q)
{
	INIT_LIST_HEAD(&q->list);
	q->buffers = 0;
	q->length = 0;
	q->bytesused = 0;
}

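/*
 * Add a buffer to the tail of queue 'q' and update the queue accounting
 * under the stream's qlock.
 */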
void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q)
{
	unsigned long flags;

	/* clear the buffer if it is going to be enqueued to the free queue */
	if (q == &s->q_free) {
		buf->bytesused = 0;
		buf->readpos = 0;
		buf->b_flags = 0;
		buf->dma_xfer_cnt = 0;
	}
	spin_lock_irqsave(&s->qlock, flags);
	list_add_tail(&buf->list, &q->list);
	q->buffers++;
	q->length += s->buf_size;
	q->bytesused += buf->bytesused - buf->readpos;
	spin_unlock_irqrestore(&s->qlock, flags);
}

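/*
 * Remove and return the buffer at the head of queue 'q', or NULL if the
 * queue is empty.
 */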
struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q)
{
	struct ivtv_buffer *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&s->qlock, flags);
	if (!list_empty(&q->list)) {
		buf = list_entry(q->list.next, struct ivtv_buffer, list);
		list_del_init(q->list.next);
		q->buffers--;
		q->length -= s->buf_size;
		q->bytesused -= buf->bytesused - buf->readpos;
	}
	spin_unlock_irqrestore(&s->qlock, flags);
	return buf;
}

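/*
 * Move the first buffer of 'from' to the tail of 'to', updating the
 * accounting of both queues. If 'clear' is set the buffer state is reset,
 * so the destination accounts it as empty. Must be called with s->qlock held.
 */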
static void ivtv_queue_move_buf(struct ivtv_stream *s, struct ivtv_queue *from,
		struct ivtv_queue *to, int clear)
{
	struct ivtv_buffer *buf = list_entry(from->list.next, struct ivtv_buffer, list);

	list_move_tail(from->list.next, &to->list);
	from->buffers--;
	from->length -= s->buf_size;
	from->bytesused -= buf->bytesused - buf->readpos;

	/* special handling for q_free */
	if (clear)
		buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
	to->buffers++;
	to->length += s->buf_size;
	to->bytesused += buf->bytesused - buf->readpos;
}

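/*
 * Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'.
 * If 'needed_bytes' == 0, then move all buffers from 'from' into 'to'.
 * If 'steal' != NULL, then buffers may also be taken from that queue if
 * needed, but only if 'from' is the free queue.
 *
 * The buffer is automatically cleared if it goes to the free queue. It is
 * also cleared if buffers have to be taken from the 'steal' queue while
 * the 'from' queue is the free queue.
 *
 * When 'from' is q_free, needed_bytes is compared against the total
 * available buffer length, otherwise against the used buffer length
 * (which may be less than the total buffer length).
 *
 * Returns -ENOMEM if the buffers could not be obtained, 0 if all buffers
 * came from the 'from' queue, and otherwise the number of stolen buffers.
 * A caller typically moves buffers from q_free into q_predma before
 * starting a DMA transfer, stealing from q_full when q_free cannot
 * satisfy the request.
 */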
int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_queue *steal,
		struct ivtv_queue *to, int needed_bytes)
{
	unsigned long flags;
	int rc = 0;
	int from_free = from == &s->q_free;
	int to_free = to == &s->q_free;
	int bytes_available, bytes_steal;

	spin_lock_irqsave(&s->qlock, flags);
	if (needed_bytes == 0) {
		from_free = 1;
		needed_bytes = from->length;
	}

	bytes_available = from_free ? from->length : from->bytesused;
	bytes_steal = (from_free && steal) ? steal->length : 0;

	if (bytes_available + bytes_steal < needed_bytes) {
		spin_unlock_irqrestore(&s->qlock, flags);
		return -ENOMEM;
	}
	while (steal && bytes_available < needed_bytes) {
		struct ivtv_buffer *buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
		u16 dma_xfer_cnt = buf->dma_xfer_cnt;

		/* move buffers from the tail of the 'steal' queue to the tail of the
		   'from' queue. Always move all the buffers that share the same
		   dma_xfer_cnt value: this ensures that you do not end up with
		   partial frame data if one frame is stored in multiple buffers. */
		while (dma_xfer_cnt == buf->dma_xfer_cnt) {
			list_move_tail(steal->list.prev, &from->list);
			rc++;
			steal->buffers--;
			steal->length -= s->buf_size;
			steal->bytesused -= buf->bytesused - buf->readpos;
			buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
			from->buffers++;
			from->length += s->buf_size;
			bytes_available += s->buf_size;
			if (list_empty(&steal->list))
				break;
			buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
		}
	}
	if (from_free) {
		u32 old_length = to->length;

		while (to->length - old_length < needed_bytes)
			ivtv_queue_move_buf(s, from, to, 1);
	} else {
		u32 old_bytesused = to->bytesused;

		while (to->bytesused - old_bytesused < needed_bytes)
			ivtv_queue_move_buf(s, from, to, to_free);
	}
	spin_unlock_irqrestore(&s->qlock, flags);
	return rc;
}

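/* Move all buffers of a stream back to its free queue. */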
void ivtv_flush_queues(struct ivtv_stream *s)
{
	ivtv_queue_move(s, &s->q_io, NULL, &s->q_free, 0);
	ivtv_queue_move(s, &s->q_full, NULL, &s->q_free, 0);
	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
	ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
}

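/*
 * Allocate the scatter-gather arrays and the stream buffers. All buffers
 * start out on the free queue. Returns 0 on success or -ENOMEM.
 */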
int ivtv_stream_alloc(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	int SGsize = sizeof(struct ivtv_sg_host_element) * s->buffers;
	int i;

	if (s->buffers == 0)
		return 0;

	IVTV_DEBUG_INFO("Allocate %s%s stream: %d x %d buffers (%dkB total)\n",
		s->dma != PCI_DMA_NONE ? "DMA " : "",
		s->name, s->buffers, s->buf_size, s->buffers * s->buf_size / 1024);

	s->sg_pending = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
	if (s->sg_pending == NULL) {
		IVTV_ERR("Could not allocate sg_pending for %s stream\n", s->name);
		return -ENOMEM;
	}
	s->sg_pending_size = 0;

	s->sg_processing = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
	if (s->sg_processing == NULL) {
		IVTV_ERR("Could not allocate sg_processing for %s stream\n", s->name);
		kfree(s->sg_pending);
		s->sg_pending = NULL;
		return -ENOMEM;
	}
	s->sg_processing_size = 0;

	s->sg_dma = kzalloc(sizeof(struct ivtv_sg_element),
			GFP_KERNEL|__GFP_NOWARN);
	if (s->sg_dma == NULL) {
		IVTV_ERR("Could not allocate sg_dma for %s stream\n", s->name);
		kfree(s->sg_pending);
		s->sg_pending = NULL;
		kfree(s->sg_processing);
		s->sg_processing = NULL;
		return -ENOMEM;
	}
	if (ivtv_might_use_dma(s)) {
		s->sg_handle = pci_map_single(itv->pdev, s->sg_dma,
				sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
		ivtv_stream_sync_for_cpu(s);
	}

	/* allocate stream buffers. Initially all buffers are in q_free. */
	for (i = 0; i < s->buffers; i++) {
		struct ivtv_buffer *buf = kzalloc(sizeof(struct ivtv_buffer),
						GFP_KERNEL|__GFP_NOWARN);

		if (buf == NULL)
			break;
		buf->buf = kmalloc(s->buf_size + 256, GFP_KERNEL|__GFP_NOWARN);
		if (buf->buf == NULL) {
			kfree(buf);
			break;
		}
		INIT_LIST_HEAD(&buf->list);
		if (ivtv_might_use_dma(s)) {
			buf->dma_handle = pci_map_single(s->itv->pdev,
				buf->buf, s->buf_size + 256, s->dma);
			ivtv_buf_sync_for_cpu(s, buf);
		}
		ivtv_enqueue(s, buf, &s->q_free);
	}
	if (i == s->buffers)
		return 0;
	IVTV_ERR("Couldn't allocate buffers for %s stream\n", s->name);
	ivtv_stream_free(s);
	return -ENOMEM;
}

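/* Free all buffers and the scatter-gather arrays of a stream. */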
void ivtv_stream_free(struct ivtv_stream *s)
{
	struct ivtv_buffer *buf;

	/* move all buffers to q_free */
	ivtv_flush_queues(s);

	/* empty q_free */
	while ((buf = ivtv_dequeue(s, &s->q_free))) {
		if (ivtv_might_use_dma(s))
			pci_unmap_single(s->itv->pdev, buf->dma_handle,
				s->buf_size + 256, s->dma);
		kfree(buf->buf);
		kfree(buf);
	}

	/* free the SG arrays and lists */
	if (s->sg_dma != NULL) {
		if (s->sg_handle != IVTV_DMA_UNMAPPED) {
			pci_unmap_single(s->itv->pdev, s->sg_handle,
				sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
			s->sg_handle = IVTV_DMA_UNMAPPED;
		}
		kfree(s->sg_pending);
		kfree(s->sg_processing);
		kfree(s->sg_dma);
		s->sg_pending = NULL;
		s->sg_processing = NULL;
		s->sg_dma = NULL;
		s->sg_pending_size = 0;
		s->sg_processing_size = 0;
	}
}