1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
48
49#include <linux/module.h>
50#include <linux/pci.h>
51
52#include "../comedidev.h"
53
54#include "comedi_fc.h"
55#include "mite.h"
56
57#define TOP_OF_PAGE(x) ((x)|(~(PAGE_MASK)))
58
59struct mite_struct *mite_alloc(struct pci_dev *pcidev)
60{
61 struct mite_struct *mite;
62 unsigned int i;
63
64 mite = kzalloc(sizeof(*mite), GFP_KERNEL);
65 if (mite) {
66 spin_lock_init(&mite->lock);
67 mite->pcidev = pcidev;
68 for (i = 0; i < MAX_MITE_DMA_CHANNELS; ++i) {
69 mite->channels[i].mite = mite;
70 mite->channels[i].channel = i;
71 mite->channels[i].done = 1;
72 }
73 }
74 return mite;
75}
76EXPORT_SYMBOL_GPL(mite_alloc);
77
/* Log the fields decoded from the MITE CSIGR (chip signature) register. */
static void dump_chip_signature(u32 csigr_bits)
{
	pr_info("version = %i, type = %i, mite mode = %i, interface mode = %i\n",
		mite_csigr_version(csigr_bits), mite_csigr_type(csigr_bits),
		mite_csigr_mmode(csigr_bits), mite_csigr_imode(csigr_bits));
	pr_info("num channels = %i, write post fifo depth = %i, wins = %i, iowins = %i\n",
		mite_csigr_dmac(csigr_bits), mite_csigr_wpdep(csigr_bits),
		mite_csigr_wins(csigr_bits), mite_csigr_iowins(csigr_bits));
}
87
88static unsigned mite_fifo_size(struct mite_struct *mite, unsigned channel)
89{
90 unsigned fcr_bits = readl(mite->mite_io_addr + MITE_FCR(channel));
91 unsigned empty_count = (fcr_bits >> 16) & 0xff;
92 unsigned full_count = fcr_bits & 0xff;
93
94 return empty_count + full_count;
95}
96
/*
 * mite_setup2() - map and initialise a MITE PCI interface chip.
 * @dev: comedi device being attached
 * @mite: previously allocated mite_struct (see mite_alloc())
 * @use_win1: if true, program I/O window 1 (instead of window 0) with
 *	      the DAQ board's register address
 *
 * Maps BAR 0 (the MITE's own registers) and BAR 1 (the DAQ board's
 * registers, stored in dev->mmio), points a MITE I/O window at the DAQ
 * registers, resets every DMA channel with its interrupts disabled, and
 * records the channel FIFO size.
 *
 * Returns 0 on success or -ENOMEM if an ioremap fails.  On failure the
 * BAR 0 mapping may already be set up; it is released by mite_detach().
 */
int mite_setup2(struct comedi_device *dev,
		struct mite_struct *mite, bool use_win1)
{
	unsigned long length;
	int i;
	u32 csigr_bits;
	unsigned unknown_dma_burst_bits;

	pci_set_master(mite->pcidev);

	/* BAR 0: the MITE's own register space */
	mite->mite_io_addr = pci_ioremap_bar(mite->pcidev, 0);
	if (!mite->mite_io_addr) {
		dev_err(dev->class_dev,
			"Failed to remap mite io memory address\n");
		return -ENOMEM;
	}
	mite->mite_phys_addr = pci_resource_start(mite->pcidev, 0);

	/* BAR 1: the DAQ board's register space */
	dev->mmio = pci_ioremap_bar(mite->pcidev, 1);
	if (!dev->mmio) {
		dev_err(dev->class_dev,
			"Failed to remap daq io memory address\n");
		return -ENOMEM;
	}
	mite->daq_phys_addr = pci_resource_start(mite->pcidev, 1);
	length = pci_resource_len(mite->pcidev, 1);

	/* point a MITE I/O window at the DAQ board's registers */
	if (use_win1) {
		writel(0, mite->mite_io_addr + MITE_IODWBSR);
		dev_info(dev->class_dev,
			 "using I/O Window Base Size register 1\n");
		writel(mite->daq_phys_addr | WENAB |
		       MITE_IODWBSR_1_WSIZE_bits(length),
		       mite->mite_io_addr + MITE_IODWBSR_1);
		writel(0, mite->mite_io_addr + MITE_IODWCR_1);
	} else {
		writel(mite->daq_phys_addr | WENAB,
		       mite->mite_io_addr + MITE_IODWBSR);
	}

	/*
	 * Set the DMA-burst enable bits, preserving the rest of the
	 * register's contents (read-modify-write).
	 */
	unknown_dma_burst_bits =
	    readl(mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
	unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
	writel(unknown_dma_burst_bits,
	       mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);

	/* discover how many DMA channels the chip provides */
	csigr_bits = readl(mite->mite_io_addr + MITE_CSIGR);
	mite->num_channels = mite_csigr_dmac(csigr_bits);
	if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
		dev_warn(dev->class_dev,
			 "mite: bug? chip claims to have %i dma channels. Setting to %i.\n",
			 mite->num_channels, MAX_MITE_DMA_CHANNELS);
		mite->num_channels = MAX_MITE_DMA_CHANNELS;
	}
	dump_chip_signature(csigr_bits);
	for (i = 0; i < mite->num_channels; i++) {
		/* reset the channel and disable all its interrupt sources */
		writel(CHOR_DMARESET, mite->mite_io_addr + MITE_CHOR(i));
		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
		       CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
		       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
		       mite->mite_io_addr + MITE_CHCR(i));
	}
	/* only channel 0 is queried; assumes all channels share the same
	 * FIFO size — TODO confirm against the hardware documentation */
	mite->fifo_size = mite_fifo_size(mite, 0);
	dev_info(dev->class_dev, "fifo size is %i.\n", mite->fifo_size);
	return 0;
}
EXPORT_SYMBOL_GPL(mite_setup2);
171
172void mite_detach(struct mite_struct *mite)
173{
174 if (!mite)
175 return;
176
177 if (mite->mite_io_addr)
178 iounmap(mite->mite_io_addr);
179
180 kfree(mite);
181}
182EXPORT_SYMBOL_GPL(mite_detach);
183
184struct mite_dma_descriptor_ring *mite_alloc_ring(struct mite_struct *mite)
185{
186 struct mite_dma_descriptor_ring *ring =
187 kmalloc(sizeof(struct mite_dma_descriptor_ring), GFP_KERNEL);
188
189 if (ring == NULL)
190 return ring;
191 ring->hw_dev = get_device(&mite->pcidev->dev);
192 if (ring->hw_dev == NULL) {
193 kfree(ring);
194 return NULL;
195 }
196 ring->n_links = 0;
197 ring->descriptors = NULL;
198 ring->descriptors_dma_addr = 0;
199 return ring;
200};
201EXPORT_SYMBOL_GPL(mite_alloc_ring);
202
203void mite_free_ring(struct mite_dma_descriptor_ring *ring)
204{
205 if (ring) {
206 if (ring->descriptors) {
207 dma_free_coherent(ring->hw_dev,
208 ring->n_links *
209 sizeof(struct mite_dma_descriptor),
210 ring->descriptors,
211 ring->descriptors_dma_addr);
212 }
213 put_device(ring->hw_dev);
214 kfree(ring);
215 }
216};
217EXPORT_SYMBOL_GPL(mite_free_ring);
218
219struct mite_channel *mite_request_channel_in_range(struct mite_struct *mite,
220 struct
221 mite_dma_descriptor_ring
222 *ring, unsigned min_channel,
223 unsigned max_channel)
224{
225 int i;
226 unsigned long flags;
227 struct mite_channel *channel = NULL;
228
229
230
231
232 spin_lock_irqsave(&mite->lock, flags);
233 for (i = min_channel; i <= max_channel; ++i) {
234 if (mite->channel_allocated[i] == 0) {
235 mite->channel_allocated[i] = 1;
236 channel = &mite->channels[i];
237 channel->ring = ring;
238 break;
239 }
240 }
241 spin_unlock_irqrestore(&mite->lock, flags);
242 return channel;
243}
244EXPORT_SYMBOL_GPL(mite_request_channel_in_range);
245
/*
 * mite_release_channel() - return a DMA channel to the free pool.
 * @mite_chan: channel obtained from mite_request_channel_in_range()
 *
 * Under mite->lock: disarms and resets the channel, disables all of its
 * interrupt sources, detaches its descriptor ring and marks it free.
 * Releasing an already-free channel is a no-op.
 */
void mite_release_channel(struct mite_channel *mite_chan)
{
	struct mite_struct *mite = mite_chan->mite;
	unsigned long flags;

	spin_lock_irqsave(&mite->lock, flags);
	if (mite->channel_allocated[mite_chan->channel]) {
		mite_dma_disarm(mite_chan);
		mite_dma_reset(mite_chan);
		/* disable all channel interrupt sources */
		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
		       CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
		       CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
		       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
		       mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
		mite->channel_allocated[mite_chan->channel] = 0;
		mite_chan->ring = NULL;
		/* order the MMIO write before the upcoming unlock */
		mmiowb();
	}
	spin_unlock_irqrestore(&mite->lock, flags);
}
EXPORT_SYMBOL_GPL(mite_release_channel);
272
/*
 * mite_dma_arm() - start DMA on a channel prepared by mite_prep_dma().
 *
 * The smp_mb() makes memory writes done before arming (e.g. descriptor
 * ring contents from mite_buf_change()) visible before the device is
 * started.  The done flag is cleared under mite->lock so that
 * mite_get_status()/mite_done() see a consistent state.
 */
void mite_dma_arm(struct mite_channel *mite_chan)
{
	struct mite_struct *mite = mite_chan->mite;
	int chor;
	unsigned long flags;

	/* ensure prior memory writes are visible before starting DMA */
	smp_mb();

	chor = CHOR_START;
	spin_lock_irqsave(&mite->lock, flags);
	mite_chan->done = 0;
	writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
	/* order the MMIO write before the upcoming unlock */
	mmiowb();
	spin_unlock_irqrestore(&mite->lock, flags);
}
EXPORT_SYMBOL_GPL(mite_dma_arm);
294
295
296
297int mite_buf_change(struct mite_dma_descriptor_ring *ring,
298 struct comedi_subdevice *s)
299{
300 struct comedi_async *async = s->async;
301 unsigned int n_links;
302 int i;
303
304 if (ring->descriptors) {
305 dma_free_coherent(ring->hw_dev,
306 ring->n_links *
307 sizeof(struct mite_dma_descriptor),
308 ring->descriptors,
309 ring->descriptors_dma_addr);
310 }
311 ring->descriptors = NULL;
312 ring->descriptors_dma_addr = 0;
313 ring->n_links = 0;
314
315 if (async->prealloc_bufsz == 0)
316 return 0;
317
318 n_links = async->prealloc_bufsz >> PAGE_SHIFT;
319
320 ring->descriptors =
321 dma_alloc_coherent(ring->hw_dev,
322 n_links * sizeof(struct mite_dma_descriptor),
323 &ring->descriptors_dma_addr, GFP_KERNEL);
324 if (!ring->descriptors) {
325 dev_err(s->device->class_dev,
326 "mite: ring buffer allocation failed\n");
327 return -ENOMEM;
328 }
329 ring->n_links = n_links;
330
331 for (i = 0; i < n_links; i++) {
332 ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE);
333 ring->descriptors[i].addr =
334 cpu_to_le32(async->buf_map->page_list[i].dma_addr);
335 ring->descriptors[i].next =
336 cpu_to_le32(ring->descriptors_dma_addr + (i +
337 1) *
338 sizeof(struct mite_dma_descriptor));
339 }
340 ring->descriptors[n_links - 1].next =
341 cpu_to_le32(ring->descriptors_dma_addr);
342
343
344
345
346 smp_wmb();
347 return 0;
348}
349EXPORT_SYMBOL_GPL(mite_buf_change);
350
/*
 * mite_prep_dma() - program a DMA channel for a transfer.
 * @mite_chan: channel to program (must already have a descriptor ring
 *	       attached and a transfer direction set in mite_chan->dir)
 * @num_device_bits: device-side transfer width (8, 16 or 32)
 * @num_memory_bits: memory-side transfer width (8, 16 or 32)
 *
 * Resets the channel, then programs the channel control (CHCR), memory
 * configuration (MCR), device configuration (DCR) and link configuration
 * (LKCR) registers, and finally points the link address register (LKAR)
 * at the start of the descriptor ring.  The channel is left ready to be
 * started with mite_dma_arm().
 */
void mite_prep_dma(struct mite_channel *mite_chan,
		   unsigned int num_device_bits, unsigned int num_memory_bits)
{
	unsigned int chor, chcr, mcr, dcr, lkcr;
	struct mite_struct *mite = mite_chan->mite;

	/* reset the DMA engine and FIFO for this channel */
	chor = CHOR_DMARESET | CHOR_FRESET;
	writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));

	/* short-link chaining mode with dma/done interrupts and bursts */
	chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | CHCR_SET_DONE_IE |
	       CHCR_BURSTEN;
	/* also interrupt on link completion */
	chcr |= CHCR_SET_LC_IE;
	if (num_memory_bits == 32 && num_device_bits == 16) {
		/*
		 * Byte-swap on both sides for 16-bit device data packed
		 * into 32-bit memory words — presumably to keep sample
		 * ordering consistent for this width combination; TODO
		 * confirm against the MITE documentation.
		 */
		chcr |= CHCR_BYTE_SWAP_DEVICE | CHCR_BYTE_SWAP_MEMORY;
	}
	if (mite_chan->dir == COMEDI_INPUT)
		chcr |= CHCR_DEV_TO_MEM;

	writel(chcr, mite->mite_io_addr + MITE_CHCR(mite_chan->channel));

	/* memory-side configuration: ascending addresses, port size below */
	mcr = CR_RL(64) | CR_ASEQUP;
	switch (num_memory_bits) {
	case 8:
		mcr |= CR_PSIZE8;
		break;
	case 16:
		mcr |= CR_PSIZE16;
		break;
	case 32:
		mcr |= CR_PSIZE32;
		break;
	default:
		pr_warn("bug! invalid mem bit width for dma transfer\n");
		break;
	}
	writel(mcr, mite->mite_io_addr + MITE_MCR(mite_chan->channel));

	/* device-side configuration: I/O port addressing, per-channel DRQ */
	dcr = CR_RL(64) | CR_ASEQUP;
	dcr |= CR_PORTIO | CR_AMDEVICE | CR_REQSDRQ(mite_chan->channel);
	switch (num_device_bits) {
	case 8:
		dcr |= CR_PSIZE8;
		break;
	case 16:
		dcr |= CR_PSIZE16;
		break;
	case 32:
		dcr |= CR_PSIZE32;
		break;
	default:
		pr_warn("bug! invalid dev bit width for dma transfer\n");
		break;
	}
	writel(dcr, mite->mite_io_addr + MITE_DCR(mite_chan->channel));

	/* reset the device-side byte counter */
	writel(0, mite->mite_io_addr + MITE_DAR(mite_chan->channel));

	/* link-side configuration: 32-bit descriptor fetches */
	lkcr = CR_RL(64) | CR_ASEQUP | CR_PSIZE32;
	writel(lkcr, mite->mite_io_addr + MITE_LKCR(mite_chan->channel));

	/* point the channel at the start of the descriptor ring */
	writel(mite_chan->ring->descriptors_dma_addr,
	       mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_prep_dma);
437
438static u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
439{
440 struct mite_struct *mite = mite_chan->mite;
441
442 return readl(mite->mite_io_addr + MITE_DAR(mite_chan->channel));
443}
444
445u32 mite_bytes_in_transit(struct mite_channel *mite_chan)
446{
447 struct mite_struct *mite = mite_chan->mite;
448
449 return readl(mite->mite_io_addr +
450 MITE_FCR(mite_chan->channel)) & 0x000000FF;
451}
452EXPORT_SYMBOL_GPL(mite_bytes_in_transit);
453
454
/*
 * Lower bound on the bytes the MITE has written to memory: device byte
 * count minus bytes still in the FIFO.  Note the read order — the device
 * count (DAR) is read before the FIFO count; the _ub variant below reads
 * them in the opposite order, which is the only difference between the
 * two and is what makes this a lower rather than upper bound.
 */
u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan)
{
	u32 device_byte_count;

	device_byte_count = mite_device_bytes_transferred(mite_chan);
	return device_byte_count - mite_bytes_in_transit(mite_chan);
}
EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_lb);
463
464
/*
 * Upper bound on the bytes the MITE has written to memory.  Same
 * arithmetic as the _lb variant; only the register read order differs
 * (FIFO count first, then the device count), which biases the result to
 * an upper bound.
 */
u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan)
{
	u32 in_transit_count;

	in_transit_count = mite_bytes_in_transit(mite_chan);
	return mite_device_bytes_transferred(mite_chan) - in_transit_count;
}
EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_ub);
473
474
/*
 * Lower bound on the bytes the MITE has read from memory: device byte
 * count plus bytes still in the FIFO.  As with the written-to-memory
 * pair above, the lb/ub variants differ only in the order the two
 * registers are read (here: device count before FIFO count).
 */
u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan)
{
	u32 device_byte_count;

	device_byte_count = mite_device_bytes_transferred(mite_chan);
	return device_byte_count + mite_bytes_in_transit(mite_chan);
}
EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_lb);
483
484
/*
 * Upper bound on the bytes the MITE has read from memory.  Same
 * arithmetic as the _lb variant; only the register read order differs
 * (FIFO count first, then the device count).
 */
u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan)
{
	u32 in_transit_count;

	in_transit_count = mite_bytes_in_transit(mite_chan);
	return mite_device_bytes_transferred(mite_chan) + in_transit_count;
}
EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_ub);
493
494unsigned mite_dma_tcr(struct mite_channel *mite_chan)
495{
496 struct mite_struct *mite = mite_chan->mite;
497
498 return readl(mite->mite_io_addr + MITE_TCR(mite_chan->channel));
499}
500EXPORT_SYMBOL_GPL(mite_dma_tcr);
501
502void mite_dma_disarm(struct mite_channel *mite_chan)
503{
504 struct mite_struct *mite = mite_chan->mite;
505 unsigned chor;
506
507
508 chor = CHOR_ABORT;
509 writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
510}
511EXPORT_SYMBOL_GPL(mite_dma_disarm);
512
/*
 * mite_sync_input_dma() - propagate input DMA progress to the comedi core.
 * @mite_chan: the DMA channel feeding this subdevice
 * @s: the comedi subdevice
 *
 * Makes the whole preallocated buffer writable by the device, checks for
 * DMA overwrite of not-yet-read data, then reports newly written bytes
 * to the comedi core.
 *
 * Returns 0 on success, -1 on overflow (COMEDI_CB_OVERFLOW is set).
 */
int mite_sync_input_dma(struct mite_channel *mite_chan,
			struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	int count;
	unsigned int nbytes, old_alloc_count;

	old_alloc_count = async->buf_write_alloc_count;
	/* write alloc as much as we can */
	comedi_buf_write_alloc(s, async->prealloc_bufsz);

	nbytes = mite_bytes_written_to_memory_lb(mite_chan);
	/* the upper bound gives a conservative overwrite check; the cast
	 * to int makes the comparison wraparound-safe */
	if ((int)(mite_bytes_written_to_memory_ub(mite_chan) -
		  old_alloc_count) > 0) {
		dev_warn(s->device->class_dev,
			 "mite: DMA overwrite of free area\n");
		async->events |= COMEDI_CB_OVERFLOW;
		return -1;
	}

	/* may be <= 0 because nbytes is the conservative lower bound */
	count = nbytes - async->buf_write_count;
	if (count <= 0)
		return 0;

	comedi_buf_write_free(s, count);
	comedi_inc_scan_progress(s, count);
	async->events |= COMEDI_CB_BLOCK;
	return 0;
}
EXPORT_SYMBOL_GPL(mite_sync_input_dma);
545
546int mite_sync_output_dma(struct mite_channel *mite_chan,
547 struct comedi_subdevice *s)
548{
549 struct comedi_async *async = s->async;
550 struct comedi_cmd *cmd = &async->cmd;
551 u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s);
552 unsigned int old_alloc_count = async->buf_read_alloc_count;
553 u32 nbytes_ub, nbytes_lb;
554 int count;
555
556
557 comedi_buf_read_alloc(s, async->prealloc_bufsz);
558 nbytes_lb = mite_bytes_read_from_memory_lb(mite_chan);
559 if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_lb - stop_count) > 0)
560 nbytes_lb = stop_count;
561 nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan);
562 if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0)
563 nbytes_ub = stop_count;
564 if ((int)(nbytes_ub - old_alloc_count) > 0) {
565 dev_warn(s->device->class_dev, "mite: DMA underrun\n");
566 async->events |= COMEDI_CB_OVERFLOW;
567 return -1;
568 }
569 count = nbytes_lb - async->buf_read_count;
570 if (count <= 0)
571 return 0;
572
573 if (count) {
574 comedi_buf_read_free(s, count);
575 async->events |= COMEDI_CB_BLOCK;
576 }
577 return 0;
578}
579EXPORT_SYMBOL_GPL(mite_sync_output_dma);
580
/*
 * mite_get_status() - read the channel status register (CHSR).
 *
 * If the DONE bit is set, the cached mite_chan->done flag is set and the
 * done condition is acknowledged by writing CHOR_CLRDONE, all under
 * mite->lock.  Returns the raw CHSR value.
 */
unsigned mite_get_status(struct mite_channel *mite_chan)
{
	struct mite_struct *mite = mite_chan->mite;
	unsigned status;
	unsigned long flags;

	spin_lock_irqsave(&mite->lock, flags);
	status = readl(mite->mite_io_addr + MITE_CHSR(mite_chan->channel));
	if (status & CHSR_DONE) {
		mite_chan->done = 1;
		/* acknowledge the done condition */
		writel(CHOR_CLRDONE,
		       mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
	}
	/* order the MMIO access before the upcoming unlock */
	mmiowb();
	spin_unlock_irqrestore(&mite->lock, flags);
	return status;
}
EXPORT_SYMBOL_GPL(mite_get_status);
599
600int mite_done(struct mite_channel *mite_chan)
601{
602 struct mite_struct *mite = mite_chan->mite;
603 unsigned long flags;
604 int done;
605
606 mite_get_status(mite_chan);
607 spin_lock_irqsave(&mite->lock, flags);
608 done = mite_chan->done;
609 spin_unlock_irqrestore(&mite->lock, flags);
610 return done;
611}
612EXPORT_SYMBOL_GPL(mite_done);
613
/*
 * This module has no work to do at load or unload time; the init/exit
 * hooks exist only so the file can be built as a loadable module.
 */
static int __init mite_module_init(void)
{
	return 0;
}

static void __exit mite_module_exit(void)
{
}

module_init(mite_module_init);
module_exit(mite_module_exit);
625
626MODULE_AUTHOR("Comedi http://www.comedi.org");
627MODULE_DESCRIPTION("Comedi low-level driver");
628MODULE_LICENSE("GPL");
629