1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32#define DEBUG
33#include <linux/init.h>
34#include <linux/module.h>
35#include <linux/miscdevice.h>
36#include <linux/fs.h>
37#include <linux/cdev.h>
38#include <linux/kdev_t.h>
39#include <linux/mutex.h>
40#include <linux/sched.h>
41#include <linux/mm.h>
42#include <linux/poll.h>
43#include <linux/wait.h>
44#include <linux/pci.h>
45#include <linux/firmware.h>
46#include <linux/slab.h>
47#include <linux/ioctl.h>
48#include <asm/current.h>
49#include <linux/ioport.h>
50#include <linux/io.h>
51#include <linux/interrupt.h>
52#include <linux/pagemap.h>
53#include <asm/cacheflush.h>
54#include <linux/sched.h>
55#include <linux/delay.h>
56#include <linux/jiffies.h>
57#include <linux/rar_register.h>
58
59#include "../memrar/memrar.h"
60
61#include "sep_driver_hw_defs.h"
62#include "sep_driver_config.h"
63#include "sep_driver_api.h"
64#include "sep_dev.h"
65
66
67
68
69
70#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
71
72
73
74
75
76
77
78static struct sep_device *sep_dev;
79
80
81
82
83
84
85
86
87static int sep_load_firmware(struct sep_device *sep)
88{
89 const struct firmware *fw;
90 char *cache_name = "cache.image.bin";
91 char *res_name = "resident.image.bin";
92 char *extapp_name = "extapp.image.bin";
93 int error ;
94 unsigned long work1, work2, work3;
95
96
97 sep->resident_bus = sep->rar_bus;
98 sep->resident_addr = sep->rar_addr;
99
100 error = request_firmware(&fw, res_name, &sep->pdev->dev);
101 if (error) {
102 dev_warn(&sep->pdev->dev, "can't request resident fw\n");
103 return error;
104 }
105
106 memcpy(sep->resident_addr, (void *)fw->data, fw->size);
107 sep->resident_size = fw->size;
108 release_firmware(fw);
109
110 dev_dbg(&sep->pdev->dev, "resident virtual is %p\n",
111 sep->resident_addr);
112 dev_dbg(&sep->pdev->dev, "resident bus is %lx\n",
113 (unsigned long)sep->resident_bus);
114 dev_dbg(&sep->pdev->dev, "resident size is %08zx\n",
115 sep->resident_size);
116
117
118 work1 = (unsigned long)sep->resident_bus;
119 work2 = (unsigned long)sep->resident_size;
120 work3 = (work1 + work2 + (1024 * 4)) & 0xfffff000;
121 sep->dcache_bus = (dma_addr_t)work3;
122
123 work1 = (unsigned long)sep->resident_addr;
124 work2 = (unsigned long)sep->resident_size;
125 work3 = (work1 + work2 + (1024 * 4)) & 0xfffff000;
126 sep->dcache_addr = (void *)work3;
127
128 sep->dcache_size = 1024 * 128;
129
130
131 sep->cache_bus = sep->dcache_bus + sep->dcache_size;
132 sep->cache_addr = sep->dcache_addr + sep->dcache_size;
133
134 error = request_firmware(&fw, cache_name, &sep->pdev->dev);
135 if (error) {
136 dev_warn(&sep->pdev->dev, "Unable to request cache firmware\n");
137 return error;
138 }
139
140 memcpy(sep->cache_addr, (void *)fw->data, fw->size);
141 sep->cache_size = fw->size;
142 release_firmware(fw);
143
144 dev_dbg(&sep->pdev->dev, "cache virtual is %p\n",
145 sep->cache_addr);
146 dev_dbg(&sep->pdev->dev, "cache bus is %08lx\n",
147 (unsigned long)sep->cache_bus);
148 dev_dbg(&sep->pdev->dev, "cache size is %08zx\n",
149 sep->cache_size);
150
151
152 sep->extapp_bus = sep->cache_bus + (1024 * 370);
153 sep->extapp_addr = sep->cache_addr + (1024 * 370);
154
155 error = request_firmware(&fw, extapp_name, &sep->pdev->dev);
156 if (error) {
157 dev_warn(&sep->pdev->dev, "Unable to request extapp firmware\n");
158 return error;
159 }
160
161 memcpy(sep->extapp_addr, (void *)fw->data, fw->size);
162 sep->extapp_size = fw->size;
163 release_firmware(fw);
164
165 dev_dbg(&sep->pdev->dev, "extapp virtual is %p\n",
166 sep->extapp_addr);
167 dev_dbg(&sep->pdev->dev, "extapp bus is %08llx\n",
168 (unsigned long long)sep->extapp_bus);
169 dev_dbg(&sep->pdev->dev, "extapp size is %08zx\n",
170 sep->extapp_size);
171
172 return error;
173}
174
175MODULE_FIRMWARE("sep/cache.image.bin");
176MODULE_FIRMWARE("sep/resident.image.bin");
177MODULE_FIRMWARE("sep/extapp.image.bin");
178
179
180
181
182
183static void sep_dump_message(struct sep_device *sep)
184{
185 int count;
186 u32 *p = sep->shared_addr;
187 for (count = 0; count < 12 * 4; count += 4)
188 dev_dbg(&sep->pdev->dev, "Word %d of the message is %x\n",
189 count, *p++);
190}
191
192
193
194
195
196
197static int sep_map_and_alloc_shared_area(struct sep_device *sep)
198{
199 sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
200 sep->shared_size,
201 &sep->shared_bus, GFP_KERNEL);
202
203 if (!sep->shared_addr) {
204 dev_warn(&sep->pdev->dev,
205 "shared memory dma_alloc_coherent failed\n");
206 return -ENOMEM;
207 }
208 dev_dbg(&sep->pdev->dev,
209 "shared_addr %zx bytes @%p (bus %llx)\n",
210 sep->shared_size, sep->shared_addr,
211 (unsigned long long)sep->shared_bus);
212 return 0;
213}
214
215
216
217
218
/*
 * sep_unmap_and_free_shared_area - release the driver/SEP shared buffer
 * @sep: the SEP device
 *
 * Frees the coherent DMA buffer allocated by
 * sep_map_and_alloc_shared_area(). The shared_addr/shared_bus fields
 * are not cleared here; callers must not use them afterwards.
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep)
{
	dev_dbg(&sep->pdev->dev, "shared area unmap and free\n");
	dma_free_coherent(&sep->pdev->dev, sep->shared_size,
		sep->shared_addr, sep->shared_bus);
}
225
226
227
228
229
230
231
232
233
/*
 * sep_shared_bus_to_virt - translate a shared-area bus address to a
 * kernel virtual address.
 * @sep: the SEP device
 * @bus_address: bus address assumed to lie within
 *	[shared_bus, shared_bus + shared_size) — not range-checked here;
 *	callers must guarantee it (TODO confirm all callers do).
 *
 * Returns the kernel virtual address of the same byte in the shared
 * area.
 */
static void *sep_shared_bus_to_virt(struct sep_device *sep,
	dma_addr_t bus_address)
{
	return sep->shared_addr + (bus_address - sep->shared_bus);
}
239
240
241
242
243
244
245
246
247static int sep_singleton_open(struct inode *inode_ptr, struct file *file_ptr)
248{
249 int error = 0;
250 struct sep_device *sep;
251
252
253
254
255
256 sep = sep_dev;
257
258 file_ptr->private_data = sep;
259
260 dev_dbg(&sep->pdev->dev, "Singleton open for pid %d\n", current->pid);
261
262 dev_dbg(&sep->pdev->dev, "calling test and set for singleton 0\n");
263 if (test_and_set_bit(0, &sep->singleton_access_flag)) {
264 error = -EBUSY;
265 goto end_function;
266 }
267
268 dev_dbg(&sep->pdev->dev, "sep_singleton_open end\n");
269end_function:
270 return error;
271}
272
273
274
275
276
277
278
279
280
281
282
283static int sep_open(struct inode *inode, struct file *filp)
284{
285 struct sep_device *sep;
286
287
288
289
290
291 sep = sep_dev;
292 filp->private_data = sep;
293
294 dev_dbg(&sep->pdev->dev, "Open for pid %d\n", current->pid);
295
296
297 return 0;
298}
299
300
301
302
303
304
305
306
307
308
309static int sep_singleton_release(struct inode *inode, struct file *filp)
310{
311 struct sep_device *sep = filp->private_data;
312
313 dev_dbg(&sep->pdev->dev, "Singleton release for pid %d\n",
314 current->pid);
315 clear_bit(0, &sep->singleton_access_flag);
316 return 0;
317}
318
319
320
321
322
323
324
325
326
327
328
329static int sep_request_daemon_open(struct inode *inode, struct file *filp)
330{
331 struct sep_device *sep = sep_dev;
332 int error = 0;
333
334 filp->private_data = sep;
335
336 dev_dbg(&sep->pdev->dev, "Request daemon open for pid %d\n",
337 current->pid);
338
339
340 dev_dbg(&sep->pdev->dev, "calling test and set for req_dmon open 0\n");
341 if (test_and_set_bit(0, &sep->request_daemon_open))
342 error = -EBUSY;
343 return error;
344}
345
346
347
348
349
350
351
352
353static int sep_request_daemon_release(struct inode *inode, struct file *filp)
354{
355 struct sep_device *sep = filp->private_data;
356
357 dev_dbg(&sep->pdev->dev, "Reques daemon release for pid %d\n",
358 current->pid);
359
360
361 clear_bit(0, &sep->request_daemon_open);
362 return 0;
363}
364
365
366
367
368
369
370
371
/*
 * sep_req_daemon_send_reply_command_handler - signal a daemon reply to
 * the SEP through GPR2.
 * @sep: the SEP device
 *
 * Dumps the outgoing message for debugging, bumps the send/reply
 * counters under the snd_rply_lck spinlock, and writes the counter
 * value into the host-to-SEP GPR2 register. Always returns 0.
 */
static int sep_req_daemon_send_reply_command_handler(struct sep_device *sep)
{
	unsigned long lck_flags;

	dev_dbg(&sep->pdev->dev,
		"sep_req_daemon_send_reply_command_handler start\n");

	sep_dump_message(sep);

	/* Counters are shared with other paths; update them under the lock */
	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
	sep->send_ct++;
	sep->reply_ct++;

	/* Notify the SEP by writing the current send count into GPR2 */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
	/*
	 * NOTE(review): send_ct is deliberately incremented a second time
	 * after the register write — presumably to pre-account for the
	 * response the write triggers. Confirm against the SEP protocol
	 * before changing this ordering.
	 */
	sep->send_ct++;

	spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);

	dev_dbg(&sep->pdev->dev,
		"sep_req_daemon_send_reply send_ct %lx reply_ct %lx\n",
		sep->send_ct, sep->reply_ct);

	dev_dbg(&sep->pdev->dev,
		"sep_req_daemon_send_reply_command_handler end\n");

	return 0;
}
401
402
403
404
405
406
407
408
/*
 * sep_free_dma_table_data_handler - tear down all per-DCB DMA resources
 * @sep: the SEP device
 *
 * For each DMA control block created so far (sep->nr_dcb_creat):
 * unmaps the input/output DMA mappings, flushes and releases the pinned
 * user pages (marking output pages dirty), frees the bookkeeping
 * arrays, and resets the per-DCB record. Finally resets the DCB and
 * LLI-table counters. Always returns 0.
 *
 * NOTE(review): the unmap loops iterate in_num_pages/out_num_pages
 * while the map arrays track in_map_num_entries/out_map_num_entries
 * separately — in this driver the pairs are always set to the same
 * value (see sep_lock_user_pages/sep_lock_kernel_pages), but confirm
 * before relying on that elsewhere.
 */
static int sep_free_dma_table_data_handler(struct sep_device *sep)
{
	int count;
	int dcb_counter;
	/* Per-DCB resource record being torn down */
	struct sep_dma_resource *dma;

	dev_dbg(&sep->pdev->dev, "sep_free_dma_table_data_handler start\n");

	for (dcb_counter = 0; dcb_counter < sep->nr_dcb_creat; dcb_counter++) {
		dma = &sep->dma_res_arr[dcb_counter];

		/* Unmap and free the input map array */
		if (dma->in_map_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->in_map_array[count].dma_addr,
					dma->in_map_array[count].size,
					DMA_TO_DEVICE);
			}
			kfree(dma->in_map_array);
		}

		/* Unmap and free the output map array */
		if (dma->out_map_array) {
			for (count = 0; count < dma->out_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->out_map_array[count].dma_addr,
					dma->out_map_array[count].size,
					DMA_FROM_DEVICE);
			}
			kfree(dma->out_map_array);
		}

		/* Release pinned input pages (no dirtying needed) */
		if (dma->in_page_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				flush_dcache_page(dma->in_page_array[count]);
				page_cache_release(dma->in_page_array[count]);
			}
			kfree(dma->in_page_array);
		}

		/* Output pages were written by the device: mark them dirty */
		if (dma->out_page_array) {
			for (count = 0; count < dma->out_num_pages; count++) {
				if (!PageReserved(dma->out_page_array[count]))
					SetPageDirty(dma->out_page_array[count]);
				flush_dcache_page(dma->out_page_array[count]);
				page_cache_release(dma->out_page_array[count]);
			}
			kfree(dma->out_page_array);
		}

		/* Reset the record so a stale pointer is never reused */
		dma->in_page_array = NULL;
		dma->out_page_array = NULL;
		dma->in_num_pages = 0;
		dma->out_num_pages = 0;
		dma->in_map_array = NULL;
		dma->out_map_array = NULL;
		dma->in_map_num_entries = 0;
		dma->out_map_num_entries = 0;
	}

	sep->nr_dcb_creat = 0;
	sep->num_lli_tables_created = 0;

	dev_dbg(&sep->pdev->dev, "sep_free_dma_table_data_handler end\n");
	return 0;
}
479
480
481
482
483
484
485
486
487
488static int sep_request_daemon_mmap(struct file *filp,
489 struct vm_area_struct *vma)
490{
491 struct sep_device *sep = filp->private_data;
492 dma_addr_t bus_address;
493 int error = 0;
494
495 dev_dbg(&sep->pdev->dev, "daemon mmap start\n");
496
497 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
498 error = -EINVAL;
499 goto end_function;
500 }
501
502
503 bus_address = sep->shared_bus;
504
505 dev_dbg(&sep->pdev->dev, "bus_address is %08lx\n",
506 (unsigned long)bus_address);
507
508 if (remap_pfn_range(vma, vma->vm_start, bus_address >> PAGE_SHIFT,
509 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
510
511 dev_warn(&sep->pdev->dev, "remap_page_range failed\n");
512 error = -EAGAIN;
513 goto end_function;
514 }
515
516end_function:
517 dev_dbg(&sep->pdev->dev, "daemon mmap end\n");
518 return error;
519}
520
521
522
523
524
525
526
527
528
/*
 * sep_request_daemon_poll - poll handler for the request-daemon node
 * @filp: daemon file handle
 * @wait: poll table
 *
 * Waits on event_request_daemon. When the send and reply counters
 * match, inspects SEP-to-host GPR2: bit 30 signals a printf request
 * (POLLIN), bit 31 an NVS request (POLLPRI | POLLWRNORM). While the
 * counters differ a reply is still outstanding and 0 is returned.
 */
static unsigned int sep_request_daemon_poll(struct file *filp,
	poll_table *wait)
{
	u32 mask = 0;
	/* GPR2 register contents */
	u32 retval2;
	unsigned long lck_flags;
	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "daemon poll: start\n");

	poll_wait(filp, &sep->event_request_daemon, wait);

	dev_dbg(&sep->pdev->dev, "daemon poll: send_ct is %lx reply ct is %lx\n",
		sep->send_ct, sep->reply_ct);

	/* The counters must be compared atomically w.r.t. the senders */
	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
	/* Check if the data is ready */
	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);

		retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"daemon poll: data check (GPR2) is %x\n", retval2);

		/* Bit 30 set: SEP printf request pending */
		if ((retval2 >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "daemon poll: PRINTF request in\n");
			mask |= POLLIN;
			goto end_function;
		}

		/* Bit 31 set: SEP NVS request pending */
		if (retval2 >> 31) {
			dev_dbg(&sep->pdev->dev, "daemon poll: NVS request in\n");
			mask |= POLLPRI | POLLWRNORM;
		}
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
		dev_dbg(&sep->pdev->dev,
			"daemon poll: no reply received; returning 0\n");
		mask = 0;
	}
end_function:
	dev_dbg(&sep->pdev->dev, "daemon poll: exit\n");
	return mask;
}
575
576
577
578
579
580
581
582
/*
 * sep_release - release handler for the main SEP device node
 * @inode: inode of the node (unused)
 * @filp: file handle being closed
 *
 * If the closing process owns the current transaction, drops the mmap
 * and send-message lock bits, frees all DMA table resources and wakes
 * any waiters so another process can start a transaction. Always
 * returns 0.
 */
static int sep_release(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "Release for pid %d\n", current->pid);

	/* pid_doing_transaction is protected by sep_mutex */
	mutex_lock(&sep->sep_mutex);

	dev_dbg(&sep->pdev->dev, "waking up event and mmap_event\n");
	if (sep->pid_doing_transaction == current->pid) {
		/* Owner is going away: release the transaction locks */
		clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
		clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
		sep_free_dma_table_data_handler(sep);
		wake_up(&sep->event);
		sep->pid_doing_transaction = 0;
	}

	mutex_unlock(&sep->sep_mutex);
	return 0;
}
607
608
609
610
611
612
613
614
615static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
616{
617 dma_addr_t bus_addr;
618 struct sep_device *sep = filp->private_data;
619 unsigned long error = 0;
620
621 dev_dbg(&sep->pdev->dev, "mmap start\n");
622
623
624 wait_event_interruptible(sep->event,
625 test_and_set_bit(SEP_MMAP_LOCK_BIT,
626 &sep->in_use_flags) == 0);
627
628 if (signal_pending(current)) {
629 error = -EINTR;
630 goto end_function_with_error;
631 }
632
633
634
635
636
637
638
639
640
641
642 mutex_lock(&sep->sep_mutex);
643 sep->pid_doing_transaction = current->pid;
644 mutex_unlock(&sep->sep_mutex);
645
646
647 sep->data_pool_bytes_allocated = 0;
648 sep->num_of_data_allocations = 0;
649
650
651
652
653
654 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
655 error = -EINVAL;
656 goto end_function_with_error;
657 }
658
659 dev_dbg(&sep->pdev->dev, "shared_addr is %p\n", sep->shared_addr);
660
661
662 bus_addr = sep->shared_bus;
663
664 dev_dbg(&sep->pdev->dev,
665 "bus_address is %lx\n", (unsigned long)bus_addr);
666
667 if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
668 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
669 dev_warn(&sep->pdev->dev, "remap_page_range failed\n");
670 error = -EAGAIN;
671 goto end_function_with_error;
672 }
673 dev_dbg(&sep->pdev->dev, "mmap end\n");
674 goto end_function;
675
676end_function_with_error:
677
678 clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
679 mutex_lock(&sep->sep_mutex);
680 sep->pid_doing_transaction = 0;
681 mutex_unlock(&sep->sep_mutex);
682
683
684
685 dev_warn(&sep->pdev->dev, "mmap error - waking up event\n");
686 wake_up(&sep->event);
687
688end_function:
689 return error;
690}
691
692
693
694
695
696
697
698
699
/*
 * sep_poll - poll handler for the main SEP device node
 * @filp: file handle
 * @wait: poll table
 *
 * Only the process owning the current transaction (and only after a
 * send-command) may poll; anything else gets POLLERR. When the send
 * and reply counters match, SEP-to-host GPR2 is examined: bit 30
 * (printf) and bit 31 (request) are forwarded to the request daemon,
 * otherwise the message is treated as a normal reply and
 * POLLIN | POLLRDNORM is returned.
 */
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
	u32 mask = 0;
	/* GPR2 contents when a reply is ready */
	u32 retval = 0;
	/* GPR3 contents: non-zero signals a SEP-side error */
	u32 retval2 = 0;
	unsigned long lck_flags;

	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "poll: start\n");

	/* Only the transaction owner may poll */
	mutex_lock(&sep->sep_mutex);
	if (current->pid != sep->pid_doing_transaction) {
		dev_warn(&sep->pdev->dev, "poll; wrong pid\n");
		mask = POLLERR;
		mutex_unlock(&sep->sep_mutex);
		goto end_function;
	}
	mutex_unlock(&sep->sep_mutex);

	/* A send-command must have been issued before polling */
	if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
		dev_warn(&sep->pdev->dev, "poll; lock bit set\n");
		mask = POLLERR;
		goto end_function;
	}

	/* Register on the event wait queue */
	dev_dbg(&sep->pdev->dev, "poll: calling wait sep_event\n");

	poll_wait(filp, &sep->event, wait);

	dev_dbg(&sep->pdev->dev, "poll: send_ct is %lx reply ct is %lx\n",
		sep->send_ct, sep->reply_ct);

	/* GPR3 reports SEP-side errors */
	retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	if (retval2 != 0x0) {
		dev_warn(&sep->pdev->dev, "poll; poll error %x\n", retval2);
		mask |= POLLERR;
		goto end_function;
	}

	/* Compare the counters atomically w.r.t. the senders */
	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);

	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev, "poll: data ready check (GPR2) %x\n",
			retval);

		/* Bit 30: printf request — hand off to the daemon */
		if ((retval >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "poll: SEP printf request\n");
			wake_up(&sep->event_request_daemon);
			goto end_function;
		}

		/* Bit 31: SEP request — hand off; otherwise normal reply */
		if (retval >> 31) {
			dev_dbg(&sep->pdev->dev, "poll: SEP request\n");
			wake_up(&sep->event_request_daemon);
		} else {
			dev_dbg(&sep->pdev->dev, "poll: normal return\n");
			/* Allow the next send-command to proceed */
			clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
			sep_dump_message(sep);
			dev_dbg(&sep->pdev->dev,
				"poll; SEP reply POLLIN | POLLRDNORM\n");
			mask |= POLLIN | POLLRDNORM;
		}
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
		dev_dbg(&sep->pdev->dev,
			"poll; no reply received; returning mask of 0\n");
		mask = 0;
	}

end_function:
	dev_dbg(&sep->pdev->dev, "poll: end\n");
	return mask;
}
783
784
785
786
787
788
789
790
/*
 * sep_time_address - locate the time record in the shared area
 * @sep: the SEP device
 *
 * Returns a pointer to the two-word time slot (token, seconds) written
 * by sep_set_time(). NOTE(review): the byte offset is added before any
 * cast, so this relies on shared_addr being byte-addressed (void *) —
 * confirm against struct sep_device in sep_dev.h.
 */
static u32 *sep_time_address(struct sep_device *sep)
{
	return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
}
795
796
797
798
799
800
801
802
803static unsigned long sep_set_time(struct sep_device *sep)
804{
805 struct timeval time;
806 u32 *time_addr;
807
808
809 dev_dbg(&sep->pdev->dev, "sep_set_time start\n");
810
811 do_gettimeofday(&time);
812
813
814 time_addr = sep_time_address(sep);
815
816 time_addr[0] = SEP_TIME_VAL_TOKEN;
817 time_addr[1] = time.tv_sec;
818
819 dev_dbg(&sep->pdev->dev, "time.tv_sec is %lu\n", time.tv_sec);
820 dev_dbg(&sep->pdev->dev, "time_addr is %p\n", time_addr);
821 dev_dbg(&sep->pdev->dev, "sep->shared_addr is %p\n", sep->shared_addr);
822
823 return time.tv_sec;
824}
825
826
827
828
829
830
831
832
833
834static int sep_set_caller_id_handler(struct sep_device *sep, unsigned long arg)
835{
836 void __user *hash;
837 int error = 0;
838 int i;
839 struct caller_id_struct command_args;
840
841 dev_dbg(&sep->pdev->dev, "sep_set_caller_id_handler start\n");
842
843 for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
844 if (sep->caller_id_table[i].pid == 0)
845 break;
846 }
847
848 if (i == SEP_CALLER_ID_TABLE_NUM_ENTRIES) {
849 dev_warn(&sep->pdev->dev, "no more caller id entries left\n");
850 dev_warn(&sep->pdev->dev, "maximum number is %d\n",
851 SEP_CALLER_ID_TABLE_NUM_ENTRIES);
852 error = -EUSERS;
853 goto end_function;
854 }
855
856
857 if (copy_from_user(&command_args, (void __user *)arg,
858 sizeof(command_args))) {
859 error = -EFAULT;
860 goto end_function;
861 }
862
863 hash = (void __user *)(unsigned long)command_args.callerIdAddress;
864
865 if (!command_args.pid || !command_args.callerIdSizeInBytes) {
866 error = -EINVAL;
867 goto end_function;
868 }
869
870 dev_dbg(&sep->pdev->dev, "pid is %x\n", command_args.pid);
871 dev_dbg(&sep->pdev->dev, "callerIdSizeInBytes is %x\n",
872 command_args.callerIdSizeInBytes);
873
874 if (command_args.callerIdSizeInBytes >
875 SEP_CALLER_ID_HASH_SIZE_IN_BYTES) {
876 error = -EMSGSIZE;
877 goto end_function;
878 }
879
880 sep->caller_id_table[i].pid = command_args.pid;
881
882 if (copy_from_user(sep->caller_id_table[i].callerIdHash,
883 hash, command_args.callerIdSizeInBytes))
884 error = -EFAULT;
885end_function:
886 dev_dbg(&sep->pdev->dev, "sep_set_caller_id_handler end\n");
887 return error;
888}
889
890
891
892
893
894
895
896
897static int sep_set_current_caller_id(struct sep_device *sep)
898{
899 int i;
900 u32 *hash_buf_ptr;
901
902 dev_dbg(&sep->pdev->dev, "sep_set_current_caller_id start\n");
903 dev_dbg(&sep->pdev->dev, "current process is %d\n", current->pid);
904
905
906 memset(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
907 0, SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
908
909 for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
910 if (sep->caller_id_table[i].pid == current->pid) {
911 dev_dbg(&sep->pdev->dev, "Caller Id found\n");
912
913 memcpy(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
914 (void *)(sep->caller_id_table[i].callerIdHash),
915 SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
916 break;
917 }
918 }
919
920 hash_buf_ptr = (u32 *)sep->shared_addr +
921 SEP_CALLER_ID_OFFSET_BYTES;
922
923 for (i = 0; i < SEP_CALLER_ID_HASH_SIZE_IN_WORDS; i++)
924 hash_buf_ptr[i] = cpu_to_le32(hash_buf_ptr[i]);
925
926 dev_dbg(&sep->pdev->dev, "sep_set_current_caller_id end\n");
927 return 0;
928}
929
930
931
932
933
934
935
936
937
938
/*
 * sep_send_command_handler - kick off a command message to the SEP
 * @sep: the SEP device
 *
 * Claims the send-message lock bit (one command in flight at a time),
 * stamps the current time and caller id into the shared area, bumps
 * the send counter and rings the SEP doorbell via GPR0.
 *
 * Returns 0 on success or -EPROTO if a command is already in flight.
 */
static int sep_send_command_handler(struct sep_device *sep)
{
	unsigned long lck_flags;
	int error = 0;

	dev_dbg(&sep->pdev->dev, "sep_send_command_handler start\n");

	/* Only one outstanding command at a time */
	if (test_and_set_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
		error = -EPROTO;
		goto end_function;
	}
	sep_set_time(sep);

	sep_set_current_caller_id(sep);

	sep_dump_message(sep);

	/* The send counter is shared; bump it under the lock */
	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
	sep->send_ct++;
	spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);

	dev_dbg(&sep->pdev->dev,
		"sep_send_command_handler send_ct %lx reply_ct %lx\n",
		sep->send_ct, sep->reply_ct);

	/* Write command magic 0x2 to GPR0 to notify the SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

end_function:
	dev_dbg(&sep->pdev->dev, "sep_send_command_handler end\n");
	return error;
}
972
973
974
975
976
977
978
979
980
981
982
983
984static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
985 unsigned long arg)
986{
987 int error = 0;
988 struct alloc_struct command_args;
989
990
991 u32 *token_addr;
992
993 dev_dbg(&sep->pdev->dev,
994 "sep_allocate_data_pool_memory_handler start\n");
995
996 if (copy_from_user(&command_args, (void __user *)arg,
997 sizeof(struct alloc_struct))) {
998 error = -EFAULT;
999 goto end_function;
1000 }
1001
1002
1003 if ((sep->data_pool_bytes_allocated + command_args.num_bytes) >
1004 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
1005 error = -ENOMEM;
1006 goto end_function;
1007 }
1008
1009 dev_dbg(&sep->pdev->dev,
1010 "bytes_allocated: %x\n", (int)sep->data_pool_bytes_allocated);
1011 dev_dbg(&sep->pdev->dev,
1012 "offset: %x\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES);
1013
1014 command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
1015 sep->data_pool_bytes_allocated;
1016
1017 dev_dbg(&sep->pdev->dev,
1018 "command_args.offset: %x\n", command_args.offset);
1019
1020
1021 token_addr = (u32 *)(sep->shared_addr +
1022 SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES +
1023 (sep->num_of_data_allocations)*2*sizeof(u32));
1024
1025 dev_dbg(&sep->pdev->dev, "allocation offset: %x\n",
1026 SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES);
1027 dev_dbg(&sep->pdev->dev, "data pool token addr is %p\n", token_addr);
1028
1029 token_addr[0] = SEP_DATA_POOL_POINTERS_VAL_TOKEN;
1030 token_addr[1] = (u32)sep->shared_bus +
1031 SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
1032 sep->data_pool_bytes_allocated;
1033
1034 dev_dbg(&sep->pdev->dev, "data pool token [0] %x\n", token_addr[0]);
1035 dev_dbg(&sep->pdev->dev, "data pool token [1] %x\n", token_addr[1]);
1036
1037
1038 error = copy_to_user((void *)arg, (void *)&command_args,
1039 sizeof(struct alloc_struct));
1040 if (error) {
1041 error = -EFAULT;
1042 goto end_function;
1043 }
1044
1045
1046 sep->data_pool_bytes_allocated += command_args.num_bytes;
1047 sep->num_of_data_allocations += 1;
1048
1049 dev_dbg(&sep->pdev->dev, "data_allocations %d\n",
1050 sep->num_of_data_allocations);
1051 dev_dbg(&sep->pdev->dev, "bytes allocated %d\n",
1052 (int)sep->data_pool_bytes_allocated);
1053
1054end_function:
1055 dev_dbg(&sep->pdev->dev, "sep_allocate_data_pool_memory_handler end\n");
1056 return error;
1057}
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073static int sep_lock_kernel_pages(struct sep_device *sep,
1074 unsigned long kernel_virt_addr,
1075 u32 data_size,
1076 struct sep_lli_entry **lli_array_ptr,
1077 int in_out_flag)
1078
1079{
1080 int error = 0;
1081
1082 struct sep_lli_entry *lli_array;
1083
1084 struct sep_dma_map *map_array;
1085
1086 dev_dbg(&sep->pdev->dev, "sep_lock_kernel_pages start\n");
1087 dev_dbg(&sep->pdev->dev, "kernel_virt_addr is %08lx\n",
1088 (unsigned long)kernel_virt_addr);
1089 dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
1090
1091 lli_array = kmalloc(sizeof(struct sep_lli_entry), GFP_ATOMIC);
1092 if (!lli_array) {
1093 error = -ENOMEM;
1094 goto end_function;
1095 }
1096 map_array = kmalloc(sizeof(struct sep_dma_map), GFP_ATOMIC);
1097 if (!map_array) {
1098 error = -ENOMEM;
1099 goto end_function_with_error;
1100 }
1101
1102 map_array[0].dma_addr =
1103 dma_map_single(&sep->pdev->dev, (void *)kernel_virt_addr,
1104 data_size, DMA_BIDIRECTIONAL);
1105 map_array[0].size = data_size;
1106
1107
1108
1109
1110
1111
1112 lli_array[0].bus_address = (u32)map_array[0].dma_addr;
1113 lli_array[0].block_size = map_array[0].size;
1114
1115 dev_dbg(&sep->pdev->dev,
1116 "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
1117 (unsigned long)lli_array[0].bus_address,
1118 lli_array[0].block_size);
1119
1120
1121 if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1122 *lli_array_ptr = lli_array;
1123 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 1;
1124 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
1125 sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
1126 sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries = 1;
1127 } else {
1128 *lli_array_ptr = lli_array;
1129 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = 1;
1130 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
1131 sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
1132 sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries = 1;
1133 }
1134 goto end_function;
1135
1136end_function_with_error:
1137 kfree(lli_array);
1138
1139end_function:
1140 dev_dbg(&sep->pdev->dev, "sep_lock_kernel_pages end\n");
1141 return error;
1142}
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157static int sep_lock_user_pages(struct sep_device *sep,
1158 u32 app_virt_addr,
1159 u32 data_size,
1160 struct sep_lli_entry **lli_array_ptr,
1161 int in_out_flag)
1162
1163{
1164 int error = 0;
1165 u32 count;
1166 int result;
1167
1168 u32 end_page;
1169
1170 u32 start_page;
1171
1172 u32 num_pages;
1173
1174 struct page **page_array;
1175
1176 struct sep_lli_entry *lli_array;
1177
1178 struct sep_dma_map *map_array;
1179
1180 enum dma_data_direction dir;
1181
1182 dev_dbg(&sep->pdev->dev, "sep_lock_user_pages start\n");
1183
1184
1185 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1186 start_page = app_virt_addr >> PAGE_SHIFT;
1187 num_pages = end_page - start_page + 1;
1188
1189 dev_dbg(&sep->pdev->dev, "app_virt_addr is %x\n", app_virt_addr);
1190 dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
1191 dev_dbg(&sep->pdev->dev, "start_page is %x\n", start_page);
1192 dev_dbg(&sep->pdev->dev, "end_page is %x\n", end_page);
1193 dev_dbg(&sep->pdev->dev, "num_pages is %x\n", num_pages);
1194
1195 dev_dbg(&sep->pdev->dev, "starting page_array malloc\n");
1196
1197
1198 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
1199 if (!page_array) {
1200 error = -ENOMEM;
1201 goto end_function;
1202 }
1203 map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
1204 if (!map_array) {
1205 dev_warn(&sep->pdev->dev, "kmalloc for map_array failed\n");
1206 error = -ENOMEM;
1207 goto end_function_with_error1;
1208 }
1209
1210 lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
1211 GFP_ATOMIC);
1212
1213 if (!lli_array) {
1214 dev_warn(&sep->pdev->dev, "kmalloc for lli_array failed\n");
1215 error = -ENOMEM;
1216 goto end_function_with_error2;
1217 }
1218
1219 dev_dbg(&sep->pdev->dev, "starting get_user_pages\n");
1220
1221
1222 down_read(¤t->mm->mmap_sem);
1223 result = get_user_pages(current, current->mm, app_virt_addr,
1224 num_pages,
1225 ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
1226 0, page_array, NULL);
1227
1228 up_read(¤t->mm->mmap_sem);
1229
1230
1231 if (result != num_pages) {
1232 dev_warn(&sep->pdev->dev,
1233 "not all pages locked by get_user_pages\n");
1234 error = -ENOMEM;
1235 goto end_function_with_error3;
1236 }
1237
1238 dev_dbg(&sep->pdev->dev, "get_user_pages succeeded\n");
1239
1240
1241 if (in_out_flag == SEP_DRIVER_IN_FLAG)
1242 dir = DMA_TO_DEVICE;
1243 else
1244 dir = DMA_FROM_DEVICE;
1245
1246
1247
1248
1249
1250 for (count = 0; count < num_pages; count++) {
1251
1252 map_array[count].dma_addr =
1253 dma_map_page(&sep->pdev->dev, page_array[count],
1254 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
1255
1256 map_array[count].size = PAGE_SIZE;
1257
1258
1259 lli_array[count].bus_address = (u32)map_array[count].dma_addr;
1260 lli_array[count].block_size = PAGE_SIZE;
1261
1262 dev_warn(&sep->pdev->dev, "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
1263 count, (unsigned long)lli_array[count].bus_address,
1264 count, lli_array[count].block_size);
1265 }
1266
1267
1268 lli_array[0].bus_address =
1269 lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1270
1271
1272 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1273 lli_array[0].block_size = data_size;
1274 else
1275 lli_array[0].block_size =
1276 PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1277
1278 dev_dbg(&sep->pdev->dev,
1279 "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
1280 (unsigned long)lli_array[count].bus_address,
1281 lli_array[count].block_size);
1282
1283
1284 if (num_pages > 1) {
1285 lli_array[num_pages - 1].block_size =
1286 (app_virt_addr + data_size) & (~PAGE_MASK);
1287
1288 dev_warn(&sep->pdev->dev,
1289 "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
1290 num_pages - 1,
1291 (unsigned long)lli_array[count].bus_address,
1292 num_pages - 1,
1293 lli_array[count].block_size);
1294 }
1295
1296
1297 if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1298 *lli_array_ptr = lli_array;
1299 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = num_pages;
1300 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = page_array;
1301 sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
1302 sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries =
1303 num_pages;
1304 } else {
1305 *lli_array_ptr = lli_array;
1306 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = num_pages;
1307 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array =
1308 page_array;
1309 sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
1310 sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries =
1311 num_pages;
1312 }
1313 goto end_function;
1314
1315end_function_with_error3:
1316
1317 kfree(lli_array);
1318
1319end_function_with_error2:
1320 kfree(map_array);
1321
1322end_function_with_error1:
1323
1324 kfree(page_array);
1325
1326end_function:
1327 dev_dbg(&sep->pdev->dev, "sep_lock_user_pages end\n");
1328 return error;
1329}
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
/*
 * sep_calculate_lli_table_max_size - choose how many bytes the next
 * LLI table should describe.
 * @sep: the SEP device
 * @lli_in_array_ptr: remaining LLI entries to consume
 * @num_array_entries: number of remaining entries
 * @last_table_flag: out: set to 1 when the remaining entries all fit
 *	in one table (i.e. this is the final table)
 *
 * Sums block sizes over at most SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP-1
 * entries. If entries remain afterwards, the size may be reduced so
 * that the NEXT table is left with at least
 * SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE bytes of data.
 *
 * Returns the number of data bytes the next table should cover.
 */
static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
	struct sep_lli_entry *lli_in_array_ptr,
	u32 num_array_entries,
	u32 *last_table_flag)
{
	u32 counter;
	/* Bytes accumulated for this table */
	u32 table_data_size = 0;
	/* Bytes that would be left over for the next table */
	u32 next_table_data_size;

	*last_table_flag = 0;

	/*
	 * Accumulate as many entries as fit in one table (one slot is
	 * reserved, hence the -1) or until the input array runs out.
	 */
	for (counter = 0;
		(counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
		(counter < num_array_entries); counter++)
		table_data_size += lli_in_array_ptr[counter].block_size;

	/* All remaining entries fit: this is the last table */
	if (counter == num_array_entries) {
		/* Set the last table flag */
		*last_table_flag = 1;
		goto end_function;
	}

	/*
	 * Measure how much data would remain for the following table;
	 * stop counting once the minimum is reached.
	 */
	next_table_data_size = 0;
	for (; counter < num_array_entries; counter++) {
		next_table_data_size += lli_in_array_ptr[counter].block_size;
		if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
			break;
	}

	/*
	 * If the leftover is below the minimum, shrink this table so
	 * the next one gets at least the minimum amount of data.
	 */
	if (next_table_data_size &&
		next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)

		table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
			next_table_data_size);

	dev_dbg(&sep->pdev->dev, "table data size is %x\n",
		table_data_size);
end_function:
	return table_data_size;
}
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
/*
 * sep_build_lli_table - fill one LLI table from an array of entries
 * @sep: device
 * @lli_array_ptr: source entries (bus address + block size per page/run)
 * @lli_table_ptr: destination table (in the shared area)
 * @num_processed_entries_ptr: incremented by the number of source
 *	entries fully consumed into this table
 * @num_table_entries_ptr: set to the number of table entries written,
 *	including the terminating sentinel entry
 * @table_data_size: exact number of data bytes this table must cover
 *
 * Copies source entries into the table until @table_data_size bytes are
 * covered. If the final entry overshoots, its in-table size is trimmed
 * and the source entry is adjusted (address advanced, size reduced) so
 * the remainder is picked up by the next table. The table is terminated
 * with a sentinel entry (bus_address 0xffffffff, size 0).
 */
static void sep_build_lli_table(struct sep_device *sep,
	struct sep_lli_entry *lli_array_ptr,
	struct sep_lli_entry *lli_table_ptr,
	u32 *num_processed_entries_ptr,
	u32 *num_table_entries_ptr,
	u32 table_data_size)
{
	/* Bytes of @table_data_size covered so far */
	u32 curr_table_data_size;
	/* Index of the source entry currently being copied */
	u32 array_counter;

	dev_dbg(&sep->pdev->dev, "sep_build_lli_table start\n");

	/* Start counting at 1 to account for the sentinel entry */
	curr_table_data_size = 0;
	array_counter = 0;
	*num_table_entries_ptr = 1;

	dev_dbg(&sep->pdev->dev, "table_data_size is %x\n", table_data_size);

	/* Copy entries until the requested data size is covered */
	while (curr_table_data_size < table_data_size) {

		(*num_table_entries_ptr)++;

		/* Table entries are stored little-endian for the SEP */
		lli_table_ptr->bus_address =
			cpu_to_le32(lli_array_ptr[array_counter].bus_address);

		lli_table_ptr->block_size =
			cpu_to_le32(lli_array_ptr[array_counter].block_size);

		curr_table_data_size += lli_array_ptr[array_counter].block_size;

		dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n",
			lli_table_ptr);
		dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
			(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
			lli_table_ptr->block_size);

		/* Last entry overshot: trim it and push the rest forward */
		if (curr_table_data_size > table_data_size) {
			dev_dbg(&sep->pdev->dev,
				"curr_table_data_size too large\n");

			/*
			 * NOTE(review): the arithmetic below mixes
			 * cpu_to_le32() values with CPU-order values; it is
			 * an identity on little-endian (x86) but looks
			 * incorrect for big-endian — confirm intent.
			 */
			/* Trim the in-table size to the exact remainder */
			lli_table_ptr->block_size -=
				cpu_to_le32((curr_table_data_size - table_data_size));

			/* Advance the source entry past the consumed bytes */
			lli_array_ptr[array_counter].bus_address +=
				cpu_to_le32(lli_table_ptr->block_size);

			/* Leftover bytes stay in the source for next table */
			lli_array_ptr[array_counter].block_size =
				(curr_table_data_size - table_data_size);
		} else
			/* Entry fully consumed: move to the next one */
			array_counter++;

		dev_dbg(&sep->pdev->dev,
			"lli_table_ptr->bus_address is %08lx\n",
			(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev,
			"lli_table_ptr->block_size is %x\n",
			lli_table_ptr->block_size);

		/* Move to the next table entry */
		lli_table_ptr++;
	}

	/* Terminate the table with the sentinel entry */
	lli_table_ptr->bus_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n", lli_table_ptr);
	dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
		(unsigned long)lli_table_ptr->bus_address);
	dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
		lli_table_ptr->block_size);

	/* Report how many source entries were fully consumed */
	*num_processed_entries_ptr += array_counter;

	dev_dbg(&sep->pdev->dev, "num_processed_entries_ptr is %x\n",
		*num_processed_entries_ptr);

	dev_dbg(&sep->pdev->dev, "sep_build_lli_table end\n");
}
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
1518 void *virt_address)
1519{
1520 dev_dbg(&sep->pdev->dev, "sh virt to phys v %p\n", virt_address);
1521 dev_dbg(&sep->pdev->dev, "sh virt to phys p %08lx\n",
1522 (unsigned long)
1523 sep->shared_bus + (virt_address - sep->shared_addr));
1524
1525 return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
1526}
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
1539 dma_addr_t bus_address)
1540{
1541 dev_dbg(&sep->pdev->dev, "shared bus to virt b=%lx v=%lx\n",
1542 (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
1543 (size_t)(bus_address - sep->shared_bus)));
1544
1545 return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
1546}
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
/*
 * sep_debug_print_lli_tables - dump a chain of LLI tables to the debug log
 * @sep: device
 * @lli_table_ptr: first table (kernel virtual address in the shared area)
 * @num_table_entries: entries in the first table
 * @table_data_size: data bytes covered by the first table
 *
 * Walks the table chain until the sentinel bus address 0xffffffff is
 * reached. The last ("info") entry of each table packs the next table's
 * entry count and data size into block_size and its bus address into
 * bus_address, which is how the walk advances.
 */
static void sep_debug_print_lli_tables(struct sep_device *sep,
	struct sep_lli_entry *lli_table_ptr,
	unsigned long num_table_entries,
	unsigned long table_data_size)
{
	unsigned long table_count = 1;
	unsigned long entries_count = 0;

	dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables start\n");

	while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
		dev_dbg(&sep->pdev->dev,
			"lli table %08lx, table_data_size is %lu\n",
			table_count, table_data_size);
		dev_dbg(&sep->pdev->dev, "num_table_entries is %lu\n",
			num_table_entries);

		/* Print every entry of the current table */
		for (entries_count = 0; entries_count < num_table_entries;
			entries_count++, lli_table_ptr++) {

			dev_dbg(&sep->pdev->dev,
				"lli_table_ptr address is %08lx\n",
				(unsigned long) lli_table_ptr);

			dev_dbg(&sep->pdev->dev,
				"phys address is %08lx block size is %x\n",
				(unsigned long)lli_table_ptr->bus_address,
				lli_table_ptr->block_size);
		}

		/* Step back to the info (last) entry of this table */
		lli_table_ptr--;

		dev_dbg(&sep->pdev->dev,
			"phys lli_table_ptr->block_size is %x\n",
			lli_table_ptr->block_size);

		dev_dbg(&sep->pdev->dev,
			"phys lli_table_ptr->physical_address is %08lu\n",
			(unsigned long)lli_table_ptr->bus_address);

		/* Info entry: low 24 bits = data size, high 8 = entries */
		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;

		dev_dbg(&sep->pdev->dev,
			"phys table_data_size is %lu num_table_entries is"
			" %lu bus_address is%lu\n", table_data_size,
			num_table_entries, (unsigned long)lli_table_ptr->bus_address);

		/* Follow the chain to the next table, if any */
		if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry *)
				sep_shared_bus_to_virt(sep,
				(unsigned long)lli_table_ptr->bus_address);

		table_count++;
	}
	dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables end\n");
}
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627static void sep_prepare_empty_lli_table(struct sep_device *sep,
1628 dma_addr_t *lli_table_addr_ptr,
1629 u32 *num_entries_ptr,
1630 u32 *table_data_size_ptr)
1631{
1632 struct sep_lli_entry *lli_table_ptr;
1633
1634 dev_dbg(&sep->pdev->dev, "sep_prepare_empty_lli_table start\n");
1635
1636
1637 lli_table_ptr =
1638 (struct sep_lli_entry *)(sep->shared_addr +
1639 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1640 sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1641 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1642
1643 lli_table_ptr->bus_address = 0;
1644 lli_table_ptr->block_size = 0;
1645
1646 lli_table_ptr++;
1647 lli_table_ptr->bus_address = 0xFFFFFFFF;
1648 lli_table_ptr->block_size = 0;
1649
1650
1651 *lli_table_addr_ptr = sep->shared_bus +
1652 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1653 sep->num_lli_tables_created *
1654 sizeof(struct sep_lli_entry) *
1655 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1656
1657
1658 *num_entries_ptr = 2;
1659 *table_data_size_ptr = 0;
1660
1661
1662 sep->num_lli_tables_created++;
1663
1664 dev_dbg(&sep->pdev->dev, "sep_prepare_empty_lli_table start\n");
1665
1666}
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683static int sep_prepare_input_dma_table(struct sep_device *sep,
1684 unsigned long app_virt_addr,
1685 u32 data_size,
1686 u32 block_size,
1687 dma_addr_t *lli_table_ptr,
1688 u32 *num_entries_ptr,
1689 u32 *table_data_size_ptr,
1690 bool is_kva)
1691{
1692 int error = 0;
1693
1694 struct sep_lli_entry *info_entry_ptr;
1695
1696 struct sep_lli_entry *lli_array_ptr;
1697
1698 u32 current_entry = 0;
1699
1700 u32 sep_lli_entries = 0;
1701
1702 struct sep_lli_entry *in_lli_table_ptr;
1703
1704 u32 table_data_size = 0;
1705
1706 u32 last_table_flag = 0;
1707
1708 u32 num_entries_in_table = 0;
1709
1710 void *lli_table_alloc_addr = 0;
1711
1712 dev_dbg(&sep->pdev->dev, "sep_prepare_input_dma_table start\n");
1713 dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
1714 dev_dbg(&sep->pdev->dev, "block_size is %x\n", block_size);
1715
1716
1717 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
1718 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 0;
1719
1720
1721 lli_table_alloc_addr = (void *)(sep->shared_addr +
1722 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1723 sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1724 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1725
1726 if (data_size == 0) {
1727
1728 sep_prepare_empty_lli_table(sep, lli_table_ptr,
1729 num_entries_ptr, table_data_size_ptr);
1730 goto update_dcb_counter;
1731 }
1732
1733
1734 if (is_kva == true)
1735
1736 error = sep_lock_kernel_pages(sep, app_virt_addr,
1737 data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
1738 else
1739
1740
1741
1742
1743 error = sep_lock_user_pages(sep, app_virt_addr,
1744 data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
1745
1746 if (error)
1747 goto end_function;
1748
1749 dev_dbg(&sep->pdev->dev, "output sep_in_num_pages is %x\n",
1750 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
1751
1752 current_entry = 0;
1753 info_entry_ptr = NULL;
1754
1755 sep_lli_entries = sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages;
1756
1757
1758 while (current_entry < sep_lli_entries) {
1759
1760
1761 in_lli_table_ptr =
1762 (struct sep_lli_entry *)lli_table_alloc_addr;
1763
1764 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1765 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1766
1767 if (lli_table_alloc_addr >
1768 ((void *)sep->shared_addr +
1769 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1770 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
1771
1772 error = -ENOMEM;
1773 goto end_function_error;
1774
1775 }
1776
1777
1778 sep->num_lli_tables_created++;
1779
1780
1781 table_data_size = sep_calculate_lli_table_max_size(sep,
1782 &lli_array_ptr[current_entry],
1783 (sep_lli_entries - current_entry),
1784 &last_table_flag);
1785
1786
1787
1788
1789
1790 if (!last_table_flag)
1791 table_data_size =
1792 (table_data_size / block_size) * block_size;
1793
1794 dev_dbg(&sep->pdev->dev, "output table_data_size is %x\n",
1795 table_data_size);
1796
1797
1798 sep_build_lli_table(sep, &lli_array_ptr[current_entry],
1799 in_lli_table_ptr,
1800 ¤t_entry, &num_entries_in_table, table_data_size);
1801
1802 if (info_entry_ptr == NULL) {
1803
1804
1805 *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
1806 in_lli_table_ptr);
1807 *num_entries_ptr = num_entries_in_table;
1808 *table_data_size_ptr = table_data_size;
1809
1810 dev_dbg(&sep->pdev->dev,
1811 "output lli_table_in_ptr is %08lx\n",
1812 (unsigned long)*lli_table_ptr);
1813
1814 } else {
1815
1816 info_entry_ptr->bus_address =
1817 sep_shared_area_virt_to_bus(sep,
1818 in_lli_table_ptr);
1819 info_entry_ptr->block_size =
1820 ((num_entries_in_table) << 24) |
1821 (table_data_size);
1822 }
1823
1824 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1825 }
1826
1827 sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
1828 sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
1829 *num_entries_ptr, *table_data_size_ptr);
1830
1831 kfree(lli_array_ptr);
1832
1833update_dcb_counter:
1834
1835 sep->nr_dcb_creat++;
1836 goto end_function;
1837
1838end_function_error:
1839
1840 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
1841 kfree(lli_array_ptr);
1842 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
1843
1844end_function:
1845 dev_dbg(&sep->pdev->dev, "sep_prepare_input_dma_table end\n");
1846 return error;
1847
1848}
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869static int sep_construct_dma_tables_from_lli(
1870 struct sep_device *sep,
1871 struct sep_lli_entry *lli_in_array,
1872 u32 sep_in_lli_entries,
1873 struct sep_lli_entry *lli_out_array,
1874 u32 sep_out_lli_entries,
1875 u32 block_size,
1876 dma_addr_t *lli_table_in_ptr,
1877 dma_addr_t *lli_table_out_ptr,
1878 u32 *in_num_entries_ptr,
1879 u32 *out_num_entries_ptr,
1880 u32 *table_data_size_ptr)
1881{
1882
1883 void *lli_table_alloc_addr = 0;
1884
1885 struct sep_lli_entry *in_lli_table_ptr = NULL;
1886
1887 struct sep_lli_entry *out_lli_table_ptr = NULL;
1888
1889 struct sep_lli_entry *info_in_entry_ptr = NULL;
1890
1891 struct sep_lli_entry *info_out_entry_ptr = NULL;
1892
1893 u32 current_in_entry = 0;
1894
1895 u32 current_out_entry = 0;
1896
1897 u32 in_table_data_size = 0;
1898
1899 u32 out_table_data_size = 0;
1900
1901 u32 last_table_flag = 0;
1902
1903 u32 table_data_size = 0;
1904
1905 u32 num_entries_in_table = 0;
1906
1907 u32 num_entries_out_table = 0;
1908
1909 dev_dbg(&sep->pdev->dev, "sep_construct_dma_tables_from_lli start\n");
1910
1911
1912 lli_table_alloc_addr = (void *)(sep->shared_addr +
1913 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1914 (sep->num_lli_tables_created *
1915 (sizeof(struct sep_lli_entry) *
1916 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
1917
1918
1919 while (current_in_entry < sep_in_lli_entries) {
1920
1921 in_lli_table_ptr =
1922 (struct sep_lli_entry *)lli_table_alloc_addr;
1923
1924 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1925 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1926
1927
1928 out_lli_table_ptr =
1929 (struct sep_lli_entry *)lli_table_alloc_addr;
1930
1931
1932 if ((lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
1933 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
1934 ((void *)sep->shared_addr +
1935 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1936 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
1937
1938 dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
1939 return -ENOMEM;
1940 }
1941
1942
1943 sep->num_lli_tables_created += 2;
1944
1945 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1946 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1947
1948
1949 in_table_data_size =
1950 sep_calculate_lli_table_max_size(sep,
1951 &lli_in_array[current_in_entry],
1952 (sep_in_lli_entries - current_in_entry),
1953 &last_table_flag);
1954
1955
1956 out_table_data_size =
1957 sep_calculate_lli_table_max_size(sep,
1958 &lli_out_array[current_out_entry],
1959 (sep_out_lli_entries - current_out_entry),
1960 &last_table_flag);
1961
1962 dev_dbg(&sep->pdev->dev,
1963 "in_table_data_size is %x\n",
1964 in_table_data_size);
1965
1966 dev_dbg(&sep->pdev->dev,
1967 "out_table_data_size is %x\n",
1968 out_table_data_size);
1969
1970 table_data_size = in_table_data_size;
1971
1972 if (!last_table_flag) {
1973
1974
1975
1976
1977
1978 if (table_data_size > out_table_data_size)
1979 table_data_size = out_table_data_size;
1980
1981
1982
1983
1984
1985 table_data_size = (table_data_size / block_size) *
1986 block_size;
1987 }
1988
1989 dev_dbg(&sep->pdev->dev, "table_data_size is %x\n",
1990 table_data_size);
1991
1992
1993 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
1994 in_lli_table_ptr,
1995 ¤t_in_entry,
1996 &num_entries_in_table,
1997 table_data_size);
1998
1999
2000 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
2001 out_lli_table_ptr,
2002 ¤t_out_entry,
2003 &num_entries_out_table,
2004 table_data_size);
2005
2006
2007 if (info_in_entry_ptr == NULL) {
2008
2009 *lli_table_in_ptr =
2010 sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
2011
2012 *in_num_entries_ptr = num_entries_in_table;
2013
2014 *lli_table_out_ptr =
2015 sep_shared_area_virt_to_bus(sep,
2016 out_lli_table_ptr);
2017
2018 *out_num_entries_ptr = num_entries_out_table;
2019 *table_data_size_ptr = table_data_size;
2020
2021 dev_dbg(&sep->pdev->dev,
2022 "output lli_table_in_ptr is %08lx\n",
2023 (unsigned long)*lli_table_in_ptr);
2024 dev_dbg(&sep->pdev->dev,
2025 "output lli_table_out_ptr is %08lx\n",
2026 (unsigned long)*lli_table_out_ptr);
2027 } else {
2028
2029 info_in_entry_ptr->bus_address =
2030 sep_shared_area_virt_to_bus(sep,
2031 in_lli_table_ptr);
2032
2033 info_in_entry_ptr->block_size =
2034 ((num_entries_in_table) << 24) |
2035 (table_data_size);
2036
2037
2038 info_out_entry_ptr->bus_address =
2039 sep_shared_area_virt_to_bus(sep,
2040 out_lli_table_ptr);
2041
2042 info_out_entry_ptr->block_size =
2043 ((num_entries_out_table) << 24) |
2044 (table_data_size);
2045
2046 dev_dbg(&sep->pdev->dev,
2047 "output lli_table_in_ptr:%08lx %08x\n",
2048 (unsigned long)info_in_entry_ptr->bus_address,
2049 info_in_entry_ptr->block_size);
2050
2051 dev_dbg(&sep->pdev->dev,
2052 "output lli_table_out_ptr:%08lx %08x\n",
2053 (unsigned long)info_out_entry_ptr->bus_address,
2054 info_out_entry_ptr->block_size);
2055 }
2056
2057
2058 info_in_entry_ptr = in_lli_table_ptr +
2059 num_entries_in_table - 1;
2060 info_out_entry_ptr = out_lli_table_ptr +
2061 num_entries_out_table - 1;
2062
2063 dev_dbg(&sep->pdev->dev,
2064 "output num_entries_out_table is %x\n",
2065 (u32)num_entries_out_table);
2066 dev_dbg(&sep->pdev->dev,
2067 "output info_in_entry_ptr is %lx\n",
2068 (unsigned long)info_in_entry_ptr);
2069 dev_dbg(&sep->pdev->dev,
2070 "output info_out_entry_ptr is %lx\n",
2071 (unsigned long)info_out_entry_ptr);
2072 }
2073
2074
2075 sep_debug_print_lli_tables(sep,
2076 (struct sep_lli_entry *)
2077 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
2078 *in_num_entries_ptr,
2079 *table_data_size_ptr);
2080
2081
2082 sep_debug_print_lli_tables(sep,
2083 (struct sep_lli_entry *)
2084 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
2085 *out_num_entries_ptr,
2086 *table_data_size_ptr);
2087
2088 dev_dbg(&sep->pdev->dev, "sep_construct_dma_tables_from_lli end\n");
2089 return 0;
2090}
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111static int sep_prepare_input_output_dma_table(struct sep_device *sep,
2112 unsigned long app_virt_in_addr,
2113 unsigned long app_virt_out_addr,
2114 u32 data_size,
2115 u32 block_size,
2116 dma_addr_t *lli_table_in_ptr,
2117 dma_addr_t *lli_table_out_ptr,
2118 u32 *in_num_entries_ptr,
2119 u32 *out_num_entries_ptr,
2120 u32 *table_data_size_ptr,
2121 bool is_kva)
2122
2123{
2124 int error = 0;
2125
2126 struct sep_lli_entry *lli_in_array;
2127
2128 struct sep_lli_entry *lli_out_array;
2129
2130 dev_dbg(&sep->pdev->dev, "sep_prepare_input_output_dma_table start\n");
2131
2132 if (data_size == 0) {
2133
2134 sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
2135 in_num_entries_ptr, table_data_size_ptr);
2136
2137 sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
2138 out_num_entries_ptr, table_data_size_ptr);
2139
2140 goto update_dcb_counter;
2141 }
2142
2143
2144 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
2145 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
2146
2147
2148 if (is_kva == true) {
2149 error = sep_lock_kernel_pages(sep, app_virt_in_addr,
2150 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
2151
2152 if (error) {
2153 dev_warn(&sep->pdev->dev,
2154 "lock kernel for in failed\n");
2155 goto end_function;
2156 }
2157
2158 error = sep_lock_kernel_pages(sep, app_virt_out_addr,
2159 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
2160
2161 if (error) {
2162 dev_warn(&sep->pdev->dev,
2163 "lock kernel for out failed\n");
2164 goto end_function;
2165 }
2166 }
2167
2168 else {
2169 error = sep_lock_user_pages(sep, app_virt_in_addr,
2170 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
2171 if (error) {
2172 dev_warn(&sep->pdev->dev,
2173 "sep_lock_user_pages for input virtual buffer failed\n");
2174 goto end_function;
2175 }
2176
2177 error = sep_lock_user_pages(sep, app_virt_out_addr,
2178 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
2179
2180 if (error) {
2181 dev_warn(&sep->pdev->dev,
2182 "sep_lock_user_pages for output virtual buffer failed\n");
2183 goto end_function_free_lli_in;
2184 }
2185 }
2186
2187 dev_dbg(&sep->pdev->dev, "sep_in_num_pages is %x\n",
2188 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
2189 dev_dbg(&sep->pdev->dev, "sep_out_num_pages is %x\n",
2190 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages);
2191 dev_dbg(&sep->pdev->dev, "SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n",
2192 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
2193
2194
2195 error = sep_construct_dma_tables_from_lli(sep, lli_in_array,
2196 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages,
2197 lli_out_array,
2198 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages,
2199 block_size, lli_table_in_ptr, lli_table_out_ptr,
2200 in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
2201
2202 if (error) {
2203 dev_warn(&sep->pdev->dev,
2204 "sep_construct_dma_tables_from_lli failed\n");
2205 goto end_function_with_error;
2206 }
2207
2208 kfree(lli_out_array);
2209 kfree(lli_in_array);
2210
2211update_dcb_counter:
2212
2213 sep->nr_dcb_creat++;
2214
2215 dev_dbg(&sep->pdev->dev, "in_num_entries_ptr is %08x\n",
2216 *in_num_entries_ptr);
2217 dev_dbg(&sep->pdev->dev, "out_num_entries_ptr is %08x\n",
2218 *out_num_entries_ptr);
2219 dev_dbg(&sep->pdev->dev, "table_data_size_ptr is %08x\n",
2220 *table_data_size_ptr);
2221
2222 goto end_function;
2223
2224end_function_with_error:
2225 kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_map_array);
2226 kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_page_array);
2227 kfree(lli_out_array);
2228
2229
2230end_function_free_lli_in:
2231 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
2232 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
2233 kfree(lli_in_array);
2234
2235end_function:
2236 dev_dbg(&sep->pdev->dev,
2237 "sep_prepare_input_output_dma_table end result = %d\n", error);
2238
2239 return error;
2240
2241}
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2260 unsigned long app_in_address,
2261 unsigned long app_out_address,
2262 u32 data_in_size,
2263 u32 block_size,
2264 u32 tail_block_size,
2265 bool isapplet,
2266 bool is_kva)
2267{
2268 int error = 0;
2269
2270 u32 tail_size = 0;
2271
2272 struct sep_dcblock *dcb_table_ptr = NULL;
2273
2274 dma_addr_t in_first_mlli_address = 0;
2275
2276 u32 in_first_num_entries = 0;
2277
2278 dma_addr_t out_first_mlli_address = 0;
2279
2280 u32 out_first_num_entries = 0;
2281
2282 u32 first_data_size = 0;
2283
2284 dev_dbg(&sep->pdev->dev, "prepare_input_output_dma_table_in_dcb start\n");
2285
2286 if (sep->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2287
2288 dev_warn(&sep->pdev->dev, "no more DCBs available\n");
2289 error = -ENOSPC;
2290 goto end_function;
2291 }
2292
2293
2294 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2295 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2296 (sep->nr_dcb_creat * sizeof(struct sep_dcblock)));
2297
2298
2299 dcb_table_ptr->input_mlli_address = 0;
2300 dcb_table_ptr->input_mlli_num_entries = 0;
2301 dcb_table_ptr->input_mlli_data_size = 0;
2302 dcb_table_ptr->output_mlli_address = 0;
2303 dcb_table_ptr->output_mlli_num_entries = 0;
2304 dcb_table_ptr->output_mlli_data_size = 0;
2305 dcb_table_ptr->tail_data_size = 0;
2306 dcb_table_ptr->out_vr_tail_pt = 0;
2307
2308 if (isapplet == true) {
2309 tail_size = data_in_size % block_size;
2310 if (tail_size) {
2311 if (data_in_size < tail_block_size) {
2312 dev_warn(&sep->pdev->dev, "data in size smaller than tail block size\n");
2313 error = -ENOSPC;
2314 goto end_function;
2315 }
2316 if (tail_block_size)
2317
2318
2319
2320
2321 tail_size = tail_block_size +
2322 ((data_in_size -
2323 tail_block_size) % block_size);
2324 }
2325
2326
2327 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2328 if (is_kva == true) {
2329 memcpy(dcb_table_ptr->tail_data,
2330 (void *)app_in_address, data_in_size);
2331 } else {
2332 if (copy_from_user(dcb_table_ptr->tail_data,
2333 (void __user *)app_in_address,
2334 data_in_size)) {
2335 error = -EFAULT;
2336 goto end_function;
2337 }
2338 }
2339
2340 dcb_table_ptr->tail_data_size = data_in_size;
2341
2342
2343 if (app_out_address)
2344 dcb_table_ptr->out_vr_tail_pt =
2345 (u32)app_out_address;
2346
2347
2348
2349
2350
2351
2352 tail_size = 0x0;
2353 data_in_size = 0x0;
2354 }
2355 if (tail_size) {
2356 if (is_kva == true) {
2357 memcpy(dcb_table_ptr->tail_data,
2358 (void *)(app_in_address + data_in_size -
2359 tail_size), tail_size);
2360 } else {
2361
2362 if (copy_from_user(dcb_table_ptr->tail_data,
2363 (void *)(app_in_address +
2364 data_in_size - tail_size), tail_size)) {
2365 error = -EFAULT;
2366 goto end_function;
2367 }
2368 }
2369 if (app_out_address)
2370
2371
2372
2373
2374 dcb_table_ptr->out_vr_tail_pt =
2375 (u32)app_out_address + data_in_size
2376 - tail_size;
2377
2378
2379 dcb_table_ptr->tail_data_size = tail_size;
2380
2381
2382
2383
2384 data_in_size = (data_in_size - tail_size);
2385 }
2386 }
2387
2388 if (app_out_address) {
2389
2390 error = sep_prepare_input_output_dma_table(sep,
2391 app_in_address,
2392 app_out_address,
2393 data_in_size,
2394 block_size,
2395 &in_first_mlli_address,
2396 &out_first_mlli_address,
2397 &in_first_num_entries,
2398 &out_first_num_entries,
2399 &first_data_size,
2400 is_kva);
2401 } else {
2402
2403 error = sep_prepare_input_dma_table(sep,
2404 app_in_address,
2405 data_in_size,
2406 block_size,
2407 &in_first_mlli_address,
2408 &in_first_num_entries,
2409 &first_data_size,
2410 is_kva);
2411 }
2412
2413 if (error) {
2414 dev_warn(&sep->pdev->dev, "prepare DMA table call failed from prepare DCB call\n");
2415 goto end_function;
2416 }
2417
2418
2419 dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2420 dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2421 dcb_table_ptr->input_mlli_data_size = first_data_size;
2422 dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2423 dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2424 dcb_table_ptr->output_mlli_data_size = first_data_size;
2425
2426end_function:
2427 dev_dbg(&sep->pdev->dev,
2428 "sep_prepare_input_output_dma_table_in_dcb end\n");
2429 return error;
2430
2431}
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
2444 unsigned long arg)
2445{
2446 int error = 0;
2447
2448
2449 struct bld_syn_tab_struct command_args;
2450
2451 dev_dbg(&sep->pdev->dev,
2452 "sep_create_sync_dma_tables_handler start\n");
2453
2454 if (copy_from_user(&command_args, (void __user *)arg,
2455 sizeof(struct bld_syn_tab_struct))) {
2456 error = -EFAULT;
2457 goto end_function;
2458 }
2459
2460 dev_dbg(&sep->pdev->dev, "app_in_address is %08llx\n",
2461 command_args.app_in_address);
2462 dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
2463 command_args.app_out_address);
2464 dev_dbg(&sep->pdev->dev, "data_size is %u\n",
2465 command_args.data_in_size);
2466 dev_dbg(&sep->pdev->dev, "block_size is %u\n",
2467 command_args.block_size);
2468
2469
2470 if (!command_args.app_in_address) {
2471 error = -EINVAL;
2472 goto end_function;
2473 }
2474
2475 error = sep_prepare_input_output_dma_table_in_dcb(sep,
2476 (unsigned long)command_args.app_in_address,
2477 (unsigned long)command_args.app_out_address,
2478 command_args.data_in_size,
2479 command_args.block_size,
2480 0x0,
2481 false,
2482 false);
2483
2484end_function:
2485 dev_dbg(&sep->pdev->dev, "sep_create_sync_dma_tables_handler end\n");
2486 return error;
2487}
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2498 bool is_kva)
2499{
2500 int i = 0;
2501 int error = 0;
2502 int error_temp = 0;
2503 struct sep_dcblock *dcb_table_ptr;
2504 unsigned long pt_hold;
2505 void *tail_pt;
2506
2507 dev_dbg(&sep->pdev->dev, "sep_free_dma_tables_and_dcb start\n");
2508
2509 if (isapplet == true) {
2510
2511 dcb_table_ptr = (struct sep_dcblock *)
2512 (sep->shared_addr +
2513 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2514
2515
2516 for (i = 0; i < sep->nr_dcb_creat; i++, dcb_table_ptr++) {
2517 if (dcb_table_ptr->out_vr_tail_pt) {
2518 pt_hold = (unsigned long)dcb_table_ptr->out_vr_tail_pt;
2519 tail_pt = (void *)pt_hold;
2520 if (is_kva == true) {
2521 memcpy(tail_pt,
2522 dcb_table_ptr->tail_data,
2523 dcb_table_ptr->tail_data_size);
2524 } else {
2525 error_temp = copy_to_user(
2526 tail_pt,
2527 dcb_table_ptr->tail_data,
2528 dcb_table_ptr->tail_data_size);
2529 }
2530 if (error_temp) {
2531
2532 error = -EFAULT;
2533 break;
2534 }
2535 }
2536 }
2537 }
2538
2539 sep_free_dma_table_data_handler(sep);
2540
2541 dev_dbg(&sep->pdev->dev, "sep_free_dma_tables_and_dcb end\n");
2542 return error;
2543}
2544
2545
2546
2547
2548
2549
2550
2551static int sep_get_static_pool_addr_handler(struct sep_device *sep)
2552{
2553 u32 *static_pool_addr = NULL;
2554
2555 dev_dbg(&sep->pdev->dev, "sep_get_static_pool_addr_handler start\n");
2556
2557 static_pool_addr = (u32 *)(sep->shared_addr +
2558 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
2559
2560 static_pool_addr[0] = SEP_STATIC_POOL_VAL_TOKEN;
2561 static_pool_addr[1] = (u32)sep->shared_bus +
2562 SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2563
2564 dev_dbg(&sep->pdev->dev, "static pool: physical %x\n",
2565 (u32)static_pool_addr[1]);
2566
2567 dev_dbg(&sep->pdev->dev, "sep_get_static_pool_addr_handler end\n");
2568
2569 return 0;
2570}
2571
2572
2573
2574
2575
2576static int sep_start_handler(struct sep_device *sep)
2577{
2578 unsigned long reg_val;
2579 unsigned long error = 0;
2580
2581 dev_dbg(&sep->pdev->dev, "sep_start_handler start\n");
2582
2583
2584 do {
2585 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2586 } while (!reg_val);
2587
2588
2589 if (reg_val == 0x1)
2590
2591 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2592 dev_dbg(&sep->pdev->dev, "sep_start_handler end\n");
2593 return error;
2594}
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604static u32 sep_check_sum_calc(u8 *data, u32 length)
2605{
2606 u32 sum = 0;
2607 u16 *Tdata = (u16 *)data;
2608
2609 while (length > 1) {
2610
2611 sum += *Tdata++;
2612 length -= 2;
2613 }
2614
2615
2616 if (length > 0)
2617 sum += *(u8 *)Tdata;
2618
2619
2620 while (sum>>16)
2621 sum = (sum & 0xffff) + (sum >> 16);
2622
2623 return ~sum & 0xFFFF;
2624}
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641static int sep_init_handler(struct sep_device *sep, unsigned long arg)
2642{
2643 u32 message_buff[14];
2644 u32 counter;
2645 int error = 0;
2646 u32 reg_val;
2647 dma_addr_t new_base_addr;
2648 unsigned long addr_hold;
2649 struct init_struct command_args;
2650
2651 dev_dbg(&sep->pdev->dev, "sep_init_handler start\n");
2652
2653
2654 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2655
2656 if (reg_val != 0x2) {
2657 error = SEP_ALREADY_INITIALIZED_ERR;
2658 dev_warn(&sep->pdev->dev, "init; device already initialized\n");
2659 goto end_function;
2660 }
2661
2662
2663 if (!capable(CAP_SYS_ADMIN)) {
2664 error = -EACCES;
2665 goto end_function;
2666 }
2667
2668
2669 error = copy_from_user(&command_args, (void __user *)arg,
2670 sizeof(struct init_struct));
2671
2672 if (error) {
2673 error = -EFAULT;
2674 goto end_function;
2675 }
2676
2677
2678 if (!command_args.message_addr || !command_args.sep_sram_addr ||
2679 command_args.message_size_in_words > 14) {
2680 error = -EINVAL;
2681 goto end_function;
2682 }
2683
2684
2685 addr_hold = (unsigned long)command_args.message_addr;
2686 error = copy_from_user(message_buff,
2687 (void __user *)addr_hold,
2688 command_args.message_size_in_words*sizeof(u32));
2689
2690 if (error) {
2691 error = -EFAULT;
2692 goto end_function;
2693 }
2694
2695
2696 error = sep_load_firmware(sep);
2697
2698 if (error) {
2699 dev_warn(&sep->pdev->dev,
2700 "init; copy SEP init message failed %x\n", error);
2701 goto end_function;
2702 }
2703
2704
2705 new_base_addr = sep->shared_bus;
2706
2707 if (sep->resident_bus < new_base_addr)
2708 new_base_addr = sep->resident_bus;
2709
2710 if (sep->cache_bus < new_base_addr)
2711 new_base_addr = sep->cache_bus;
2712
2713 if (sep->dcache_bus < new_base_addr)
2714 new_base_addr = sep->dcache_bus;
2715
2716
2717 message_buff[3] = (u32)new_base_addr;
2718 message_buff[4] = (u32)sep->shared_bus;
2719 message_buff[6] = (u32)sep->resident_bus;
2720 message_buff[7] = (u32)sep->cache_bus;
2721 message_buff[8] = (u32)sep->dcache_bus;
2722
2723 message_buff[command_args.message_size_in_words - 1] = 0x0;
2724 message_buff[command_args.message_size_in_words - 1] =
2725 sep_check_sum_calc((u8 *)message_buff,
2726 command_args.message_size_in_words*sizeof(u32));
2727
2728
2729 for (counter = 0; counter < command_args.message_size_in_words;
2730 counter++)
2731 dev_dbg(&sep->pdev->dev, "init; SEP message word %d is %x\n",
2732 counter, message_buff[counter]);
2733
2734
2735 sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, command_args.sep_sram_addr);
2736
2737
2738 for (counter = 0; counter < command_args.message_size_in_words;
2739 counter++) {
2740 sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR,
2741 message_buff[counter]);
2742 sep_wait_sram_write(sep);
2743 }
2744
2745
2746 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
2747
2748
2749 dev_dbg(&sep->pdev->dev, "init; waiting for msg response\n");
2750
2751 do {
2752 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2753 } while (!(reg_val & 0xFFFFFFFD));
2754
2755 if (reg_val == 0x1) {
2756 dev_warn(&sep->pdev->dev, "init; device int failed\n");
2757 error = sep_read_reg(sep, 0x8060);
2758 dev_warn(&sep->pdev->dev, "init; sw monitor is %x\n", error);
2759 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2760 dev_warn(&sep->pdev->dev, "init; error is %x\n", error);
2761 goto end_function;
2762 }
2763 dev_dbg(&sep->pdev->dev, "init; end CC INIT, reg_val is %x\n", reg_val);
2764
2765
2766 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x10);
2767
2768
2769 dev_dbg(&sep->pdev->dev, "init; waiting for zero set response\n");
2770
2771 do {
2772 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2773 } while (reg_val != 0);
2774
2775end_function:
2776 dev_dbg(&sep->pdev->dev, "init is done\n");
2777 return error;
2778}
2779
2780
2781
2782
2783
2784
2785
/**
 * sep_end_transaction_handler - finish the calling process's transaction.
 * @sep: SEP device
 *
 * Scrubs the data-pool allocation bookkeeping in the shared area, frees
 * any DMA tables built during the transaction, releases the mmap lock
 * bit and the owning pid, then wakes processes waiting to start their
 * own transaction.  Always returns 0.
 *
 * Note the ordering: the shared-area bookkeeping and DMA tables are torn
 * down *before* ownership is released, so no other process can observe
 * stale state.
 */
static int sep_end_transaction_handler(struct sep_device *sep)
{
	dev_dbg(&sep->pdev->dev, "sep_end_transaction_handler start\n");

	/* Clear the data-pool allocation records (two u32 words per
	 * allocation) so stale bus addresses are not visible to the next
	 * transaction owner */
	memset((void *)(sep->shared_addr +
		SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES),
		0, sep->num_of_data_allocations*2*sizeof(u32));

	/* Tear down DMA tables created for this transaction */
	sep_free_dma_table_data_handler(sep);

	/* Allow another process to mmap the shared area */
	clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);

	/* Release transaction ownership; pid_doing_transaction is always
	 * accessed under sep_mutex (see sep_ioctl) */
	mutex_lock(&sep->sep_mutex);
	sep->pid_doing_transaction = 0;
	mutex_unlock(&sep->sep_mutex);

	/* Wake anyone blocked waiting for the transaction to end */
	wake_up(&sep->event);

	dev_dbg(&sep->pdev->dev, "waking up event\n");
	dev_dbg(&sep->pdev->dev, "sep_end_transaction_handler end\n");

	return 0;
}
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg)
2826{
2827 int error;
2828
2829 struct build_dcb_struct command_args;
2830
2831 dev_dbg(&sep->pdev->dev, "sep_prepare_dcb_handler start\n");
2832
2833
2834 if (copy_from_user(&command_args, (void __user *)arg,
2835 sizeof(struct build_dcb_struct))) {
2836 error = -EFAULT;
2837 goto end_function;
2838 }
2839
2840 dev_dbg(&sep->pdev->dev, "app_in_address is %08llx\n",
2841 command_args.app_in_address);
2842 dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
2843 command_args.app_out_address);
2844 dev_dbg(&sep->pdev->dev, "data_size is %x\n",
2845 command_args.data_in_size);
2846 dev_dbg(&sep->pdev->dev, "block_size is %x\n",
2847 command_args.block_size);
2848 dev_dbg(&sep->pdev->dev, "tail block_size is %x\n",
2849 command_args.tail_block_size);
2850
2851 error = sep_prepare_input_output_dma_table_in_dcb(sep,
2852 (unsigned long)command_args.app_in_address,
2853 (unsigned long)command_args.app_out_address,
2854 command_args.data_in_size, command_args.block_size,
2855 command_args.tail_block_size, true, false);
2856
2857end_function:
2858 dev_dbg(&sep->pdev->dev, "sep_prepare_dcb_handler end\n");
2859 return error;
2860
2861}
2862
2863
2864
2865
2866
2867
2868
2869
2870static int sep_free_dcb_handler(struct sep_device *sep)
2871{
2872 int error ;
2873
2874 dev_dbg(&sep->pdev->dev, "sep_prepare_dcb_handler start\n");
2875 dev_dbg(&sep->pdev->dev, "num of DCBs %x\n", sep->nr_dcb_creat);
2876
2877 error = sep_free_dma_tables_and_dcb(sep, false, false);
2878
2879 dev_dbg(&sep->pdev->dev, "sep_free_dcb_handler end\n");
2880 return error;
2881}
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891static int sep_rar_prepare_output_msg_handler(struct sep_device *sep,
2892 unsigned long arg)
2893{
2894 int error = 0;
2895
2896 struct rar_hndl_to_bus_struct command_args;
2897 struct RAR_buffer rar_buf;
2898
2899 dma_addr_t rar_bus = 0;
2900
2901 u32 *rar_addr;
2902
2903 dev_dbg(&sep->pdev->dev, "sep_rar_prepare_output_msg_handler start\n");
2904
2905
2906 if (copy_from_user(&command_args, (void __user *)arg,
2907 sizeof(command_args))) {
2908 error = -EFAULT;
2909 goto end_function;
2910 }
2911
2912
2913 if (command_args.rar_handle) {
2914 memset(&rar_buf, 0, sizeof(rar_buf));
2915 rar_buf.info.handle = (u32)command_args.rar_handle;
2916
2917 if (rar_handle_to_bus(&rar_buf, 1) != 1) {
2918 dev_dbg(&sep->pdev->dev, "rar_handle_to_bus failure\n");
2919 error = -EFAULT;
2920 goto end_function;
2921 }
2922 rar_bus = rar_buf.bus_address;
2923 }
2924 dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus);
2925
2926
2927 rar_addr = (u32 *)(sep->shared_addr +
2928 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
2929
2930
2931 rar_addr[0] = SEP_RAR_VAL_TOKEN;
2932 rar_addr[1] = rar_bus;
2933
2934end_function:
2935 dev_dbg(&sep->pdev->dev, "sep_rar_prepare_output_msg_handler start\n");
2936 return error;
2937}
2938
2939
2940
2941
2942
2943
2944
2945
2946static int sep_realloc_ext_cache_handler(struct sep_device *sep,
2947 unsigned long arg)
2948{
2949
2950 u32 *system_addr;
2951
2952
2953 system_addr = (u32 *)(sep->shared_addr +
2954 SEP_DRIVER_SYSTEM_EXT_CACHE_ADDR_OFFSET_IN_BYTES);
2955
2956
2957 system_addr[0] = SEP_EXT_CACHE_ADDR_VAL_TOKEN;
2958 dev_dbg(&sep->pdev->dev, "ext cache init; system addr 0 is %x\n",
2959 system_addr[0]);
2960 system_addr[1] = sep->extapp_bus;
2961 dev_dbg(&sep->pdev->dev, "ext cache init; system addr 1 is %x\n",
2962 system_addr[1]);
2963
2964 return 0;
2965}
2966
2967
2968
2969
2970
2971
2972
2973
2974
/**
 * sep_ioctl - main ioctl dispatcher for the SEP device node.
 * @filp: file, private_data holds the struct sep_device
 * @cmd:  ioctl command number
 * @arg:  command-specific argument (often a user pointer)
 *
 * Rejects callers that do not own the current transaction, validates the
 * ioctl magic, then dispatches under ioctl_mutex so only one command
 * runs against the device at a time.  Commands marked revision 0 only
 * are refused with -EPERM on later silicon.
 *
 * Returns 0 or a handler result on success, -EACCES if another pid owns
 * the transaction, -ENOTTY for a foreign or unknown command.
 */
static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int error = 0;
	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "ioctl start\n");

	dev_dbg(&sep->pdev->dev, "cmd is %x\n", cmd);

	/* Only the pid that started the transaction (or anyone, when no
	 * transaction is active) may issue commands.  The check is done
	 * under sep_mutex; the mutex is dropped before dispatch */
	mutex_lock(&sep->sep_mutex);
	if ((current->pid != sep->pid_doing_transaction) &&
		(sep->pid_doing_transaction != 0)) {
		dev_dbg(&sep->pdev->dev, "ioctl pid is not owner\n");
		mutex_unlock(&sep->sep_mutex);
		error = -EACCES;
		goto end_function;
	}

	mutex_unlock(&sep->sep_mutex);

	/* Refuse ioctls that are not ours */
	if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
		error = -ENOTTY;
		goto end_function;
	}

	/* Serialize all command handling against other ioctl paths */
	mutex_lock(&sep->ioctl_mutex);

	switch (cmd) {
	case SEP_IOCSENDSEPCOMMAND:
		/* Send a command to SEP via the shared area */
		error = sep_send_command_handler(sep);
		break;
	case SEP_IOCALLOCDATAPOLL:
		/* Allocate from the shared-area data pool */
		error = sep_allocate_data_pool_memory_handler(sep, arg);
		break;
	case SEP_IOCCREATESYMDMATABLE:
		/* Build synchronous DMA tables */
		error = sep_create_sync_dma_tables_handler(sep, arg);
		break;
	case SEP_IOCFREEDMATABLEDATA:
		/* Release DMA table resources */
		error = sep_free_dma_table_data_handler(sep);
		break;
	case SEP_IOCSEPSTART:
		/* Start the device; only valid on revision-0 silicon */
		if (sep->pdev->revision == 0)
			error = sep_start_handler(sep);
		else
			error = -EPERM;
		break;
	case SEP_IOCSEPINIT:
		/* Initialize the device; only valid on revision-0 silicon */
		if (sep->pdev->revision == 0)
			error = sep_init_handler(sep, arg);
		else
			error = -EPERM;
		break;
	case SEP_IOCGETSTATICPOOLADDR:
		/* Report the static pool address via the shared area */
		error = sep_get_static_pool_addr_handler(sep);
		break;
	case SEP_IOCENDTRANSACTION:
		error = sep_end_transaction_handler(sep);
		break;
	case SEP_IOCREALLOCEXTCACHE:
		if (sep->pdev->revision == 0)
			error = sep_realloc_ext_cache_handler(sep, arg);
		else
			error = -EPERM;
		break;
	case SEP_IOCRARPREPAREMESSAGE:
		error = sep_rar_prepare_output_msg_handler(sep, arg);
		break;
	case SEP_IOCPREPAREDCB:
		error = sep_prepare_dcb_handler(sep, arg);
		break;
	case SEP_IOCFREEDCB:
		error = sep_free_dcb_handler(sep);
		break;
	default:
		dev_dbg(&sep->pdev->dev, "invalid ioctl %x\n", cmd);
		error = -ENOTTY;
		break;
	}
	mutex_unlock(&sep->ioctl_mutex);

end_function:
	dev_dbg(&sep->pdev->dev, "ioctl end\n");
	return error;
}
3069
3070
3071
3072
3073
3074
3075
3076
3077
/**
 * sep_singleton_ioctl - ioctl dispatcher for the singleton device node.
 * @filp: file, private_data holds the struct sep_device
 * @cmd:  ioctl command number
 * @arg:  command-specific argument
 *
 * Handles SEP_IOCTLSETCALLERID locally; every other command is forwarded
 * to sep_ioctl().  Performs the same magic-number and transaction-owner
 * checks as sep_ioctl (note the checks run again in the forwarded call).
 *
 * Returns the handler result, -ENOTTY for a foreign magic number, or
 * -EACCES if another pid owns the transaction.
 */
static long sep_singleton_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	long error = 0;
	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "singleton_ioctl start\n");
	dev_dbg(&sep->pdev->dev, "cmd is %x\n", cmd);

	/* Refuse ioctls that are not ours */
	if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
		error = -ENOTTY;
		goto end_function;
	}

	/* Only the transaction owner (or anyone when idle) may proceed */
	mutex_lock(&sep->sep_mutex);
	if ((current->pid != sep->pid_doing_transaction) &&
		(sep->pid_doing_transaction != 0)) {
		dev_dbg(&sep->pdev->dev, "singleton ioctl pid is not owner\n");
		mutex_unlock(&sep->sep_mutex);
		error = -EACCES;
		goto end_function;
	}

	mutex_unlock(&sep->sep_mutex);

	switch (cmd) {
	case SEP_IOCTLSETCALLERID:
		/* Caller-id is singleton-specific; serialize like sep_ioctl */
		mutex_lock(&sep->ioctl_mutex);
		error = sep_set_caller_id_handler(sep, arg);
		mutex_unlock(&sep->ioctl_mutex);
		break;
	default:
		/* Everything else behaves exactly like the main device */
		error = sep_ioctl(filp, cmd, arg);
		break;
	}

end_function:
	dev_dbg(&sep->pdev->dev, "singleton ioctl end\n");
	return error;
}
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128static long sep_request_daemon_ioctl(struct file *filp, u32 cmd,
3129 unsigned long arg)
3130{
3131
3132 long error;
3133 struct sep_device *sep = filp->private_data;
3134
3135 dev_dbg(&sep->pdev->dev, "daemon ioctl: start\n");
3136 dev_dbg(&sep->pdev->dev, "daemon ioctl: cmd is %x\n", cmd);
3137
3138
3139 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3140 error = -ENOTTY;
3141 goto end_function;
3142 }
3143
3144
3145 mutex_lock(&sep->ioctl_mutex);
3146
3147 switch (cmd) {
3148 case SEP_IOCSENDSEPRPLYCOMMAND:
3149
3150 error = sep_req_daemon_send_reply_command_handler(sep);
3151 break;
3152 case SEP_IOCENDTRANSACTION:
3153
3154
3155
3156
3157
3158 error = 0;
3159 break;
3160 default:
3161 dev_dbg(&sep->pdev->dev, "daemon ioctl: no such IOCTL\n");
3162 error = -ENOTTY;
3163 }
3164 mutex_unlock(&sep->ioctl_mutex);
3165
3166end_function:
3167 dev_dbg(&sep->pdev->dev, "daemon ioctl: end\n");
3168 return error;
3169
3170}
3171
3172
3173
3174
3175
3176
/**
 * sep_inthandler - shared interrupt handler for the SEP device.
 * @irq:    interrupt number (unused; the line is shared)
 * @dev_id: the struct sep_device registered with request_irq()
 *
 * Reads the IRR to see whether bit 13 (the SEP-to-host interrupt) is
 * set.  If so, bumps the reply counter and wakes whichever waiter GPR2
 * indicates: the request daemon (printf or daemon request bits) or a
 * normal reply waiter.  Handled interrupts are acknowledged by writing
 * the IRR value back to the ICR; otherwise IRQ_NONE is returned so the
 * kernel can try other handlers sharing the line.
 */
static irqreturn_t sep_inthandler(int irq, void *dev_id)
{
	irqreturn_t int_error = IRQ_HANDLED;
	unsigned long lck_flags;
	u32 reg_val, reg_val2 = 0;
	struct sep_device *sep = dev_id;

	/* Read the cause register to see if this is our interrupt */
	reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
	dev_dbg(&sep->pdev->dev, "SEP Interrupt - reg is %08x\n", reg_val);

	if (reg_val & (0x1 << 13)) {
		/* SEP replied; count it under the send/reply spinlock */
		spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
		sep->reply_ct++;
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);

		dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
			sep->send_ct, sep->reply_ct);

		/* GPR2 encodes what kind of message SEP is signalling:
		 * bit 30 = printf request, bit 31 = daemon request,
		 * neither = ordinary reply */
		reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"SEP Interrupt - reg2 is %08x\n", reg_val2);

		if ((reg_val2 >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "int: printf request\n");
			wake_up(&sep->event_request_daemon);
		} else if (reg_val2 >> 31) {
			dev_dbg(&sep->pdev->dev, "int: daemon request\n");
			wake_up(&sep->event_request_daemon);
		} else {
			dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
			wake_up(&sep->event);
		}
	} else {
		dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
		int_error = IRQ_NONE;
	}
	/* Ack only interrupts we actually handled */
	if (int_error == IRQ_HANDLED)
		sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);

	return int_error;
}
3221
3222
3223
3224
3225
3226
3227
3228
/**
 * sep_reconfig_shared_area - tell SEP firmware where the shared area is.
 * @sep: SEP device
 *
 * Writes the shared-area bus address into host GPR1 and polls SEP's
 * GPR1 until the firmware echoes the same address back, up to WAIT_TIME
 * seconds.  Returns 0 on success, -ENOMEM on timeout or mismatch.
 *
 * NOTE(review): ret_val is an int compared against shared_bus
 * (dma_addr_t); this presumes the shared area sits below 4GB — confirm
 * against the platform's DMA mask.
 */
static int sep_reconfig_shared_area(struct sep_device *sep)
{
	int ret_val;

	/* Poll deadline in jiffies */
	unsigned long end_time;

	dev_dbg(&sep->pdev->dev, "reconfig shared area start\n");

	/* Publish the shared-area bus address to the firmware */
	dev_dbg(&sep->pdev->dev, "sending %08llx to sep\n",
		(unsigned long long)sep->shared_bus);

	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);

	/* Busy-poll for the firmware's echo; 0xffffffff means the device
	 * is gone/unresponsive, so the loop also bails out on that */
	ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);

	end_time = jiffies + (WAIT_TIME * HZ);

	while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
		(ret_val != sep->shared_bus))
		ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);

	/* Anything but an exact echo is a failure */
	if (ret_val != sep->shared_bus) {
		dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
		dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
		ret_val = -ENOMEM;
	} else
		ret_val = 0;

	dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
	return ret_val;
}
3264
3265
/* File operations for the singleton device node (caller-id handling
 * plus everything the main node supports) */
static const struct file_operations singleton_file_operations = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = sep_singleton_ioctl,
	.poll = sep_poll,
	.open = sep_singleton_open,
	.release = sep_singleton_release,
	.mmap = sep_mmap,
};

/* File operations for the request-daemon device node */
static const struct file_operations daemon_file_operations = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = sep_request_daemon_ioctl,
	.poll = sep_request_daemon_poll,
	.open = sep_request_daemon_open,
	.release = sep_request_daemon_release,
	.mmap = sep_request_daemon_mmap,
};

/* File operations for the main SEP device node */
static const struct file_operations sep_file_operations = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = sep_ioctl,
	.poll = sep_poll,
	.open = sep_open,
	.release = sep_release,
	.mmap = sep_mmap,
};
3294
3295
3296
3297
3298
3299
3300
3301static int sep_register_driver_with_fs(struct sep_device *sep)
3302{
3303 int ret_val;
3304
3305 sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
3306 sep->miscdev_sep.name = SEP_DEV_NAME;
3307 sep->miscdev_sep.fops = &sep_file_operations;
3308
3309 sep->miscdev_singleton.minor = MISC_DYNAMIC_MINOR;
3310 sep->miscdev_singleton.name = SEP_DEV_SINGLETON;
3311 sep->miscdev_singleton.fops = &singleton_file_operations;
3312
3313 sep->miscdev_daemon.minor = MISC_DYNAMIC_MINOR;
3314 sep->miscdev_daemon.name = SEP_DEV_DAEMON;
3315 sep->miscdev_daemon.fops = &daemon_file_operations;
3316
3317 ret_val = misc_register(&sep->miscdev_sep);
3318 if (ret_val) {
3319 dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
3320 ret_val);
3321 return ret_val;
3322 }
3323
3324 ret_val = misc_register(&sep->miscdev_singleton);
3325 if (ret_val) {
3326 dev_warn(&sep->pdev->dev, "misc reg fails for sing %x\n",
3327 ret_val);
3328 misc_deregister(&sep->miscdev_sep);
3329 return ret_val;
3330 }
3331
3332 ret_val = misc_register(&sep->miscdev_daemon);
3333 if (ret_val) {
3334 dev_warn(&sep->pdev->dev, "misc reg fails for dmn %x\n",
3335 ret_val);
3336 misc_deregister(&sep->miscdev_sep);
3337 misc_deregister(&sep->miscdev_singleton);
3338
3339 return ret_val;
3340 }
3341 return ret_val;
3342}
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353static int __devinit sep_probe(struct pci_dev *pdev,
3354 const struct pci_device_id *ent)
3355{
3356 int error = 0;
3357 struct sep_device *sep;
3358
3359 pr_debug("SEP pci probe starting\n");
3360 if (sep_dev != NULL) {
3361 dev_warn(&pdev->dev, "only one SEP supported.\n");
3362 return -EBUSY;
3363 }
3364
3365
3366 error = pci_enable_device(pdev);
3367 if (error) {
3368 dev_warn(&pdev->dev, "error enabling pci device\n");
3369 goto end_function;
3370 }
3371
3372
3373 sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
3374 if (sep_dev == NULL) {
3375 dev_warn(&pdev->dev,
3376 "can't kmalloc the sep_device structure\n");
3377 error = -ENOMEM;
3378 goto end_function_disable_device;
3379 }
3380
3381
3382
3383
3384
3385
3386
3387 sep = sep_dev;
3388
3389 sep->pdev = pci_dev_get(pdev);
3390
3391 init_waitqueue_head(&sep->event);
3392 init_waitqueue_head(&sep->event_request_daemon);
3393 spin_lock_init(&sep->snd_rply_lck);
3394 mutex_init(&sep->sep_mutex);
3395 mutex_init(&sep->ioctl_mutex);
3396
3397 dev_dbg(&sep->pdev->dev, "PCI obtained, device being prepared\n");
3398 dev_dbg(&sep->pdev->dev, "revision is %d\n", sep->pdev->revision);
3399
3400
3401 sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
3402 if (!sep->reg_physical_addr) {
3403 dev_warn(&sep->pdev->dev, "Error getting register start\n");
3404 error = -ENODEV;
3405 goto end_function_free_sep_dev;
3406 }
3407
3408 sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
3409 if (!sep->reg_physical_end) {
3410 dev_warn(&sep->pdev->dev, "Error getting register end\n");
3411 error = -ENODEV;
3412 goto end_function_free_sep_dev;
3413 }
3414
3415 sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
3416 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
3417 if (!sep->reg_addr) {
3418 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
3419 error = -ENODEV;
3420 goto end_function_free_sep_dev;
3421 }
3422
3423 dev_dbg(&sep->pdev->dev,
3424 "Register area start %llx end %llx virtual %p\n",
3425 (unsigned long long)sep->reg_physical_addr,
3426 (unsigned long long)sep->reg_physical_end,
3427 sep->reg_addr);
3428
3429
3430 sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
3431 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
3432 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
3433 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
3434 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
3435
3436 if (sep_map_and_alloc_shared_area(sep)) {
3437 error = -ENOMEM;
3438
3439 goto end_function_error;
3440 }
3441
3442 sep->rar_size = FAKE_RAR_SIZE;
3443 sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
3444 sep->rar_size, &sep->rar_bus, GFP_KERNEL);
3445 if (sep->rar_addr == NULL) {
3446 dev_warn(&sep->pdev->dev, "can't allocate mfld rar\n");
3447 error = -ENOMEM;
3448 goto end_function_deallocate_sep_shared_area;
3449 }
3450
3451 dev_dbg(&sep->pdev->dev, "rar start is %p, phy is %llx,"
3452 " size is %zx\n", sep->rar_addr,
3453 (unsigned long long)sep->rar_bus,
3454 sep->rar_size);
3455
3456 dev_dbg(&sep->pdev->dev, "about to write IMR and ICR REG_ADDR\n");
3457
3458
3459 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
3460
3461
3462 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
3463
3464
3465 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
3466 sep->reply_ct &= 0x3FFFFFFF;
3467 sep->send_ct = sep->reply_ct;
3468
3469 dev_dbg(&sep->pdev->dev, "about to call request_irq\n");
3470
3471 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
3472 "sep_driver", sep);
3473
3474 if (error)
3475 goto end_function_dealloc_rar;
3476
3477
3478 if (sep->pdev->revision == 4) {
3479 error = sep_reconfig_shared_area(sep);
3480 if (error)
3481 goto end_function_free_irq;
3482 }
3483
3484
3485 error = sep_register_driver_with_fs(sep);
3486 if (error == 0)
3487
3488 return 0;
3489
3490end_function_free_irq:
3491 free_irq(pdev->irq, sep);
3492
3493end_function_dealloc_rar:
3494 if (sep->rar_addr)
3495 dma_free_coherent(&sep->pdev->dev, sep->rar_size,
3496 sep->rar_addr, sep->rar_bus);
3497 goto end_function;
3498
3499end_function_deallocate_sep_shared_area:
3500
3501 sep_unmap_and_free_shared_area(sep);
3502
3503end_function_error:
3504 iounmap(sep->reg_addr);
3505
3506end_function_free_sep_dev:
3507 pci_dev_put(sep_dev->pdev);
3508 kfree(sep_dev);
3509 sep_dev = NULL;
3510
3511end_function_disable_device:
3512 pci_disable_device(pdev);
3513
3514end_function:
3515 return error;
3516}
3517
3518static void sep_remove(struct pci_dev *pdev)
3519{
3520 struct sep_device *sep = sep_dev;
3521
3522
3523 misc_deregister(&sep->miscdev_sep);
3524 misc_deregister(&sep->miscdev_singleton);
3525 misc_deregister(&sep->miscdev_daemon);
3526
3527
3528 free_irq(sep->pdev->irq, sep);
3529
3530
3531 sep_unmap_and_free_shared_area(sep_dev);
3532 iounmap((void *) sep_dev->reg_addr);
3533}
3534
/* PCI ids this driver binds to: the Intel Medfield SEP device */
static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MFLD_PCI_DEVICE_ID)},
	{0}
};

MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);

/* PCI driver hooks: probe/remove lifecycle for the SEP device */
static struct pci_driver sep_pci_driver = {
	.name = "sep_sec_driver",
	.id_table = sep_pci_id_tbl,
	.probe = sep_probe,
	.remove = sep_remove
};
3549
3550
3551
3552
3553
3554
3555
/* Module entry point: register the PCI driver; probing does the rest */
static int __init sep_init(void)
{
	return pci_register_driver(&sep_pci_driver);
}
3560
3561
3562
3563
3564
3565
3566
3567
/* Module exit point: unregister the PCI driver (invokes sep_remove) */
static void __exit sep_exit(void)
{
	pci_unregister_driver(&sep_pci_driver);
}
3572
3573
module_init(sep_init);
module_exit(sep_exit);

MODULE_LICENSE("GPL");
3578