/*
 * Translation Control Entry (TCE) management for the IBM Calgary IOMMU.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <asm/tce.h>
#include <asm/calgary.h>
#include <asm/proto.h>
#include <asm/cacheflush.h>

/* flush a tce at 'tceaddr' to main memory */
static inline void flush_tce(void* tceaddr)
{
	/* a single tce can't cross a cache line */
	if (cpu_has_clflush)
		clflush(tceaddr);
	else
		wbinvd();
}

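/*
 * tce_build - install 'npages' TCEs starting at 'index', mapping the
 * buffer at kernel virtual address 'uaddr'.  Each entry carries the
 * page's bus page number plus read/write permission bits, is stored
 * big-endian, and is flushed so the IOMMU sees it immediately.
 */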
void tce_build(struct iommu_table *tbl, unsigned long index,
	unsigned int npages, unsigned long uaddr, int direction)
{
	u64* tp;
	u64 t;
	u64 rpn;

	/* read access is always allowed; write only if not DMA_TO_DEVICE */
	t = (1 << TCE_READ_SHIFT);
	if (direction != DMA_TO_DEVICE)
		t |= (1 << TCE_WRITE_SHIFT);

	tp = ((u64*)tbl->it_base) + index;

	while (npages--) {
		/* bus page number of the page backing 'uaddr' */
		rpn = (virt_to_bus((void*)uaddr)) >> PAGE_SHIFT;
		t &= ~TCE_RPN_MASK;
		t |= (rpn << TCE_RPN_SHIFT);

		/* TCEs are stored big-endian */
		*tp = cpu_to_be64(t);
		flush_tce(tp);

		uaddr += PAGE_SIZE;
		tp++;
	}
}

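/*
 * tce_free - clear 'npages' TCEs starting at 'index', flushing each
 * cleared entry so the IOMMU can no longer use the old mapping.
 */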
void tce_free(struct iommu_table *tbl, long index, unsigned int npages)
{
	u64* tp;

	tp = ((u64*)tbl->it_base) + index;

	while (npages--) {
		*tp = cpu_to_be64(0);
		flush_tce(tp);
		tp++;
	}
}

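/*
 * Convert the encoded table size (an order value) into a number of
 * TCE entries: order 0 is 8K entries, and each increment doubles it.
 */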
static inline unsigned int table_size_to_number_of_entries(unsigned char size)
{
	return (1 << size) << 13;
}

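/*
 * Fill in the iommu_table for 'dev': record the bus number, size the
 * table from 'specified_table_size', and allocate the allocation
 * bitmap used to track TCE entries.
 */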
static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
{
	unsigned int bitmapsz;
	unsigned long bmppages;
	int ret;

	tbl->it_busno = dev->bus->number;

	/* set the tce table size - measured in entries */
	tbl->it_size = table_size_to_number_of_entries(specified_table_size);

	/* the allocation bitmap needs one bit per tce table entry */
	bitmapsz = tbl->it_size / BITS_PER_BYTE;
	bmppages = __get_free_pages(GFP_KERNEL, get_order(bitmapsz));
	if (!bmppages) {
		printk(KERN_ERR "Calgary: cannot allocate bitmap\n");
		ret = -ENOMEM;
		goto done;
	}

	tbl->it_map = (unsigned long*)bmppages;

	memset(tbl->it_map, 0, bitmapsz);

	tbl->it_hint = 0;

	spin_lock_init(&tbl->it_lock);

	return 0;

done:
	return ret;
}

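/*
 * Allocate and initialize the iommu_table for 'dev', record the
 * ioremapped Calgary BAR ('bbar') in it, and attach it to the bus.
 */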
int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar)
{
	struct iommu_table *tbl;
	int ret;

	if (pci_iommu(dev->bus)) {
		printk(KERN_ERR "Calgary: dev %p has sysdata->iommu %p\n",
		       dev, pci_iommu(dev->bus));
		BUG();
	}

	tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
	if (!tbl) {
		printk(KERN_ERR "Calgary: error allocating iommu_table\n");
		ret = -ENOMEM;
		goto done;
	}

	ret = tce_table_setparms(dev, tbl);
	if (ret)
		goto free_tbl;

	tbl->bbar = bbar;

	set_pci_iommu(dev->bus, tbl);

	return 0;

free_tbl:
	kfree(tbl);
done:
	return ret;
}

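/*
 * Allocate one TCE table from bootmem, sized and aligned to
 * (number of entries * TCE_ENTRY_SIZE) bytes in low memory.
 */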
void * __init alloc_tce_table(void)
{
	unsigned int size;

	size = table_size_to_number_of_entries(specified_table_size);
	size *= TCE_ENTRY_SIZE;

	return __alloc_bootmem_low(size, size, 0);
}

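/*
 * Return an unused TCE table allocated by alloc_tce_table() to the
 * bootmem allocator.
 */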
void __init free_tce_table(void *tbl)
{
	unsigned int size;

	if (!tbl)
		return;

	size = table_size_to_number_of_entries(specified_table_size);
	size *= TCE_ENTRY_SIZE;

	free_bootmem(__pa(tbl), size);
}