1
2
3
4
5
6
7
8
9#include <linux/types.h>
10#include <linux/pci.h>
11#include <linux/export.h>
12#include <asm/sn/addrs.h>
13#include <asm/sn/geo.h>
14#include <asm/sn/pcibr_provider.h>
15#include <asm/sn/pcibus_provider_defs.h>
16#include <asm/sn/pcidev.h>
17#include <asm/sn/pic.h>
18#include <asm/sn/sn_sal.h>
19#include <asm/sn/tiocp.h>
20#include "tio.h"
21#include "xtalk/xwidgetdev.h"
22#include "xtalk/hubdev.h"
23
24extern int sn_ioif_inited;
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43static dma_addr_t
44pcibr_dmamap_ate32(struct pcidev_info *info,
45 u64 paddr, size_t req_size, u64 flags, int dma_flags)
46{
47
48 struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
49 struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
50 pdi_pcibus_info;
51 u8 internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info->
52 pdi_linux_pcidev->devfn)) - 1;
53 int ate_count;
54 int ate_index;
55 u64 ate_flags = flags | PCI32_ATE_V;
56 u64 ate;
57 u64 pci_addr;
58 u64 xio_addr;
59 u64 offset;
60
61
62 if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) {
63 return 0;
64 }
65
66
67 if (!(MINIMAL_ATE_FLAG(paddr, req_size))) {
68 ate_count = IOPG((IOPGSIZE - 1)
69 +req_size
70 - 1) + 1;
71 } else {
72 ate_count = IOPG(req_size
73 - 1) + 1;
74 }
75
76
77 ate_index = pcibr_ate_alloc(pcibus_info, ate_count);
78 if (ate_index < 0)
79 return 0;
80
81
82 if (IS_PCIX(pcibus_info))
83 ate_flags &= ~(PCI32_ATE_PREF);
84
85 if (SN_DMA_ADDRTYPE(dma_flags == SN_DMA_ADDR_PHYS))
86 xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
87 PHYS_TO_TIODMA(paddr);
88 else
89 xio_addr = paddr;
90
91 offset = IOPGOFF(xio_addr);
92 ate = ate_flags | (xio_addr - offset);
93
94
95 if (IS_PIC_SOFT(pcibus_info)) {
96 ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
97 }
98
99
100
101
102
103 if (dma_flags & SN_DMA_MSI) {
104 ate |= PCI32_ATE_MSI;
105 if (IS_TIOCP_SOFT(pcibus_info))
106 ate |= PCI32_ATE_PIO;
107 }
108
109 ate_write(pcibus_info, ate_index, ate_count, ate);
110
111
112
113
114 pci_addr = PCI32_MAPPED_BASE + offset + IOPGSIZE * ate_index;
115
116
117
118
119
120 if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
121 ATE_SWAP_ON(pci_addr);
122
123
124 return pci_addr;
125}
126
127static dma_addr_t
128pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
129 u64 dma_attributes, int dma_flags)
130{
131 struct pcibus_info *pcibus_info = (struct pcibus_info *)
132 ((info->pdi_host_pcidev_info)->pdi_pcibus_info);
133 u64 pci_addr;
134
135
136 if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
137 pci_addr = IS_PIC_SOFT(pcibus_info) ?
138 PHYS_TO_DMA(paddr) :
139 PHYS_TO_TIODMA(paddr);
140 else
141 pci_addr = paddr;
142 pci_addr |= dma_attributes;
143
144
145 if (IS_PCIX(pcibus_info))
146 pci_addr &= ~PCI64_ATTR_PREF;
147
148
149 if (IS_PIC_SOFT(pcibus_info)) {
150 pci_addr |=
151 ((u64) pcibus_info->
152 pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
153 } else
154 pci_addr |= (dma_flags & SN_DMA_MSI) ?
155 TIOCP_PCI64_CMDTYPE_MSI :
156 TIOCP_PCI64_CMDTYPE_MEM;
157
158
159 if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
160 pci_addr |= PCI64_ATTR_VIRTUAL;
161
162 return pci_addr;
163}
164
165static dma_addr_t
166pcibr_dmatrans_direct32(struct pcidev_info * info,
167 u64 paddr, size_t req_size, u64 flags, int dma_flags)
168{
169 struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
170 struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
171 pdi_pcibus_info;
172 u64 xio_addr;
173
174 u64 xio_base;
175 u64 offset;
176 u64 endoff;
177
178 if (IS_PCIX(pcibus_info)) {
179 return 0;
180 }
181
182 if (dma_flags & SN_DMA_MSI)
183 return 0;
184
185 if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
186 xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
187 PHYS_TO_TIODMA(paddr);
188 else
189 xio_addr = paddr;
190
191 xio_base = pcibus_info->pbi_dir_xbase;
192 offset = xio_addr - xio_base;
193 endoff = req_size + offset;
194 if ((req_size > (1ULL << 31)) ||
195 (xio_addr < xio_base) ||
196 (endoff > (1ULL << 31))) {
197 return 0;
198 }
199
200 return PCI32_DIRECT_BASE | offset;
201}
202
203
204
205
206
207void
208pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction)
209{
210 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
211 struct pcibus_info *pcibus_info =
212 (struct pcibus_info *)pcidev_info->pdi_pcibus_info;
213
214 if (IS_PCI32_MAPPED(dma_handle)) {
215 int ate_index;
216
217 ate_index =
218 IOPG((ATE_SWAP_OFF(dma_handle) - PCI32_MAPPED_BASE));
219 pcibr_ate_free(pcibus_info, ate_index);
220 }
221}
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
/*
 * sn_dma_flush - force outstanding DMA writes for a device to complete.
 * @addr: MMIO address within one of the device's BARs, used to locate the
 *	  owning widget/device and its flush bookkeeping.
 *
 * Walks from the address's NASID to the hub's per-widget flush list, finds
 * the device whose BAR covers @addr, then performs the hardware-specific
 * write-buffer flush.  Silently returns if the IO subsystem is not yet
 * initialized or no matching node/widget/BAR is found.
 */
void sn_dma_flush(u64 addr)
{
	nasid_t nasid;
	int is_tio;
	int wid_num;
	int i, j;
	unsigned long flags;
	u64 itte;
	struct hubdev_info *hubinfo;
	struct sn_flush_device_kernel *p;
	struct sn_flush_device_common *common;
	struct sn_flush_nasid_entry *flush_nasid_list;

	/* Flush tables don't exist before IO init; nothing to do. */
	if (!sn_ioif_inited)
		return;

	nasid = NASID_GET(addr);
	/* Address on a node we don't know about: bail out. */
	if (-1 == nasid_to_cnodeid(nasid))
		return;

	hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;

	BUG_ON(!hubinfo);

	flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
	if (flush_nasid_list->widget_p == NULL)
		return;

	/* NOTE(review): odd NASIDs appear to denote TIO nodes — confirm. */
	is_tio = (nasid & 1);
	if (is_tio) {
		int itte_index;

		/*
		 * Decode which widget the address targets: hardware window,
		 * big window (via the ITTE table), or small window.
		 */
		if (TIO_HWIN(addr))
			itte_index = 0;
		else if (TIO_BWIN_WINDOWNUM(addr))
			itte_index = TIO_BWIN_WINDOWNUM(addr);
		else
			itte_index = -1;

		if (itte_index >= 0) {
			itte = flush_nasid_list->iio_itte[itte_index];
			/* An invalid ITTE means nothing is mapped here. */
			if (! TIO_ITTE_VALID(itte))
				return;
			wid_num = TIO_ITTE_WIDGET(itte);
		} else
			wid_num = TIO_SWIN_WIDGETNUM(addr);
	} else {
		/* Hub (non-TIO) decode: big window via ITTE, else small. */
		if (BWIN_WINDOWNUM(addr)) {
			itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)];
			wid_num = IIO_ITTE_WIDGET(itte);
		} else
			wid_num = SWIN_WIDGETNUM(addr);
	}
	if (flush_nasid_list->widget_p[wid_num] == NULL)
		return;
	p = &flush_nasid_list->widget_p[wid_num][0];

	/* Find the device on this widget whose BAR list covers @addr. */
	for (i = 0; i < DEV_PER_WIDGET; i++,p++) {
		common = p->common;
		for (j = 0; j < PCI_ROM_RESOURCE; j++) {
			/* A zero start terminates the BAR list. */
			if (common->sfdl_bar_list[j].start == 0)
				break;
			if (addr >= common->sfdl_bar_list[j].start
			    && addr <= common->sfdl_bar_list[j].end)
				break;
		}
		/* Inner loop stopped on a real match (not the terminator). */
		if (j < PCI_ROM_RESOURCE && common->sfdl_bar_list[j].start != 0)
			break;
	}

	/* No device claims this address: nothing to flush. */
	if (i == DEV_PER_WIDGET)
		return;

	if (is_tio) {
		/*
		 * TIO path: read back the write-buffer flush register, which
		 * stalls until prior DMA writes have been pushed out.  Some
		 * part revisions (PV907516 mask) must skip this — hedged:
		 * presumably an erratum workaround; see the PV number.
		 */
		u32 tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
		u32 revnum = XWIDGET_PART_REV_NUM(tio_id);

		if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
			return;
		} else {
			pcireg_wrb_flush_get(common->sfdl_pcibus_info,
					     (common->sfdl_slot - 1));
		}
	} else {
		/*
		 * Hub path: under the per-device lock, clear the flush word,
		 * poke the force-interrupt register, then spin until the
		 * bridge writes the 0x10f completion value back.  The exact
		 * write/poll order is the protocol — do not reorder.
		 */
		spin_lock_irqsave(&p->sfdl_flush_lock, flags);
		*common->sfdl_flush_addr = 0;

		/* Force an interrupt; its side effect drains write buffers. */
		*(volatile u32 *)(common->sfdl_force_int_addr) = 1;

		/* Busy-wait for the completion sentinel. */
		while (*(common->sfdl_flush_addr) != 0x10f)
			cpu_relax();

		/* Okay, everything is drained; release the lock. */
		spin_unlock_irqrestore(&p->sfdl_flush_lock, flags);
	}
	return;
}
351
352
353
354
355
356dma_addr_t
357pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags)
358{
359 dma_addr_t dma_handle;
360 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
361
362
363 if (hwdev->dma_mask < 0x7fffffff) {
364 return 0;
365 }
366
367 if (hwdev->dma_mask == ~0UL) {
368
369
370
371
372
373 dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
374 PCI64_ATTR_PREF, dma_flags);
375 } else {
376
377 dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
378 size, 0, dma_flags);
379 if (!dma_handle) {
380
381
382
383
384
385 dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
386 size, PCI32_ATE_PREF,
387 dma_flags);
388 }
389 }
390
391 return dma_handle;
392}
393
394dma_addr_t
395pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr,
396 size_t size, int dma_flags)
397{
398 dma_addr_t dma_handle;
399 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
400
401 if (hwdev->dev.coherent_dma_mask == ~0UL) {
402 dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
403 PCI64_ATTR_BAR, dma_flags);
404 } else {
405 dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
406 phys_addr, size,
407 PCI32_ATE_BAR, dma_flags);
408 }
409
410 return dma_handle;
411}
412
413EXPORT_SYMBOL(sn_dma_flush);
414