#include <drm/drmP.h>
#include "psb_drv.h"
#include "psb_reg.h"
#include "mmu.h"
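
/*
 * The SGX MMU uses a two-level page table: a page directory (PD) of 1024
 * entries, each pointing to a page table (PT) of 1024 32-bit PTEs. The
 * helpers below extract the PT index and the PD index from a 32-bit GPU
 * virtual address (assuming 4 KiB pages, i.e. PSB_PTE_SHIFT == 12).
 */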
static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
        return (offset >> PSB_PTE_SHIFT) & 0x3FF;
}

static inline uint32_t psb_mmu_pd_index(uint32_t offset)
{
        return offset >> PSB_PDE_SHIFT;
}

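/*
 * The SGX core fetches page-table entries from memory over the bus, so
 * PTE updates made through a cached CPU mapping presumably must be pushed
 * out with clflush on x86 before the hardware can see them. On other
 * architectures this helper is a no-op and flushing is handled in
 * psb_mmu_flush_ptes() instead.
 */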
#if defined(CONFIG_X86)
static inline void psb_clflush(void *addr)
{
        __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
        if (!driver->has_clflush)
                return;

        mb();
        psb_clflush(addr);
        mb();
}
#else

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
}

#endif

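/*
 * Invalidate the BIF directory cache / TLB by pulsing the INVALDC bit.
 * Must be called with driver->sem held for write; the trailing read-back
 * of PSB_CR_BIF_CTRL posts the writes before we return.
 */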
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
        struct drm_device *dev = driver->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;

        if (atomic_read(&driver->needs_tlbflush) || force) {
                uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
                PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);

                wmb();
                PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
                (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
                if (driver->msvdx_mmu_invaldc)
                        atomic_set(driver->msvdx_mmu_invaldc, 1);
        }
        atomic_set(&driver->needs_tlbflush, 0);
}

#if 0
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
        down_write(&driver->sem);
        psb_mmu_flush_pd_locked(driver, force);
        up_write(&driver->sem);
}
#endif

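/*
 * Flush the MMU: a full invalidate if PTEs were torn down since the last
 * flush (needs_tlbflush), otherwise just a cache flush. Also raises the
 * shared msvdx_mmu_invaldc flag, which appears to tell the MSVDX video
 * engine to invalidate its own MMU state.
 */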
void psb_mmu_flush(struct psb_mmu_driver *driver)
{
        struct drm_device *dev = driver->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;
        uint32_t val;

        down_write(&driver->sem);
        val = PSB_RSGX32(PSB_CR_BIF_CTRL);
        if (atomic_read(&driver->needs_tlbflush))
                PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
        else
                PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);

        wmb();
        PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
                   PSB_CR_BIF_CTRL);
        (void)PSB_RSGX32(PSB_CR_BIF_CTRL);

        atomic_set(&driver->needs_tlbflush, 0);
        if (driver->msvdx_mmu_invaldc)
                atomic_set(driver->msvdx_mmu_invaldc, 1);
        up_write(&driver->sem);
}

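/*
 * Bind a page directory to a hardware context by writing its physical
 * address into the corresponding BIF directory-list base register, then
 * force a directory-cache flush so the hardware picks it up.
 */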
void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
        struct drm_device *dev = pd->driver->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;
        uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
                          PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;

        down_write(&pd->driver->sem);
        PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
        wmb();
        psb_mmu_flush_pd_locked(pd->driver, 1);
        pd->hw_context = hw_context;
        up_write(&pd->driver->sem);
}

static inline unsigned long psb_pd_addr_end(unsigned long addr,
                                            unsigned long end)
{
        addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
        return (addr < end) ? addr : end;
}

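/*
 * Build a hardware PTE from a page-frame number and the PSB_MMU_*_MEMORY
 * type flags (cached / read-only / write-only); the valid bit is always
 * set.
 */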
static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
{
        uint32_t mask = PSB_PTE_VALID;

        if (type & PSB_MMU_CACHED_MEMORY)
                mask |= PSB_PTE_CACHED;
        if (type & PSB_MMU_RO_MEMORY)
                mask |= PSB_PTE_RO;
        if (type & PSB_MMU_WO_MEMORY)
                mask |= PSB_PTE_WO;

        return (pfn << PAGE_SHIFT) | mask;
}

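/*
 * Allocate a page directory. Unless trap_pagefaults is set, unmapped
 * addresses are backed by a dummy page table whose entries all point at
 * a dummy page, so stray GPU accesses hit harmless memory instead of
 * faulting. All pages come from GFP_DMA32 since the hardware uses 32-bit
 * physical addresses.
 */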
struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
                                    int trap_pagefaults, int invalid_type)
{
        struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
        uint32_t *v;
        int i;

        if (!pd)
                return NULL;

        pd->p = alloc_page(GFP_DMA32);
        if (!pd->p)
                goto out_err1;
        pd->dummy_pt = alloc_page(GFP_DMA32);
        if (!pd->dummy_pt)
                goto out_err2;
        pd->dummy_page = alloc_page(GFP_DMA32);
        if (!pd->dummy_page)
                goto out_err3;

        if (!trap_pagefaults) {
                pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
                                                   invalid_type);
                pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
                                                   invalid_type);
        } else {
                pd->invalid_pde = 0;
                pd->invalid_pte = 0;
        }

        v = kmap(pd->dummy_pt);
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                v[i] = pd->invalid_pte;

        kunmap(pd->dummy_pt);

        v = kmap(pd->p);
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                v[i] = pd->invalid_pde;

        kunmap(pd->p);

        clear_page(kmap(pd->dummy_page));
        kunmap(pd->dummy_page);

        pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
        if (!pd->tables)
                goto out_err4;

        pd->hw_context = -1;
        pd->pd_mask = PSB_PTE_VALID;
        pd->driver = driver;

        return pd;

out_err4:
        __free_page(pd->dummy_page);
out_err3:
        __free_page(pd->dummy_pt);
out_err2:
        __free_page(pd->p);
out_err1:
        kfree(pd);
        return NULL;
}

static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
        __free_page(pt->p);
        kfree(pt);
}

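/*
 * Tear down a page directory: unbind it from its hardware context if
 * necessary, then free all page tables, the dummy pages and the
 * directory page itself.
 */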
void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
        struct psb_mmu_driver *driver = pd->driver;
        struct drm_device *dev = driver->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct psb_mmu_pt *pt;
        int i;

        down_write(&driver->sem);
        if (pd->hw_context != -1) {
                PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
                psb_mmu_flush_pd_locked(driver, 1);
        }

        for (i = 0; i < 1024; ++i) {
                pt = pd->tables[i];
                if (pt)
                        psb_mmu_free_pt(pt);
        }

        vfree(pd->tables);
        __free_page(pd->dummy_page);
        __free_page(pd->dummy_pt);
        __free_page(pd->p);
        kfree(pd);
        up_write(&driver->sem);
}

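/*
 * Allocate a page table with every entry preset to the directory's
 * invalid PTE. If the directory is already live on hardware, the fresh
 * entries are clflushed out so the GPU never sees stale cache contents.
 */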
static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
        struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
        void *v;
        uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
        /* Guard against a division by zero when clflush is unavailable. */
        uint32_t clflush_count = clflush_add ? PAGE_SIZE / clflush_add : 0;
        spinlock_t *lock = &pd->driver->lock;
        uint8_t *clf;
        uint32_t *ptes;
        int i;

        if (!pt)
                return NULL;

        pt->p = alloc_page(GFP_DMA32);
        if (!pt->p) {
                kfree(pt);
                return NULL;
        }

        spin_lock(lock);

        v = kmap_atomic(pt->p);
        clf = (uint8_t *) v;
        ptes = (uint32_t *) v;
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                *ptes++ = pd->invalid_pte;

#if defined(CONFIG_X86)
        if (pd->driver->has_clflush && pd->hw_context != -1) {
                mb();
                for (i = 0; i < clflush_count; ++i) {
                        psb_clflush(clf);
                        clf += clflush_add;
                }
                mb();
        }
#endif
        kunmap_atomic(v);
        spin_unlock(lock);

        pt->count = 0;
        pt->pd = pd;
        pt->index = 0;

        return pt;
}

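/*
 * Look up, or allocate and hook up, the page table covering addr, and
 * return it kmapped with the driver spinlock held. The lock has to be
 * dropped around the allocation, so re-check for a racing insertion and
 * back our allocation out if somebody else won.
 */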
struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
                                             unsigned long addr)
{
        uint32_t index = psb_mmu_pd_index(addr);
        struct psb_mmu_pt *pt;
        uint32_t *v;
        spinlock_t *lock = &pd->driver->lock;

        spin_lock(lock);
        pt = pd->tables[index];
        while (!pt) {
                spin_unlock(lock);
                pt = psb_mmu_alloc_pt(pd);
                if (!pt)
                        return NULL;
                spin_lock(lock);

                if (pd->tables[index]) {
                        spin_unlock(lock);
                        psb_mmu_free_pt(pt);
                        spin_lock(lock);
                        pt = pd->tables[index];
                        continue;
                }

                v = kmap_atomic(pd->p);
                pd->tables[index] = pt;
                v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
                pt->index = index;
                kunmap_atomic(v);

                if (pd->hw_context != -1) {
                        psb_mmu_clflush(pd->driver, (void *)&v[index]);
                        atomic_set(&pd->driver->needs_tlbflush, 1);
                }
        }
        pt->v = kmap_atomic(pt->p);
        return pt;
}

static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
                                              unsigned long addr)
{
        uint32_t index = psb_mmu_pd_index(addr);
        struct psb_mmu_pt *pt;
        spinlock_t *lock = &pd->driver->lock;

        spin_lock(lock);
        pt = pd->tables[index];
        if (!pt) {
                spin_unlock(lock);
                return NULL;
        }
        pt->v = kmap_atomic(pt->p);
        return pt;
}

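/*
 * Unmap a page table previously returned by one of the map_lock helpers
 * and release the spinlock. A table whose use count has dropped to zero
 * is unhooked from the directory and freed here.
 */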
static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
        struct psb_mmu_pd *pd = pt->pd;
        uint32_t *v;

        kunmap_atomic(pt->v);
        if (pt->count == 0) {
                v = kmap_atomic(pd->p);
                v[pt->index] = pd->invalid_pde;
                pd->tables[pt->index] = NULL;

                if (pd->hw_context != -1) {
                        psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
                        atomic_set(&pd->driver->needs_tlbflush, 1);
                }
                kunmap_atomic(v);
                spin_unlock(&pd->driver->lock);
                psb_mmu_free_pt(pt);
                return;
        }
        spin_unlock(&pd->driver->lock);
}

static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
                                   uint32_t pte)
{
        pt->v[psb_mmu_pt_index(addr)] = pte;
}

static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
                                          unsigned long addr)
{
        pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}

struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
        struct psb_mmu_pd *pd;

        down_read(&driver->sem);
        pd = driver->default_pd;
        up_read(&driver->sem);

        return pd;
}

uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
{
        struct psb_mmu_pd *pd;

        pd = psb_mmu_get_default_pd(driver);
        return page_to_pfn(pd->p) << PAGE_SHIFT;
}

void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
        struct drm_device *dev = driver->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;

        PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
        psb_mmu_free_pagedir(driver->default_pd);
        kfree(driver);
}

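/*
 * Create the MMU driver instance and its default page directory. On x86
 * the CPUID cache-line-flush size is used to precompute clflush_add, the
 * stride (in GPU virtual-address bytes) covered by one cache line of
 * PTEs, and clflush_mask, the corresponding alignment mask.
 */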
struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
                                           int trap_pagefaults,
                                           int invalid_type,
                                           atomic_t *msvdx_mmu_invaldc)
{
        struct psb_mmu_driver *driver;
        struct drm_psb_private *dev_priv = dev->dev_private;

        driver = kmalloc(sizeof(*driver), GFP_KERNEL);
        if (!driver)
                return NULL;

        driver->dev = dev;
        driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
                                              invalid_type);
        if (!driver->default_pd)
                goto out_err1;

        spin_lock_init(&driver->lock);
        init_rwsem(&driver->sem);
        down_write(&driver->sem);
        atomic_set(&driver->needs_tlbflush, 1);
        driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;

        driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
        PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
                   PSB_CR_BIF_CTRL);
        PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
                   PSB_CR_BIF_CTRL);

        /* Sane defaults in case clflush is unavailable. */
        driver->has_clflush = 0;
        driver->clflush_add = 0;
        driver->clflush_mask = 0;

#if defined(CONFIG_X86)
        if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
                uint32_t tfms, misc, cap0, cap4, clflush_size;

                /*
                 * CPUID leaf 1, EBX bits 15:8 report the cache-line flush
                 * size in units of 8 bytes.
                 */
                cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
                clflush_size = ((misc >> 8) & 0xff) * 8;
                driver->has_clflush = 1;
                driver->clflush_add =
                        PAGE_SIZE * clflush_size / sizeof(uint32_t);
                driver->clflush_mask = driver->clflush_add - 1;
                driver->clflush_mask = ~driver->clflush_mask;
        }
#endif

        up_write(&driver->sem);
        return driver;

out_err1:
        kfree(driver);
        return NULL;
}

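/*
 * Write back the CPU cache lines holding the PTEs for a mapped range,
 * honouring the tiling layout (rows of desired_tile_stride pages spaced
 * hw_tile_stride pages apart). Only meaningful when the directory is
 * bound to a hardware context.
 */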
#if defined(CONFIG_X86)
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
                               uint32_t num_pages, uint32_t desired_tile_stride,
                               uint32_t hw_tile_stride)
{
        struct psb_mmu_pt *pt;
        uint32_t rows = 1;
        uint32_t i;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long add;
        unsigned long row_add;
        unsigned long clflush_add = pd->driver->clflush_add;
        unsigned long clflush_mask = pd->driver->clflush_mask;

        if (!pd->driver->has_clflush)
                return;

        if (hw_tile_stride)
                rows = num_pages / desired_tile_stride;
        else
                desired_tile_stride = num_pages;

        add = desired_tile_stride << PAGE_SHIFT;
        row_add = hw_tile_stride << PAGE_SHIFT;
        mb();
        for (i = 0; i < rows; ++i) {
                addr = address;
                end = addr + add;

                do {
                        next = psb_pd_addr_end(addr, end);
                        pt = psb_mmu_pt_map_lock(pd, addr);
                        if (!pt)
                                continue;
                        do {
                                psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
                        } while (addr += clflush_add,
                                 (addr & clflush_mask) < next);

                        psb_mmu_pt_unmap_unlock(pt);
                } while (addr = next, next != end);
                address += row_add;
        }
        mb();
}
#else
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
                               uint32_t num_pages, uint32_t desired_tile_stride,
                               uint32_t hw_tile_stride)
{
        drm_ttm_cache_flush();
}
#endif

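/*
 * Unmap num_pages linearly mapped pages starting at address, dropping
 * the use counts of the affected page tables and flushing/invalidating
 * the hardware afterwards if the directory is live.
 */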
void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
                                 unsigned long address, uint32_t num_pages)
{
        struct psb_mmu_pt *pt;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long f_address = address;

        down_read(&pd->driver->sem);

        addr = address;
        end = addr + (num_pages << PAGE_SHIFT);

        do {
                next = psb_pd_addr_end(addr, end);
                pt = psb_mmu_pt_alloc_map_lock(pd, addr);
                if (!pt)
                        goto out;
                do {
                        psb_mmu_invalidate_pte(pt, addr);
                        --pt->count;
                } while (addr += PAGE_SIZE, addr < next);
                psb_mmu_pt_unmap_unlock(pt);

        } while (addr = next, next != end);

out:
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver);
}

void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
                          uint32_t num_pages, uint32_t desired_tile_stride,
                          uint32_t hw_tile_stride)
{
        struct psb_mmu_pt *pt;
        uint32_t rows = 1;
        uint32_t i;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long add;
        unsigned long row_add;
        unsigned long f_address = address;

        if (hw_tile_stride)
                rows = num_pages / desired_tile_stride;
        else
                desired_tile_stride = num_pages;

        add = desired_tile_stride << PAGE_SHIFT;
        row_add = hw_tile_stride << PAGE_SHIFT;

        down_read(&pd->driver->sem);

        for (i = 0; i < rows; ++i) {
                addr = address;
                end = addr + add;

                do {
                        next = psb_pd_addr_end(addr, end);
                        pt = psb_mmu_pt_map_lock(pd, addr);
                        if (!pt)
                                continue;
                        do {
                                psb_mmu_invalidate_pte(pt, addr);
                                --pt->count;
                        } while (addr += PAGE_SIZE, addr < next);
                        psb_mmu_pt_unmap_unlock(pt);

                } while (addr = next, next != end);
                address += row_add;
        }
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages,
                                   desired_tile_stride, hw_tile_stride);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver);
}

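/*
 * Map num_pages contiguous page frames starting at start_pfn into the
 * directory at address. PTEs are built with psb_mmu_mask_pte() and the
 * affected cache lines are flushed before the hardware TLB flush.
 */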
int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
                                unsigned long address, uint32_t num_pages,
                                int type)
{
        struct psb_mmu_pt *pt;
        uint32_t pte;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long f_address = address;
        int ret = -ENOMEM;

        down_read(&pd->driver->sem);

        addr = address;
        end = addr + (num_pages << PAGE_SHIFT);

        do {
                next = psb_pd_addr_end(addr, end);
                pt = psb_mmu_pt_alloc_map_lock(pd, addr);
                if (!pt) {
                        ret = -ENOMEM;
                        goto out;
                }
                do {
                        pte = psb_mmu_mask_pte(start_pfn++, type);
                        psb_mmu_set_pte(pt, addr, pte);
                        pt->count++;
                } while (addr += PAGE_SIZE, addr < next);
                psb_mmu_pt_unmap_unlock(pt);

        } while (addr = next, next != end);
        ret = 0;

out:
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver);

        /* Propagate -ENOMEM from a failed page-table allocation. */
        return ret;
}

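/*
 * Map an array of (not necessarily contiguous) pages, optionally in a
 * tiled layout: rows of desired_tile_stride pages placed hw_tile_stride
 * pages apart in the virtual address space. num_pages must then be a
 * multiple of desired_tile_stride.
 */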
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
                         unsigned long address, uint32_t num_pages,
                         uint32_t desired_tile_stride, uint32_t hw_tile_stride,
                         int type)
{
        struct psb_mmu_pt *pt;
        uint32_t rows = 1;
        uint32_t i;
        uint32_t pte;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long add;
        unsigned long row_add;
        unsigned long f_address = address;
        int ret = -ENOMEM;

        if (hw_tile_stride) {
                if (num_pages % desired_tile_stride != 0)
                        return -EINVAL;
                rows = num_pages / desired_tile_stride;
        } else {
                desired_tile_stride = num_pages;
        }

        add = desired_tile_stride << PAGE_SHIFT;
        row_add = hw_tile_stride << PAGE_SHIFT;

        down_read(&pd->driver->sem);

        for (i = 0; i < rows; ++i) {
                addr = address;
                end = addr + add;

                do {
                        next = psb_pd_addr_end(addr, end);
                        pt = psb_mmu_pt_alloc_map_lock(pd, addr);
                        if (!pt)
                                goto out;
                        do {
                                pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
                                                       type);
                                psb_mmu_set_pte(pt, addr, pte);
                                pt->count++;
                        } while (addr += PAGE_SIZE, addr < next);
                        psb_mmu_pt_unmap_unlock(pt);

                } while (addr = next, next != end);

                address += row_add;
        }

        ret = 0;
out:
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages,
                                   desired_tile_stride, hw_tile_stride);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver);

        return ret;
}

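/*
 * Translate a GPU virtual address to a CPU page-frame number. If no page
 * table exists for the address, the lookup can still succeed when the
 * directory redirects unmapped addresses to the valid dummy page;
 * otherwise -EINVAL is returned.
 */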
int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
                           unsigned long *pfn)
{
        int ret;
        struct psb_mmu_pt *pt;
        uint32_t tmp;
        spinlock_t *lock = &pd->driver->lock;

        down_read(&pd->driver->sem);
        pt = psb_mmu_pt_map_lock(pd, virtual);
        if (!pt) {
                uint32_t *v;

                spin_lock(lock);
                v = kmap_atomic(pd->p);
                tmp = v[psb_mmu_pd_index(virtual)];
                kunmap_atomic(v);
                spin_unlock(lock);

                if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
                    !(pd->invalid_pte & PSB_PTE_VALID)) {
                        ret = -EINVAL;
                        goto out;
                }
                ret = 0;
                *pfn = pd->invalid_pte >> PAGE_SHIFT;
                goto out;
        }
        tmp = pt->v[psb_mmu_pt_index(virtual)];
        if (!(tmp & PSB_PTE_VALID)) {
                ret = -EINVAL;
        } else {
                ret = 0;
                *pfn = tmp >> PAGE_SHIFT;
        }
        psb_mmu_pt_unmap_unlock(pt);
out:
        up_read(&pd->driver->sem);
        return ret;
}