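// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 *              FAULT HANDLER FOR GRU DETECTED TLB MISSES
 *
 * This file contains code that handles TLB misses within the GRU.
 * These misses are reported either via interrupts or user polling of
 * the user CB.
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */
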
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/sync_core.h>
#include <linux/prefetch.h>
#include <asm/pgtable.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

#define VTOP_SUCCESS	0
#define VTOP_INVALID	-1
#define VTOP_RETRY	-2
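
/*
 * Test if a physical address is a valid GRU GSEG address.
 */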
static inline int is_gru_paddr(unsigned long paddr)
{
	return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}
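
/*
 * Find the vma of a GRU segment. Caller must hold mmap_sem.
 */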
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, vaddr);
	if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
		return vma;
	return NULL;
}
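
/*
 * Find and lock the gts that contains the specified user vaddr.
 *
 * Returns:
 *	- *gts with the mmap_sem locked for read and the gts ts_ctxlock held.
 *	- NULL if vaddr is invalid OR is not a valid GSEG vaddr.
 */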
static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = NULL;

	down_read(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (vma)
		gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (gts)
		mutex_lock(&gts->ts_ctxlock);
	else
		up_read(&mm->mmap_sem);
	return gts;
}

static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = ERR_PTR(-EINVAL);

	down_write(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (!vma)
		goto err;

	gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
	if (IS_ERR(gts))
		goto err;
	mutex_lock(&gts->ts_ctxlock);
	downgrade_write(&mm->mmap_sem);
	return gts;

err:
	up_write(&mm->mmap_sem);
	return gts;
}
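
/*
 * Unlock a gts that was previously locked with gru_find_lock_gts().
 */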
static void gru_unlock_gts(struct gru_thread_state *gts)
{
	mutex_unlock(&gts->ts_ctxlock);
	up_read(&current->mm->mmap_sem);
}
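
/*
 * Set a CB.istatus to active using a user virtual address. This must be done
 * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
 * If the line is evicted, the status may be lost. The in-cache update
 * is necessary to prevent the user from seeing a stale cb.istatus that will
 * change as soon as the TFH restart is complete. Races may cause an
 * occasional failure to clear the cb.istatus, but that is ok.
 */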
static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
	if (cbk) {
		cbk->istatus = CBS_ACTIVE;
	}
}
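
/*
 * Read & clear a TFM
 *
 * The GRU has an array of fault maps. A map is private to a cpu.
 * Only one cpu will be accessing a cpu's fault map.
 *
 * This function scans the cpu-private fault map & clears all bits that
 * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
 * the GRU, atomic operations must be used to clear bits.
 */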
static void get_clear_fault_map(struct gru_state *gru,
				struct gru_tlb_fault_map *imap,
				struct gru_tlb_fault_map *dmap)
{
	unsigned long i, k;
	struct gru_tlb_fault_map *tfm;

	tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
	prefetchw(tfm);
	for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
		k = tfm->fault_bits[i];
		if (k)
			k = xchg(&tfm->fault_bits[i], 0UL);
		imap->fault_bits[i] = k;
		k = tfm->done_bits[i];
		if (k)
			k = xchg(&tfm->done_bits[i], 0UL);
		dmap->fault_bits[i] = k;
	}
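
	/*
	 * Flush the TFM after clearing it. Not functionally required,
	 * but it improves the amount of time that the fault map
	 * remains usable in the cache.
	 */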
	gru_flush_cache(tfm);
}
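
/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 *	returns:
 *		  0 - successful
 *		< 0 - error code
 *		  1 - (atomic only) try again in non-atomic context
 */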
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
				 unsigned long vaddr, int write,
				 unsigned long *paddr, int *pageshift)
{
	struct page *page;

#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
		return -EFAULT;
	*paddr = page_to_phys(page);
	put_page(page);
	return 0;
}
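
/*
 * atomic_pte_lookup
 *
 * Convert a user virtual address to a physical address.
 * Only supports large pages (2MB) on x86_64; hugetlb support is otherwise
 * limited to returning the page shift.
 *
 * NOTE: mmap_sem is already held on entry to this function. This
 * guarantees existence of the page tables.
 */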
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
	int write, unsigned long *paddr, int *pageshift)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t pte;

	pgdp = pgd_offset(vma->vm_mm, vaddr);
	if (unlikely(pgd_none(*pgdp)))
		goto err;

	p4dp = p4d_offset(pgdp, vaddr);
	if (unlikely(p4d_none(*p4dp)))
		goto err;

	pudp = pud_offset(p4dp, vaddr);
	if (unlikely(pud_none(*pudp)))
		goto err;

	pmdp = pmd_offset(pudp, vaddr);
	if (unlikely(pmd_none(*pmdp)))
		goto err;
#ifdef CONFIG_X86_64
	if (unlikely(pmd_large(*pmdp)))
		pte = *(pte_t *) pmdp;
	else
#endif
		pte = *pte_offset_kernel(pmdp, vaddr);

	if (unlikely(!pte_present(pte) ||
		     (write && (!pte_write(pte) || !pte_dirty(pte)))))
		return 1;

	*paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	return 0;

err:
	return 1;
}

static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
		    int write, int atomic, unsigned long *gpa, int *pageshift)
{
	struct mm_struct *mm = gts->ts_mm;
	struct vm_area_struct *vma;
	unsigned long paddr;
	int ret, ps;

	vma = find_vma(mm, vaddr);
	if (!vma)
		goto inval;
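
	/*
	 * Atomic lookup is faster & usually works even if called in
	 * non-atomic context. The rmb() orders the page table reads below
	 * after the caller's range-invalidate check.
	 */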
	rmb();
	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
	if (ret) {
		if (atomic)
			goto upm;
		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
			goto inval;
	}
	if (is_gru_paddr(paddr))
		goto inval;
	paddr = paddr & ~((1UL << ps) - 1);
	*gpa = uv_soc_phys_ram_to_gpa(paddr);
	*pageshift = ps;
	return VTOP_SUCCESS;

inval:
	return VTOP_INVALID;
upm:
	return VTOP_RETRY;
}
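
/*
 * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
 * CBE cacheline so that the line will be written back to home agent.
 * Otherwise the line may be silently dropped. This has no impact
 * except on performance.
 */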
static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
{
	if (unlikely(cbe)) {
		cbe->cbrexecstatus = 0;
		gru_flush_cache(cbe);
	}
}
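
/*
 * Preload the TLB with entries that may be required. Currently, preloading
 * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
 * the end of the bcopy transfer, whichever is smaller.
 */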
static void gru_preload_tlb(struct gru_state *gru,
			    struct gru_thread_state *gts, int atomic,
			    unsigned long fault_vaddr, int asid, int write,
			    unsigned char tlb_preload_count,
			    struct gru_tlb_fault_handle *tfh,
			    struct gru_control_block_extended *cbe)
{
	unsigned long vaddr = 0, gpa;
	int ret, pageshift;

	if (cbe->opccpy != OP_BCOPY)
		return;

	if (fault_vaddr == cbe->cbe_baddr0)
		vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
	else if (fault_vaddr == cbe->cbe_baddr1)
		vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;

	fault_vaddr &= PAGE_MASK;
	vaddr &= PAGE_MASK;
	vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);

	while (vaddr > fault_vaddr) {
		ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
		if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
					  GRU_PAGESIZE(pageshift)))
			return;
		gru_dbg(grudev,
			"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
			atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
			vaddr, asid, write, pageshift, gpa);
		vaddr -= PAGE_SIZE;
		STAT(tlb_preload_page);
	}
}
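
/*
 * Drop a TLB entry into the GRU. The fault is described by info
 * in a TFH.
 *	Input:
 *		cbk - user virtual address of the CB. NULL if the dropin is
 *		      being done from interrupt (atomic) context.
 *	Return:
 *		  0 = dropin, exception, or switch to UPM successful
 *		  1 = range invalidate active or failure requiring retry
 *		< 0 = error code
 */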
static int gru_try_dropin(struct gru_state *gru,
			  struct gru_thread_state *gts,
			  struct gru_tlb_fault_handle *tfh,
			  struct gru_instruction_bits *cbk)
{
	struct gru_control_block_extended *cbe = NULL;
	unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
	int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
	unsigned long gpa = 0, vaddr = 0;
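
	/*
	 * NOTE: The GRU contains magic hardware that eliminates races between
	 * TLB invalidates and TLB dropins. If an invalidate occurs
	 * in the window between reading the TFH and the subsequent TLB
	 * dropin, the dropin is ignored. This eliminates the need for
	 * additional locks.
	 */

	/*
	 * Prefetch the CBE if doing TLB preloading
	 */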
	if (unlikely(tlb_preload_count)) {
		cbe = gru_tfh_to_cbe(tfh);
		prefetchw(cbe);
	}
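
	/*
	 * Error if TFH state is IDLE or FMM mode & the user issuing a UPM call.
	 * Might be a hardware race OR a stupid user. Ignore FMM because FMM
	 * is a transient state.
	 */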
	if (tfh->status != TFHSTATUS_EXCEPTION) {
		gru_flush_cache(tfh);
		sync_core();
		if (tfh->status != TFHSTATUS_EXCEPTION)
			goto failnoexception;
		STAT(tfh_stale_on_fault);
	}
	if (tfh->state == TFHSTATE_IDLE)
		goto failidle;
	if (tfh->state == TFHSTATE_MISS_FMM && cbk)
		goto failfmm;

	write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
	vaddr = tfh->missvaddr;
	asid = tfh->missasid;
	indexway = tfh->indexway;
	if (asid == 0)
		goto failnoasid;

	rmb();
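
	/*
	 * TFH is cache resident - at least briefly. Fault resolution loses
	 * the race with TLB invalidates. If an invalidate for the same
	 * address range is in progress, the fault must be deferred and the
	 * CB switched to UPM mode.
	 */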
	if (atomic_read(&gts->ts_gms->ms_range_active))
		goto failactive;

	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
	if (ret == VTOP_INVALID)
		goto failinval;
	if (ret == VTOP_RETRY)
		goto failupm;

	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
		if (atomic || !gru_update_cch(gts)) {
			gts->ts_force_cch_reload = 1;
			goto failupm;
		}
	}

	if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
		gru_preload_tlb(gru, gts, atomic, vaddr, asid, write,
				tlb_preload_count, tfh, cbe);
		gru_flush_cache_cbe(cbe);
	}

	gru_cb_set_istatus_active(cbk);
	gts->ustats.tlbdropin++;
	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
			  GRU_PAGESIZE(pageshift));
	gru_dbg(grudev,
		"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x, rw %d, ps %d, gpa 0x%lx\n",
		atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
		vaddr, asid, indexway, write, pageshift, gpa);
	STAT(tlb_dropin);
	return 0;

failnoasid:
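	/* No asid => delayed TLB shootdown */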
	STAT(tlb_dropin_fail_no_asid);
	gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	return -EAGAIN;

failupm:
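	/* Atomic failure switch CBR to UPM */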
	tfh_user_polling_mode(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_upm);
	gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return 1;

failfmm:
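	/* FMM state on UPM call */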
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_fmm);
	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failnoexception:
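	/* TFH status did not show an exception pending */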
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_no_exception);
	gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
		tfh, tfh->status, tfh->state);
	return 0;

failidle:
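	/* TFH state was idle - no miss pending */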
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_idle);
	gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failinval:
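	/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */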
	tfh_exception(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_invalid);
	gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return -EFAULT;

failactive:
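	/* Range invalidate active. Switch to UPM iff atomic */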
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_range_active);
	gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
		tfh, vaddr);
	return 1;
}
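
/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss or a completed asynchronous CBR.
 * This is the handler called from the per-chiplet interrupt
 * entry points registered with the kernel.
 */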
static irqreturn_t gru_intr(int chiplet, int blade)
{
	struct gru_state *gru;
	struct gru_tlb_fault_map imap, dmap;
	struct gru_thread_state *gts;
	struct gru_tlb_fault_handle *tfh = NULL;
	struct completion *cmp;
	int cbrnum, ctxnum;

	STAT(intr);

	gru = &gru_base[blade]->bs_grus[chiplet];
	if (!gru) {
		dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
			raw_smp_processor_id(), chiplet);
		return IRQ_NONE;
	}
	get_clear_fault_map(gru, &imap, &dmap);
	gru_dbg(grudev,
		"cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
		smp_processor_id(), chiplet, gru->gs_gid,
		imap.fault_bits[0], imap.fault_bits[1],
		dmap.fault_bits[0], dmap.fault_bits[1]);

	for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
		STAT(intr_cbr);
		cmp = gru->gs_blade->bs_async_wq;
		if (cmp)
			complete(cmp);
		gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
			gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
	}

	for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
		STAT(intr_tfh);
		tfh = get_tfh_by_index(gru, cbrnum);
		prefetchw(tfh);
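
		/*
		 * When hardware sets a bit in the faultmap, it implicitly
		 * locks the GRU context so that it cannot be unloaded.
		 * The gts cannot change until a TFH start/writestart command
		 * is issued.
		 */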
		ctxnum = tfh->ctxnum;
		gts = gru->gs_gts[ctxnum];
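
		/* Spurious interrupts can cause this. Ignore. */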
		if (!gts) {
			STAT(intr_spurious);
			continue;
		}
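
		/*
		 * This is running in interrupt context. Trylock the mmap_sem.
		 * If it fails, retry the fault in user context.
		 */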
		gts->ustats.fmm_tlbmiss++;
		if (!gts->ts_force_cch_reload &&
		    down_read_trylock(&gts->ts_mm->mmap_sem)) {
			gru_try_dropin(gru, gts, tfh, NULL);
			up_read(&gts->ts_mm->mmap_sem);
		} else {
			tfh_user_polling_mode(tfh);
			STAT(intr_mm_lock_failed);
		}
	}
	return IRQ_HANDLED;
}

irqreturn_t gru0_intr(int irq, void *dev_id)
{
	return gru_intr(0, uv_numa_blade_id());
}

irqreturn_t gru1_intr(int irq, void *dev_id)
{
	return gru_intr(1, uv_numa_blade_id());
}

irqreturn_t gru_intr_mblade(int irq, void *dev_id)
{
	int blade;

	for_each_possible_blade(blade) {
		if (uv_blade_nr_possible_cpus(blade))
			continue;
		gru_intr(0, blade);
		gru_intr(1, blade);
	}
	return IRQ_HANDLED;
}

static int gru_user_dropin(struct gru_thread_state *gts,
			   struct gru_tlb_fault_handle *tfh,
			   void *cb)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	int ret;

	gts->ustats.upm_tlbmiss++;
	while (1) {
		wait_event(gms->ms_wait_queue,
			   atomic_read(&gms->ms_range_active) == 0);
		prefetchw(tfh);
		ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
		if (ret <= 0)
			return ret;
		STAT(call_os_wait_queue);
	}
}
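
/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. Normally means that a TLB fault has occurred.
 *	cb - user virtual address of the CB
 */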
int gru_handle_user_call_os(unsigned long cb)
{
	struct gru_tlb_fault_handle *tfh;
	struct gru_thread_state *gts;
	void *cbk;
	int ucbnum, cbrnum, ret = -EINVAL;

	STAT(call_os);
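
	/* sanity check the cb pointer */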
	ucbnum = get_cb_number((void *)cb);
	if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
		return -EINVAL;

	gts = gru_find_lock_gts(cb);
	if (!gts)
		return -EINVAL;
	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb,
		gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);

	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
		goto exit;

	gru_check_context_placement(gts);
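
	/*
	 * CCH may contain stale data if ts_force_cch_reload is set.
	 */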
	if (gts->ts_gru && gts->ts_force_cch_reload) {
		gts->ts_force_cch_reload = 0;
		gru_update_cch(gts);
	}

	ret = -EAGAIN;
	cbrnum = thread_cbr_number(gts, ucbnum);
	if (gts->ts_gru) {
		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
		cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
					       gts->ts_ctxnum, ucbnum);
		ret = gru_user_dropin(gts, tfh, cbk);
	}
exit:
	gru_unlock_gts(gts);
	return ret;
}
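
/*
 * Fetch the exception detail information for a specified cb.
 */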
int gru_get_exception_detail(unsigned long arg)
{
	struct control_block_extended_exc_detail excdet;
	struct gru_control_block_extended *cbe;
	struct gru_thread_state *gts;
	int ucbnum, cbrnum, ret;

	STAT(user_exception);
	if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
		return -EFAULT;

	gts = gru_find_lock_gts(excdet.cb);
	if (!gts)
		return -EINVAL;

	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb,
		gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
	ucbnum = get_cb_number((void *)excdet.cb);
	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
		ret = -EINVAL;
	} else if (gts->ts_gru) {
		cbrnum = thread_cbr_number(gts, ucbnum);
		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
		gru_flush_cache(cbe);
		sync_core();
		excdet.opc = cbe->opccpy;
		excdet.exopc = cbe->exopccpy;
		excdet.ecause = cbe->ecause;
		excdet.exceptdet0 = cbe->idef1upd;
		excdet.exceptdet1 = cbe->idef3upd;
		excdet.cbrstate = cbe->cbrstate;
		excdet.cbrexecstatus = cbe->cbrexecstatus;
		gru_flush_cache_cbe(cbe);
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	gru_unlock_gts(gts);

	gru_dbg(grudev,
		"cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, exdet0 0x%lx, exdet1 0x%x\n",
		excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate,
		excdet.cbrexecstatus, excdet.ecause, excdet.exceptdet0,
		excdet.exceptdet1);
	if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
		ret = -EFAULT;
	return ret;
}
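
/*
 * User request to unload a context. Content is saved for possible reload.
 */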
static int gru_unload_all_contexts(void)
{
	struct gru_thread_state *gts;
	struct gru_state *gru;
	int gid, ctxnum;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	foreach_gid(gid) {
		gru = GID_TO_GRU(gid);
		spin_lock(&gru->gs_lock);
		for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
			gts = gru->gs_gts[ctxnum];
			if (gts && mutex_trylock(&gts->ts_ctxlock)) {
				spin_unlock(&gru->gs_lock);
				gru_unload_context(gts, 1);
				mutex_unlock(&gts->ts_ctxlock);
				spin_lock(&gru->gs_lock);
			}
		}
		spin_unlock(&gru->gs_lock);
	}
	return 0;
}

int gru_user_unload_context(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_unload_context_req req;

	STAT(user_unload_context);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

	if (!req.gseg)
		return gru_unload_all_contexts();

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	if (gts->ts_gru)
		gru_unload_context(gts, 1);
	gru_unlock_gts(gts);

	return 0;
}
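
/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (Mainly for testing).
 */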
int gru_user_flush_tlb(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_flush_tlb_req req;
	struct gru_mm_struct *gms;

	STAT(user_flush_tlb);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
		req.vaddr, req.len);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	gms = gts->ts_gms;
	gru_unlock_gts(gts);
	gru_flush_tlb_range(gms, req.vaddr, req.len);

	return 0;
}
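
/*
 * Fetch GSEG statistics
 */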
long gru_get_gseg_statistics(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_get_gseg_statistics_req req;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;
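
	/*
	 * The library creates arrays of contexts for threaded programs.
	 * If no gts exists in the array, the context has never been used
	 * & all statistics are implicitly 0.
	 */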
	gts = gru_find_lock_gts(req.gseg);
	if (gts) {
		memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
		gru_unlock_gts(gts);
	} else {
		memset(&req.stats, 0, sizeof(gts->ustats));
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;

	return 0;
}
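
/*
 * Register the current task as the user of the GSEG slice.
 * Needed for TLB fault interrupt targeting.
 */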
int gru_set_context_option(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_set_context_option_req req;
	int ret = 0;

	STAT(set_context_option);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;
	gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts) {
		gts = gru_alloc_locked_gts(req.gseg);
		if (IS_ERR(gts))
			return PTR_ERR(gts);
	}

	switch (req.op) {
	case sco_blade_chiplet:
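		/* Select blade/chiplet for GRU context */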
		if (req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB ||
		    req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
		    (req.val1 >= 0 && !gru_base[req.val1])) {
			ret = -EINVAL;
		} else {
			gts->ts_user_blade_id = req.val1;
			gts->ts_user_chiplet_id = req.val0;
			gru_check_context_placement(gts);
		}
		break;
	case sco_gseg_owner:
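		/* Register the current task as the GSEG owner */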
		gts->ts_tgid_owner = current->tgid;
		break;
	case sco_cch_req_slice:
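		/* Set the CCH slice option */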
		gts->ts_cch_req_slice = req.val1 & 3;
		break;
	default:
		ret = -EINVAL;
	}
	gru_unlock_gts(gts);

	return ret;
}