/*
 * SN Platform GRU Driver
 *
 * FAULT HANDLER FOR GRU DETECTED TLB MISSES
 *
 * This file contains code that handles TLB misses within the GRU.
 * These misses are reported either via interrupts or user polling of
 * the user CB.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/prefetch.h>
#include <asm/pgtable.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

#define VTOP_SUCCESS            0
#define VTOP_INVALID            -1
#define VTOP_RETRY              -2
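/*
 * Test if a physical address falls within the GRU address range
 * (gru_start_paddr .. gru_end_paddr).
 */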
static inline int is_gru_paddr(unsigned long paddr)
{
        return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}
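/*
 * Find the vma of a GRU segment. Caller must hold mmap_sem.
 */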
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
        struct vm_area_struct *vma;

        vma = find_vma(current->mm, vaddr);
        if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
                return vma;
        return NULL;
}
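/*
 * Find and lock the gts that contains the specified user vaddr.
 *
 * Returns:
 *	- *gts with the mmap_sem locked for read and the GTS locked.
 *	- NULL if vaddr invalid OR is not a valid GSEG vaddr.
 */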
static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct gru_thread_state *gts = NULL;

        down_read(&mm->mmap_sem);
        vma = gru_find_vma(vaddr);
        if (vma)
                gts = gru_find_thread_state(vma, TSID(vaddr, vma));
        if (gts)
                mutex_lock(&gts->ts_ctxlock);
        else
                up_read(&mm->mmap_sem);
        return gts;
}
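/*
 * Same as gru_find_lock_gts(), but allocates the gts if it does not already
 * exist. On success the gts is returned locked with mmap_sem held for read;
 * on failure an ERR_PTR is returned and mmap_sem is released.
 */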
static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct gru_thread_state *gts = ERR_PTR(-EINVAL);

        down_write(&mm->mmap_sem);
        vma = gru_find_vma(vaddr);
        if (!vma)
                goto err;

        gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
        if (IS_ERR(gts))
                goto err;
        mutex_lock(&gts->ts_ctxlock);
        downgrade_write(&mm->mmap_sem);
        return gts;

err:
        up_write(&mm->mmap_sem);
        return gts;
}
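/*
 * Unlock a gts that was previously locked with gru_find_lock_gts() or
 * gru_alloc_locked_gts().
 */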
static void gru_unlock_gts(struct gru_thread_state *gts)
{
        mutex_unlock(&gts->ts_ctxlock);
        up_read(&current->mm->mmap_sem);
}
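/*
 * Set a CB.istatus to active using a user virtual address. This is done
 * just prior to a TFH RESTART so that the user does not see a stale
 * cb.istatus while the restart is in progress. The update is an in-cache
 * update only; races that lose it are harmless.
 */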
static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
        if (cbk) {
                cbk->istatus = CBS_ACTIVE;
        }
}
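/*
 * Read & clear a TFM
 *
 * The GRU has an array of fault maps. A map is private to a cpu.
 * Only one cpu will be accessing a cpu's fault map.
 *
 * This function scans the cpu-private fault map & clears all bits that
 * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
 * the GRU, atomic operations must be used to clear bits.
 */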
static void get_clear_fault_map(struct gru_state *gru,
                                struct gru_tlb_fault_map *imap,
                                struct gru_tlb_fault_map *dmap)
{
        unsigned long i, k;
        struct gru_tlb_fault_map *tfm;

        tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
        prefetchw(tfm);
        for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
                k = tfm->fault_bits[i];
                if (k)
                        k = xchg(&tfm->fault_bits[i], 0UL);
                imap->fault_bits[i] = k;
                k = tfm->done_bits[i];
                if (k)
                        k = xchg(&tfm->done_bits[i], 0UL);
                dmap->fault_bits[i] = k;
        }
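        /*
         * Flush the fault map lines from the cache; this avoids the cpu
         * holding a stale copy of a map that the GRU updates asynchronously.
         */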
        gru_flush_cache(tfm);
}
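/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 *	returns:
 *		  0 - successful
 *		< 0 - error code
 *		  1 - (atomic only) try again in non-atomic context
 */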
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
                                 unsigned long vaddr, int write,
                                 unsigned long *paddr, int *pageshift)
{
        struct page *page;

#ifdef CONFIG_HUGETLB_PAGE
        *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
        *pageshift = PAGE_SHIFT;
#endif
        if (get_user_pages
            (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
                return -EFAULT;
        *paddr = page_to_phys(page);
        put_page(page);
        return 0;
}
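/*
 * atomic_pte_lookup
 *
 * Convert a user virtual address to a physical address by walking the
 * page tables directly. Only x86_64 large (2MB) pages are handled specially.
 *
 * NOTE: mmap_sem is already held on entry to this function. This
 * guarantees existence of the page tables.
 */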
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
        int write, unsigned long *paddr, int *pageshift)
{
        pgd_t *pgdp;
        pmd_t *pmdp;
        pud_t *pudp;
        pte_t pte;

        pgdp = pgd_offset(vma->vm_mm, vaddr);
        if (unlikely(pgd_none(*pgdp)))
                goto err;

        pudp = pud_offset(pgdp, vaddr);
        if (unlikely(pud_none(*pudp)))
                goto err;

        pmdp = pmd_offset(pudp, vaddr);
        if (unlikely(pmd_none(*pmdp)))
                goto err;
#ifdef CONFIG_X86_64
        if (unlikely(pmd_large(*pmdp)))
                pte = *(pte_t *) pmdp;
        else
#endif
                pte = *pte_offset_kernel(pmdp, vaddr);

        if (unlikely(!pte_present(pte) ||
                     (write && (!pte_write(pte) || !pte_dirty(pte)))))
                return 1;

        *paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
        *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
        *pageshift = PAGE_SHIFT;
#endif
        return 0;

err:
        return 1;
}
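/*
 * Convert a user virtual address to a GRU "gpa" (global physical address)
 * suitable for a TLB dropin. Returns VTOP_SUCCESS, VTOP_INVALID, or
 * VTOP_RETRY (retry the lookup in non-atomic/UPM context).
 */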
static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
                    int write, int atomic, unsigned long *gpa, int *pageshift)
{
        struct mm_struct *mm = gts->ts_mm;
        struct vm_area_struct *vma;
        unsigned long paddr;
        int ret, ps;

        vma = find_vma(mm, vaddr);
        if (!vma)
                goto inval;
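        /*
         * Atomic lookup is faster & usually works even if called in
         * non-atomic context.
         */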
        rmb();
        ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
        if (ret) {
                if (atomic)
                        goto upm;
                if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
                        goto inval;
        }
        if (is_gru_paddr(paddr))
                goto inval;
        paddr = paddr & ~((1UL << ps) - 1);
        *gpa = uv_soc_phys_ram_to_gpa(paddr);
        *pageshift = ps;
        return VTOP_SUCCESS;

inval:
        return VTOP_INVALID;
upm:
        return VTOP_RETRY;
}
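/*
 * Flush a CBE from cache. The CBE is clean in the cache. Dirtying the
 * cacheline first forces the line to be written back rather than silently
 * dropped; this matters only for performance.
 */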
static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
{
        if (unlikely(cbe)) {
                cbe->cbrexecstatus = 0;
                gru_flush_cache(cbe);
        }
}
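/*
 * Preload the TLB with entries that may be required. Currently, preloading
 * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
 * the end of the bcopy transfer, whichever is smaller.
 */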
static void gru_preload_tlb(struct gru_state *gru,
                            struct gru_thread_state *gts, int atomic,
                            unsigned long fault_vaddr, int asid, int write,
                            unsigned char tlb_preload_count,
                            struct gru_tlb_fault_handle *tfh,
                            struct gru_control_block_extended *cbe)
{
        unsigned long vaddr = 0, gpa;
        int ret, pageshift;

        if (cbe->opccpy != OP_BCOPY)
                return;

        if (fault_vaddr == cbe->cbe_baddr0)
                vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
        else if (fault_vaddr == cbe->cbe_baddr1)
                vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;

        fault_vaddr &= PAGE_MASK;
        vaddr &= PAGE_MASK;
        vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);

        while (vaddr > fault_vaddr) {
                ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
                if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
                                          GRU_PAGESIZE(pageshift)))
                        return;
                gru_dbg(grudev,
                        "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
                        atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
                        vaddr, asid, write, pageshift, gpa);
                vaddr -= PAGE_SIZE;
                STAT(tlb_preload_page);
        }
}
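/*
 * Drop a TLB entry into the GRU. The fault is described by info
 * in a TFH.
 * Return
 *	  0 = dropin, exception, or switch to UPM successful
 *	  1 = range invalidate active
 *	< 0 = error code
 */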
static int gru_try_dropin(struct gru_state *gru,
                          struct gru_thread_state *gts,
                          struct gru_tlb_fault_handle *tfh,
                          struct gru_instruction_bits *cbk)
{
        struct gru_control_block_extended *cbe = NULL;
        unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
        int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
        unsigned long gpa = 0, vaddr = 0;
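        /* Prefetch the CBE if TLB preloading is enabled for this context */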
        if (unlikely(tlb_preload_count)) {
                cbe = gru_tfh_to_cbe(tfh);
                prefetchw(cbe);
        }
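        /*
         * The TFH must show an exception pending. If the first read looks
         * stale, flush the TFH cacheline and check again before failing.
         */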
        if (tfh->status != TFHSTATUS_EXCEPTION) {
                gru_flush_cache(tfh);
                sync_core();
                if (tfh->status != TFHSTATUS_EXCEPTION)
                        goto failnoexception;
                STAT(tfh_stale_on_fault);
        }
        if (tfh->state == TFHSTATE_IDLE)
                goto failidle;
        if (tfh->state == TFHSTATE_MISS_FMM && cbk)
                goto failfmm;

        write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
        vaddr = tfh->missvaddr;
        asid = tfh->missasid;
        indexway = tfh->indexway;
        if (asid == 0)
                goto failnoasid;

        rmb();  /* TFH must be cache resident before reading ms_range_active */
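
        /*
         * TFH is cache resident - at least briefly. Fail the dropin
         * if a range invalidate is active.
         */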
        if (atomic_read(&gts->ts_gms->ms_range_active))
                goto failactive;

        ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
        if (ret == VTOP_INVALID)
                goto failinval;
        if (ret == VTOP_RETRY)
                goto failupm;

        if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
                gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
                if (atomic || !gru_update_cch(gts)) {
                        gts->ts_force_cch_reload = 1;
                        goto failupm;
                }
        }

        if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
                gru_preload_tlb(gru, gts, atomic, vaddr, asid, write,
                                tlb_preload_count, tfh, cbe);
                gru_flush_cache_cbe(cbe);
        }

        gru_cb_set_istatus_active(cbk);
        gts->ustats.tlbdropin++;
        tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
                          GRU_PAGESIZE(pageshift));
        gru_dbg(grudev,
                "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
                " rw %d, ps %d, gpa 0x%lx\n",
                atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
                indexway, write, pageshift, gpa);
        STAT(tlb_dropin);
        return 0;

failnoasid:
        /* No asid => delayed TLB mod */
        STAT(tlb_dropin_fail_no_asid);
        gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
        if (!cbk)
                tfh_user_polling_mode(tfh);
        else
                gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        return -EAGAIN;

failupm:
        /* Atomic failure switch CBR to UPM */
        tfh_user_polling_mode(tfh);
        gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_upm);
        gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
        return 1;

failfmm:
        /* FMM state on UPM call */
        gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_fmm);
        gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
        return 0;

failnoexception:
        /* TFH status did not show exception pending */
        gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        if (cbk)
                gru_flush_cache(cbk);
        STAT(tlb_dropin_fail_no_exception);
        gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
                tfh, tfh->status, tfh->state);
        return 0;

failidle:
        /* TFH state was idle - no miss pending */
        gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        if (cbk)
                gru_flush_cache(cbk);
        STAT(tlb_dropin_fail_idle);
        gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
        return 0;

failinval:
        /* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
        tfh_exception(tfh);
        gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_invalid);
        gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
        return -EFAULT;

failactive:
        /* Range invalidate active. Switch to UPM iff atomic */
        if (!cbk)
                tfh_user_polling_mode(tfh);
        else
                gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_range_active);
        gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
                tfh, vaddr);
        return 1;
}
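/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss (or a CB completion for async operations).
 * Note that this is the interrupt handler that is registered with linux
 * interrupt handlers.
 */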
static irqreturn_t gru_intr(int chiplet, int blade)
{
        struct gru_state *gru;
        struct gru_tlb_fault_map imap, dmap;
        struct gru_thread_state *gts;
        struct gru_tlb_fault_handle *tfh = NULL;
        struct completion *cmp;
        int cbrnum, ctxnum;

        STAT(intr);

        gru = &gru_base[blade]->bs_grus[chiplet];
        if (!gru) {
                dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
                        raw_smp_processor_id(), chiplet);
                return IRQ_NONE;
        }
        get_clear_fault_map(gru, &imap, &dmap);
        gru_dbg(grudev,
                "cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
                smp_processor_id(), chiplet, gru->gs_gid,
                imap.fault_bits[0], imap.fault_bits[1],
                dmap.fault_bits[0], dmap.fault_bits[1]);

        for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
                STAT(intr_cbr);
                cmp = gru->gs_blade->bs_async_wq;
                if (cmp)
                        complete(cmp);
                gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
                        gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
        }

        for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
                STAT(intr_tfh);
                tfh = get_tfh_by_index(gru, cbrnum);
                prefetchw(tfh);
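                /*
                 * When hardware sets a bit in the faultmap, it implicitly
                 * locks the GRU context so that it cannot be unloaded.
                 * The gts cannot change until a TFH start/writestart command
                 * is issued.
                 */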
                ctxnum = tfh->ctxnum;
                gts = gru->gs_gts[ctxnum];

                /* Spurious interrupts can cause this. Ignore. */
                if (!gts) {
                        STAT(intr_spurious);
                        continue;
                }
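                /*
                 * This is running in interrupt context. Trylock the mmap_sem.
                 * If it fails, retry the fault in user (UPM) mode.
                 */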
                gts->ustats.fmm_tlbmiss++;
                if (!gts->ts_force_cch_reload &&
                    down_read_trylock(&gts->ts_mm->mmap_sem)) {
                        gru_try_dropin(gru, gts, tfh, NULL);
                        up_read(&gts->ts_mm->mmap_sem);
                } else {
                        tfh_user_polling_mode(tfh);
                        STAT(intr_mm_lock_failed);
                }
        }
        return IRQ_HANDLED;
}

irqreturn_t gru0_intr(int irq, void *dev_id)
{
        return gru_intr(0, uv_numa_blade_id());
}

irqreturn_t gru1_intr(int irq, void *dev_id)
{
        return gru_intr(1, uv_numa_blade_id());
}
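
/*
 * Interrupt handler for GRUs on blades that have no cpus of their own.
 * Blades with cpus are skipped here; their GRU interrupts are handled by
 * gru0_intr()/gru1_intr() on the local cpus.
 */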
irqreturn_t gru_intr_mblade(int irq, void *dev_id)
{
        int blade;

        for_each_possible_blade(blade) {
                if (uv_blade_nr_possible_cpus(blade))
                        continue;
                gru_intr(0, blade);
                gru_intr(1, blade);
        }
        return IRQ_HANDLED;
}
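/*
 * Handle a fault detected by user polling of the CB (UPM mode). Wait for
 * any active range invalidates to finish, then retry the dropin until it
 * succeeds or fails permanently.
 */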
static int gru_user_dropin(struct gru_thread_state *gts,
                           struct gru_tlb_fault_handle *tfh,
                           void *cb)
{
        struct gru_mm_struct *gms = gts->ts_gms;
        int ret;

        gts->ustats.upm_tlbmiss++;
        while (1) {
                wait_event(gms->ms_wait_queue,
                           atomic_read(&gms->ms_range_active) == 0);
                prefetchw(tfh);
                ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
                if (ret <= 0)
                        return ret;
                STAT(call_os_wait_queue);
        }
}
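/*
 * This interface is called as a result of a user detecting a "call OS"
 * request from a CB. It normally means that a TLB miss must be handled
 * here.
 *	cb - user virtual address of the CB
 */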
int gru_handle_user_call_os(unsigned long cb)
{
        struct gru_tlb_fault_handle *tfh;
        struct gru_thread_state *gts;
        void *cbk;
        int ucbnum, cbrnum, ret = -EINVAL;

        STAT(call_os);

        /* sanity check the cb pointer */
        ucbnum = get_cb_number((void *)cb);
        if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
                return -EINVAL;

        gts = gru_find_lock_gts(cb);
        if (!gts)
                return -EINVAL;
        gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb,
                gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);

        if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
                goto exit;

        gru_check_context_placement(gts);
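        /*
         * CCH may contain stale data if ts_force_cch_reload is set.
         */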
        if (gts->ts_gru && gts->ts_force_cch_reload) {
                gts->ts_force_cch_reload = 0;
                gru_update_cch(gts);
        }

        ret = -EAGAIN;
        cbrnum = thread_cbr_number(gts, ucbnum);
        if (gts->ts_gru) {
                tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
                cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
                                gts->ts_ctxnum, ucbnum);
                ret = gru_user_dropin(gts, tfh, cbk);
        }
exit:
        gru_unlock_gts(gts);
        return ret;
}
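/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */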
int gru_get_exception_detail(unsigned long arg)
{
        struct control_block_extended_exc_detail excdet;
        struct gru_control_block_extended *cbe;
        struct gru_thread_state *gts;
        int ucbnum, cbrnum, ret;

        STAT(user_exception);
        if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
                return -EFAULT;

        gts = gru_find_lock_gts(excdet.cb);
        if (!gts)
                return -EINVAL;

        gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb,
                gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
        ucbnum = get_cb_number((void *)excdet.cb);
        if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
                ret = -EINVAL;
        } else if (gts->ts_gru) {
                cbrnum = thread_cbr_number(gts, ucbnum);
                cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
                gru_flush_cache(cbe);
                sync_core();
                excdet.opc = cbe->opccpy;
                excdet.exopc = cbe->exopccpy;
                excdet.ecause = cbe->ecause;
                excdet.exceptdet0 = cbe->idef1upd;
                excdet.exceptdet1 = cbe->idef3upd;
                excdet.cbrstate = cbe->cbrstate;
                excdet.cbrexecstatus = cbe->cbrexecstatus;
                gru_flush_cache_cbe(cbe);
                ret = 0;
        } else {
                ret = -EAGAIN;
        }
        gru_unlock_gts(gts);

        gru_dbg(grudev,
                "cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
                "exdet0 0x%lx, exdet1 0x%x\n",
                excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
                excdet.ecause, excdet.exceptdet0, excdet.exceptdet1);
        if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
                ret = -EFAULT;
        return ret;
}
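/*
 * Unload all currently loaded GRU contexts. Requires CAP_SYS_ADMIN.
 */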
static int gru_unload_all_contexts(void)
{
        struct gru_thread_state *gts;
        struct gru_state *gru;
        int gid, ctxnum;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        foreach_gid(gid) {
                gru = GID_TO_GRU(gid);
                spin_lock(&gru->gs_lock);
                for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
                        gts = gru->gs_gts[ctxnum];
                        if (gts && mutex_trylock(&gts->ts_ctxlock)) {
                                spin_unlock(&gru->gs_lock);
                                gru_unload_context(gts, 1);
                                mutex_unlock(&gts->ts_ctxlock);
                                spin_lock(&gru->gs_lock);
                        }
                }
                spin_unlock(&gru->gs_lock);
        }
        return 0;
}
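/*
 * User request to unload a gseg context. A gseg value of 0 unloads all
 * contexts (see gru_unload_all_contexts() above).
 */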
int gru_user_unload_context(unsigned long arg)
{
        struct gru_thread_state *gts;
        struct gru_unload_context_req req;

        STAT(user_unload_context);
        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;

        gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

        if (!req.gseg)
                return gru_unload_all_contexts();

        gts = gru_find_lock_gts(req.gseg);
        if (!gts)
                return -EINVAL;

        if (gts->ts_gru)
                gru_unload_context(gts, 1);
        gru_unlock_gts(gts);

        return 0;
}
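/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (arg is a gru_flush_tlb_req structure).
 */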
int gru_user_flush_tlb(unsigned long arg)
{
        struct gru_thread_state *gts;
        struct gru_flush_tlb_req req;
        struct gru_mm_struct *gms;

        STAT(user_flush_tlb);
        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;

        gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
                req.vaddr, req.len);

        gts = gru_find_lock_gts(req.gseg);
        if (!gts)
                return -EINVAL;

        gms = gts->ts_gms;
        gru_unlock_gts(gts);
        gru_flush_tlb_range(gms, req.vaddr, req.len);

        return 0;
}
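/*
 * Fetch GSEG statistics for a user gseg.
 */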
long gru_get_gseg_statistics(unsigned long arg)
{
        struct gru_thread_state *gts;
        struct gru_get_gseg_statistics_req req;

        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;
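        /*
         * The library creates arrays of contexts for threaded programs.
         * If no gts exists in the array, the context has never been used & all
         * statistics are implicitly 0.
         */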
        gts = gru_find_lock_gts(req.gseg);
        if (gts) {
                memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
                gru_unlock_gts(gts);
        } else {
                memset(&req.stats, 0, sizeof(gts->ustats));
        }

        if (copy_to_user((void __user *)arg, &req, sizeof(req)))
                return -EFAULT;

        return 0;
}
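/*
 * Set a context option for a gseg: blade/chiplet placement, gseg ownership,
 * or the requested CCH slice.
 */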
int gru_set_context_option(unsigned long arg)
{
        struct gru_thread_state *gts;
        struct gru_set_context_option_req req;
        int ret = 0;

        STAT(set_context_option);
        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;
        gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n",
                req.op, req.gseg, req.val1);

        gts = gru_find_lock_gts(req.gseg);
        if (!gts) {
                gts = gru_alloc_locked_gts(req.gseg);
                if (IS_ERR(gts))
                        return PTR_ERR(gts);
        }

        switch (req.op) {
        case sco_blade_chiplet:
                /* Select blade/chiplet for GRU context */
                if (req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB ||
                    req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
                    (req.val1 >= 0 && !gru_base[req.val1])) {
                        ret = -EINVAL;
                } else {
                        gts->ts_user_blade_id = req.val1;
                        gts->ts_user_chiplet_id = req.val0;
                        gru_check_context_placement(gts);
                }
                break;
        case sco_gseg_owner:
                /* Register the current task as the GSEG owner */
                gts->ts_tgid_owner = current->tgid;
                break;
        case sco_cch_req_slice:
                /* Set the CCH slice option */
                gts->ts_cch_req_slice = req.val1 & 3;
                break;
        default:
                ret = -EINVAL;
        }
        gru_unlock_gts(gts);

        return ret;
}