/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include "amdgpu_amdkfd.h"

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
#include "kfd_iommu.h"

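/*
 * List of struct kfd_process (field kfd_process)
 * Unique/indexed by mm_struct*
 */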
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);

/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

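/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */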
static struct workqueue_struct *kfd_restore_wq;

static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread,
					struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);

int kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
	if (!kfd_restore_wq)
		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

	if (!kfd_process_wq || !kfd_restore_wq) {
		kfd_process_destroy_wq();
		return -ENOMEM;
	}

	return 0;
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
	if (kfd_restore_wq) {
		destroy_workqueue(kfd_restore_wq);
		kfd_restore_wq = NULL;
	}
}

static void kfd_process_free_gpuvm(struct kgd_mem *mem,
				struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;

	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
}

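/* kfd_process_alloc_gpuvm - Allocate GPU VM memory on behalf of @pdd
 *	Allocates a buffer at @gpu_va, maps it for GPU access and creates
 *	an object handle for it in the per-device idr. If @kptr is
 *	non-NULL, the buffer is also mapped into the kernel address space
 *	and the CPU address is returned through it.
 */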
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
				uint64_t gpu_va, uint32_t size,
				uint32_t flags, void **kptr)
{
	struct kfd_dev *kdev = pdd->dev;
	struct kgd_mem *mem = NULL;
	int handle;
	int err;

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
						pdd->vm, &mem, NULL, flags);
	if (err)
		goto err_alloc_mem;

	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
	if (err)
		goto err_map_mem;

	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

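	/* Create an obj handle so kfd_process_device_remove_obj_handle
	 * will take care of the BO removal when the process finishes.
	 * We do not need to take p->mutex, because the process is just
	 * created and the ioctls have not yet had a chance to run.
	 */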
	handle = kfd_process_device_create_obj_handle(pdd, mem);
	if (handle < 0) {
		err = handle;
		goto free_gpuvm;
	}

	if (kptr) {
		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
				mem, kptr, NULL);
		if (err) {
			pr_debug("Map GTT BO to kernel failed\n");
			goto free_obj_handle;
		}
	}

	return err;

free_obj_handle:
	kfd_process_device_remove_obj_handle(pdd, handle);
free_gpuvm:
sync_memory_failed:
	kfd_process_free_gpuvm(mem, pdd);
	return err;

err_map_mem:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
err_alloc_mem:
	if (kptr)
		*kptr = NULL;
	return err;
}

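/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 *	process for IB usage. The memory reserved is for KFD to submit
 *	IBs to AMDGPU from kernel. If the memory is reserved
 *	successfully, ib_kaddr will have the CPU/kernel
 *	address. Check ib_kaddr before accessing the memory.
 */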
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = ALLOC_MEM_FLAGS_GTT |
			ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
			ALLOC_MEM_FLAGS_WRITABLE |
			ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (qpd->ib_kaddr || !qpd->ib_base)
		return 0;

	/* ib_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
				      &kaddr);
	if (ret)
		return ret;

	qpd->ib_kaddr = kaddr;

	return 0;
}

struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

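	/*
	 * Take the kfd processes mutex before starting process creation, so
	 * that two threads of the same process can't race to create two
	 * kfd_process structures for it.
	 */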
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process)
		pr_debug("Process already found\n");
	else
		process = create_process(thread, filep);

	mutex_unlock(&kfd_processes_mutex);

	return process;
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);
	if (!process)
		return ERR_PTR(-EINVAL);

	return process;
}

static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
				   kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}

static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
	struct kfd_process *p = pdd->process;
	void *mem;
	int id;

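	/*
	 * Remove all handles from the idr and release the appropriate
	 * local memory object for each of them.
	 */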
	idr_for_each_entry(&pdd->alloc_idr, mem, id) {
		struct kfd_process_device *peer_pdd;

		list_for_each_entry(peer_pdd, &p->per_device_data,
				    per_device_list) {
			if (!peer_pdd->vm)
				continue;
			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
				peer_pdd->dev->kgd, mem, peer_pdd->vm);
		}

		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
		kfd_process_device_remove_obj_handle(pdd, id);
	}
}

static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		kfd_process_device_free_bos(pdd);
}

static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	struct kfd_process_device *pdd, *temp;

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
			 pdd->dev->id, p->pasid);

		if (pdd->drm_file) {
			amdgpu_amdkfd_gpuvm_release_process_vm(
				pdd->dev->kgd, pdd->vm);
			fput(pdd->drm_file);
		} else if (pdd->vm)
			amdgpu_amdkfd_gpuvm_destroy_process_vm(
				pdd->dev->kgd, pdd->vm);

		list_del(&pdd->per_device_list);

		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				get_order(KFD_CWSR_TBA_TMA_SIZE));

		kfree(pdd->qpd.doorbell_bitmap);
		idr_destroy(&pdd->alloc_idr);

		kfree(pdd);
	}
}

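/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */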
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);

	kfd_iommu_unbind_process(p);

	kfd_process_free_outstanding_kfd_bos(p);

	kfd_process_destroy_pdds(p);
	dma_fence_put(p->ef);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	kfd_free_process_doorbells(p);

	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}

static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
	struct kfd_process *p = container_of(rcu, struct kfd_process, rcu);

	kfd_unref_process(p);
}

static void kfd_process_notifier_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd = NULL;

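	/*
	 * The kfd_process structure can not be freed because the
	 * mmu_notifier srcu is read locked.
	 */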
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	cancel_delayed_work_sync(&p->eviction_work);
	cancel_delayed_work_sync(&p->restore_work);

	mutex_lock(&p->mutex);

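	/* Iterate over all process device data structures and if the
	 * pdd is in debug mode, we should first force unregistration,
	 * then we will be able to destroy the queues.
	 */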
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;

		mutex_lock(kfd_get_dbgmgr_mutex());
		if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
			if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
				kfd_dbgmgr_destroy(dev->dbgmgr);
				dev->dbgmgr = NULL;
			}
		}
		mutex_unlock(kfd_get_dbgmgr_mutex());
	}

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;

	mutex_unlock(&p->mutex);

	mmu_notifier_unregister_no_release(&p->mmu_notifier, mm);
	mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
};

static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
	unsigned long offset;
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;
		struct qcm_process_device *qpd = &pdd->qpd;

		if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
			continue;

		offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
			<< PAGE_SHIFT;
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failure to set tba address. error %d.\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	return 0;
}

static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = ALLOC_MEM_FLAGS_GTT |
		ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
		return 0;

	/* cwsr_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
				      KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
	if (ret)
		return ret;

	qpd->cwsr_kaddr = kaddr;
	qpd->tba_addr = qpd->cwsr_base;

	memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

	return 0;
}

static struct kfd_process *create_process(const struct task_struct *thread,
					struct file *filep)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	if (kfd_alloc_process_doorbells(process) < 0)
		goto err_alloc_doorbells;

	kref_init(&process->ref);

	mutex_init(&process->mutex);

	process->mm = thread->mm;

	/* Register the mmu_notifier so process teardown is triggered when
	 * the mm is released.
	 */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_mmu_notifier;

	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
		     (uintptr_t)process->mm);

	process->lead_thread = thread->group_leader;
	get_task_struct(process->lead_thread);

	INIT_LIST_HEAD(&process->per_device_data);

	kfd_event_init_process(process);

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	process->is_32bit_user_mode = in_compat_syscall();
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
	process->last_restore_timestamp = get_jiffies_64();

	err = kfd_process_init_cwsr_apu(process, filep);
	if (err)
		goto err_init_cwsr;

	return process;

err_init_cwsr:
	kfd_process_free_outstanding_kfd_bos(process);
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	hash_del_rcu(&process->kfd_processes);
	synchronize_rcu();
	mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
	mutex_destroy(&process->mutex);
	kfd_free_process_doorbells(process);
err_alloc_doorbells:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

static int init_doorbell_bitmap(struct qcm_process_device *qpd,
				struct kfd_dev *dev)
{
	unsigned int i;

	if (!KFD_IS_SOC15(dev->device_info->asic_family))
		return 0;

	qpd->doorbell_bitmap =
		kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
				     BITS_PER_BYTE), GFP_KERNEL);
	if (!qpd->doorbell_bitmap)
		return -ENOMEM;

	/* Mask out any reserved doorbells */
	for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS; i++)
		if ((dev->shared_resources.reserved_doorbell_mask & i) ==
		    dev->shared_resources.reserved_doorbell_val) {
			set_bit(i, qpd->doorbell_bitmap);
			pr_debug("reserved doorbell 0x%03x\n", i);
		}

	return 0;
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}

struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	if (init_doorbell_bitmap(&pdd->qpd, dev)) {
		pr_err("Failed to init doorbell for process\n");
		kfree(pdd);
		return NULL;
	}

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->qpd.evicted = 0;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	list_add(&pdd->per_device_list, &p->per_device_data);

	/* Init idr used for memory handle translation */
	idr_init(&pdd->alloc_idr);

	return pdd;
}

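/**
 * kfd_process_device_init_vm - Initialize the process device VM
 * @pdd: The process device that needs to be initialized
 * @drm_file: Optional pointer to a DRM file descriptor
 *
 * If @drm_file is specified, it will be used to acquire the VM from
 * that file descriptor. If successful, the @pdd takes ownership of
 * the file descriptor.
 *
 * If @drm_file is NULL, a new VM is created.
 *
 * Returns 0 on success, -errno on failure.
 */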
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file)
{
	struct kfd_process *p;
	struct kfd_dev *dev;
	int ret;

	if (pdd->vm)
		return drm_file ? -EBUSY : 0;

	p = pdd->process;
	dev = pdd->dev;

	if (drm_file)
		ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
			dev->kgd, drm_file, p->pasid,
			&pdd->vm, &p->kgd_process_info, &p->ef);
	else
		ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
			&pdd->vm, &p->kgd_process_info, &p->ef);
	if (ret) {
		pr_err("Failed to create process VM object\n");
		return ret;
	}

	ret = kfd_process_device_reserve_ib_mem(pdd);
	if (ret)
		goto err_reserve_ib_mem;
	ret = kfd_process_device_init_cwsr_dgpu(pdd);
	if (ret)
		goto err_init_cwsr;

	pdd->drm_file = drm_file;

	return 0;

err_init_cwsr:
err_reserve_ib_mem:
	kfd_process_device_free_bos(pdd);
	if (!drm_file)
		amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
	pdd->vm = NULL;

	return ret;
}

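/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */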
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						      struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	err = kfd_iommu_bind_process_to_device(pdd);
	if (err)
		return ERR_PTR(err);

	err = kfd_process_device_init_vm(pdd, NULL);
	if (err)
		return ERR_PTR(err);

	return pdd;
}

struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !(list_empty(&p->per_device_data));
}

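/* Create specific handle mapped to mem from process local memory idr
 * Assumes that the process lock is held.
 */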
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					 void *mem)
{
	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

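/* Translate specific handle from process local memory idr
 * Assumes that the process lock is held.
 */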
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle < 0)
		return NULL;

	return idr_find(&pdd->alloc_idr, handle);
}

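/* Remove specific handle from process local memory idr
 * Assumes that the process lock is held.
 */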
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle >= 0)
		idr_remove(&pdd->alloc_idr, handle);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *p;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	p = find_process_by_mm(mm);
	if (p)
		kref_get(&p->ref);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

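/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device (qpd->evicted), so
 * evictions from different sources can be nested safely.
 */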
int kfd_process_evict_queues(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int r = 0;
	unsigned int n_evicted = 0;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
							    &pdd->qpd);
		if (r) {
			pr_err("Failed to evict process queues\n");
			goto fail;
		}
		n_evicted++;
	}

	return r;

fail:
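	/* To keep state consistent, roll back the partial eviction by
	 * restoring the queues that were already evicted.
	 */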
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		if (n_evicted == 0)
			break;
		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd))
			pr_err("Failed to restore queues\n");

		n_evicted--;
	}

	return r;
}

/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int r, ret = 0;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd);
		if (r) {
			pr_err("Failed to restore process queues\n");
			if (!ret)
				ret = r;
		}
	}

	return ret;
}

static void evict_process_worker(struct work_struct *work)
{
	int ret;
	struct kfd_process *p;
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);

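	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid.
	 */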
	p = container_of(dwork, struct kfd_process, eviction_work);
	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
		  "Eviction fence mismatch\n");

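	/* A narrow window of overlap between restore and evict work items
	 * is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
	 * unreserves the KFD BOs, the process can be evicted again, but
	 * restore still has a few more steps to finish. So wait for any
	 * previous restore work to complete before evicting.
	 */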
	flush_delayed_work(&p->restore_work);

	pr_debug("Started evicting pasid %d\n", p->pasid);
	ret = kfd_process_evict_queues(p);
	if (!ret) {
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
		queue_delayed_work(kfd_restore_wq, &p->restore_work,
				msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

		pr_debug("Finished evicting pasid %d\n", p->pasid);
	} else
		pr_err("Failed to evict queues of pasid %d\n", p->pasid);
}

static void restore_process_worker(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct kfd_process *p;
	struct kfd_process_device *pdd;
	int ret = 0;

	dwork = to_delayed_work(work);

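	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid.
	 */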
	p = container_of(dwork, struct kfd_process, restore_work);

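	/* Call restore_process_bos on the first KGD device. This function
	 * takes care of restoring the whole process including other devices.
	 * Restore can fail if enough memory is not available. If so,
	 * reschedule again.
	 */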
	pdd = list_first_entry(&p->per_device_data,
			       struct kfd_process_device,
			       per_device_list);

	pr_debug("Started restoring pasid %d\n", p->pasid);

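	/* Set last_restore_timestamp before the restoration attempt.
	 * Otherwise it would have to be set by KGD (restore_process_bos)
	 * before the KFD BOs are unreserved; if not, the process could be
	 * evicted again before the timestamp is set. If restore fails, the
	 * timestamp is set again on the next attempt, so the minimum GPU
	 * quantum becomes PROCESS_ACTIVE_TIME_MS minus the time taken by
	 * the two calls below.
	 */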
	p->last_restore_timestamp = get_jiffies_64();
	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
						      &p->ef);
	if (ret) {
		pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
				msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
		WARN(!ret, "reschedule restore work failed\n");
		return;
	}

	ret = kfd_process_restore_queues(p);
	if (!ret)
		pr_debug("Finished restoring pasid %d\n", p->pasid);
	else
		pr_err("Failed to restore queues of pasid %d\n", p->pasid);
}

void kfd_suspend_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		cancel_delayed_work_sync(&p->eviction_work);
		cancel_delayed_work_sync(&p->restore_work);

		if (kfd_process_evict_queues(p))
			pr_err("Failed to suspend process %d\n", p->pasid);
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}

int kfd_resume_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
			pr_err("Restore process %d failed during resume\n",
			       p->pasid);
			ret = -EFAULT;
		}
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
	return ret;
}

int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;

	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

void kfd_flush_tlb(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	const struct kfd2kgd_calls *f2g = dev->kfd2kgd;

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
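		/* Nothing to flush until a VMID is assigned, which
		 * only happens when the first queue is created.
		 */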
		if (pdd->qpd.vmid)
			f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
	} else {
		f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
	}
}

#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID %d:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif