// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"

#include <linux/genalloc.h>
#include <linux/slab.h>

static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);

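/*
 * Look up the pgt_info of a shadow hop address in the per-context shadow
 * hash. Returns NULL if the hop was never allocated for this context.
 */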
static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = NULL;

	hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
				(unsigned long) hop_addr)
		if (hop_addr == pgt_info->shadow_addr)
			break;

	return pgt_info;
}

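/* free the hop's physical page, its shadow copy and the pgt_info itself */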
static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
{
	struct hl_device *hdev = ctx->hdev;

	gen_pool_free(hdev->mmu_pgt_pool, pgt_info->phys_addr,
			hdev->asic_prop.mmu_hop_table_size);
	hash_del(&pgt_info->node);
	kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
	kfree(pgt_info);
}

static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);

	_free_hop(ctx, pgt_info);
}

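/*
 * Allocate a new hop: a physical page table from the device pool plus a
 * zeroed shadow copy in host memory. Returns the shadow address of the hop,
 * or ULLONG_MAX on failure.
 */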
static u64 alloc_hop(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pgt_info *pgt_info;
	u64 phys_addr, shadow_addr;

	pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
	if (!pgt_info)
		return ULLONG_MAX;

	phys_addr = (u64) gen_pool_alloc(hdev->mmu_pgt_pool,
					prop->mmu_hop_table_size);
	if (!phys_addr) {
		dev_err(hdev->dev, "failed to allocate page\n");
		goto pool_add_err;
	}

	shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
						GFP_KERNEL);
	if (!shadow_addr)
		goto shadow_err;

	pgt_info->phys_addr = phys_addr;
	pgt_info->shadow_addr = shadow_addr;
	pgt_info->ctx = ctx;
	pgt_info->num_of_ptes = 0;
	hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);

	return shadow_addr;

shadow_err:
	gen_pool_free(hdev->mmu_pgt_pool, phys_addr, prop->mmu_hop_table_size);
pool_add_err:
	kfree(pgt_info);

	return ULLONG_MAX;
}

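/*
 * hop0 tables are preallocated per ASID, one copy in device memory and one
 * shadow copy on the host, so they are never kept in the shadow hash.
 */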
static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
{
	return ctx->hdev->asic_prop.mmu_pgt_addr +
			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}

static inline u64 get_hop0_addr(struct hl_ctx *ctx)
{
	return (u64) (uintptr_t) ctx->hdev->mmu_shadow_hop0 +
			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}

static inline void flush(struct hl_ctx *ctx)
{
	/* flush all writes from all cores to reach PCI */
	mb();
	ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
}

/* transform the value to physical address when writing to H/W */
static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
{
	/*
	 * The value to write is actually the address of the next shadow hop +
	 * flags at the 12 LSBs.
	 * Hence in order to get the value to write to the physical PTE, we
	 * clear the 12 LSBs and translate the shadow hop to its associated
	 * physical hop, and add back the original 12 LSBs.
	 */
	u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
				(val & FLAGS_MASK);

	ctx->hdev->asic_funcs->write_pte(ctx->hdev,
					get_phys_addr(ctx, shadow_pte_addr),
					phys_val);

	*(u64 *) (uintptr_t) shadow_pte_addr = val;
}

/* do not transform the value to physical address when writing to H/W */
static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
					u64 val)
{
	ctx->hdev->asic_funcs->write_pte(ctx->hdev,
					get_phys_addr(ctx, shadow_pte_addr),
					val);
	*(u64 *) (uintptr_t) shadow_pte_addr = val;
}

/* clear the last and present bits */
static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
{
	/* no need to transform the value to physical address */
	write_final_pte(ctx, pte_addr, 0);
}

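/* another PTE now points to this hop - increment its reference count */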
static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
{
	get_pgt_info(ctx, hop_addr)->num_of_ptes++;
}

/*
 * put_pte - decrement the num of ptes and free the hop if possible
 *
 * @ctx: pointer to the context structure
 * @hop_addr: addr of the hop
 *
 * This function returns the number of ptes left on this hop. If the number is
 * 0, it means the pte was freed.
 */
static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
	int num_of_ptes_left;

	pgt_info->num_of_ptes--;

	/*
	 * Need to save the number of ptes left because _free_hop might free
	 * the pgt_info
	 */
	num_of_ptes_left = pgt_info->num_of_ptes;
	if (!num_of_ptes_left)
		_free_hop(ctx, pgt_info);

	return num_of_ptes_left;
}

static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
					u64 virt_addr, u64 mask, u64 shift)
{
	return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
			((virt_addr & mask) >> shift);
}

static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop0_mask,
					mmu_prop->hop0_shift);
}

static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop1_mask,
					mmu_prop->hop1_shift);
}

static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop2_mask,
					mmu_prop->hop2_shift);
}

static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop3_mask,
					mmu_prop->hop3_shift);
}

static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_prop,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop4_mask,
					mmu_prop->hop4_shift);
}

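/* return the shadow address of the next hop, or ULLONG_MAX if not present */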
static inline u64 get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
{
	if (curr_pte & PAGE_PRESENT_MASK)
		return curr_pte & HOP_PHYS_ADDR_MASK;
	else
		return ULLONG_MAX;
}

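/*
 * Return the next hop from the PTE, allocating a new hop if the PTE is not
 * present. *is_new_hop is updated only when an allocation was attempted.
 */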
static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
						bool *is_new_hop)
{
	u64 hop_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop_addr == ULLONG_MAX) {
		hop_addr = alloc_hop(ctx);
		*is_new_hop = (hop_addr != ULLONG_MAX);
	}

	return hop_addr;
}

/* translates shadow address inside hop to a physical address */
static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
{
	u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
	u64 shadow_hop_addr = shadow_addr & ~page_mask;
	u64 pte_offset = shadow_addr & page_mask;
	u64 phys_hop_addr;

	if (shadow_hop_addr != get_hop0_addr(ctx))
		phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
	else
		phys_hop_addr = get_phys_hop0_addr(ctx);

	return phys_hop_addr + pte_offset;
}

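/* check whether the address falls inside the device's DRAM VA range */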
static bool is_dram_va(struct hl_device *hdev, u64 virt_addr)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
					prop->dmmu.start_addr,
					prop->dmmu.end_addr);
}

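/*
 * Map the entire DRAM VA range to the default page by building full hop1,
 * hop2 and hop3 tables whose final PTEs all point at the default page.
 */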
static int dram_default_mapping_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
		hop2_pte_addr, hop3_pte_addr, pte_val;
	int rc, i, j, hop3_allocated = 0;

	if ((!hdev->dram_supports_virtual_memory) ||
			(!hdev->dram_default_page_mapping) ||
			(ctx->asid == HL_KERNEL_ASID_ID))
		return 0;

	num_of_hop3 = prop->dram_size_for_default_page_mapping;
	do_div(num_of_hop3, prop->dram_page_size);
	do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);

	/* add hop1 and hop2 */
	total_hops = num_of_hop3 + 2;

	ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
	if (!ctx->dram_default_hops)
		return -ENOMEM;

	hop0_addr = get_hop0_addr(ctx);

	hop1_addr = alloc_hop(ctx);
	if (hop1_addr == ULLONG_MAX) {
		dev_err(hdev->dev, "failed to alloc hop 1\n");
		rc = -ENOMEM;
		goto hop1_err;
	}

	ctx->dram_default_hops[total_hops - 1] = hop1_addr;

	hop2_addr = alloc_hop(ctx);
	if (hop2_addr == ULLONG_MAX) {
		dev_err(hdev->dev, "failed to alloc hop 2\n");
		rc = -ENOMEM;
		goto hop2_err;
	}

	ctx->dram_default_hops[total_hops - 2] = hop2_addr;

	for (i = 0 ; i < num_of_hop3 ; i++) {
		ctx->dram_default_hops[i] = alloc_hop(ctx);
		if (ctx->dram_default_hops[i] == ULLONG_MAX) {
			dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
			rc = -ENOMEM;
			goto hop3_err;
		}
		hop3_allocated++;
	}

	/* need only pte 0 in hops 0 and 1 */
	pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
	write_pte(ctx, hop0_addr, pte_val);

	pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
	write_pte(ctx, hop1_addr, pte_val);
	get_pte(ctx, hop1_addr);

	hop2_pte_addr = hop2_addr;
	for (i = 0 ; i < num_of_hop3 ; i++) {
		pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
				PAGE_PRESENT_MASK;
		write_pte(ctx, hop2_pte_addr, pte_val);
		get_pte(ctx, hop2_addr);
		hop2_pte_addr += HL_PTE_SIZE;
	}

	pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) |
			LAST_MASK | PAGE_PRESENT_MASK;

	for (i = 0 ; i < num_of_hop3 ; i++) {
		hop3_pte_addr = ctx->dram_default_hops[i];
		for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
			write_final_pte(ctx, hop3_pte_addr, pte_val);
			get_pte(ctx, ctx->dram_default_hops[i]);
			hop3_pte_addr += HL_PTE_SIZE;
		}
	}

	flush(ctx);

	return 0;

hop3_err:
	for (i = 0 ; i < hop3_allocated ; i++)
		free_hop(ctx, ctx->dram_default_hops[i]);

	free_hop(ctx, hop2_addr);
hop2_err:
	free_hop(ctx, hop1_addr);
hop1_err:
	kfree(ctx->dram_default_hops);

	return rc;
}

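/*
 * Tear down the DRAM default page mapping created by
 * dram_default_mapping_init().
 */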
static void dram_default_mapping_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
		hop2_pte_addr, hop3_pte_addr;
	int i, j;

	if ((!hdev->dram_supports_virtual_memory) ||
			(!hdev->dram_default_page_mapping) ||
			(ctx->asid == HL_KERNEL_ASID_ID))
		return;

	num_of_hop3 = prop->dram_size_for_default_page_mapping;
	do_div(num_of_hop3, prop->dram_page_size);
	do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);

	hop0_addr = get_hop0_addr(ctx);

	total_hops = num_of_hop3 + 2;
	hop1_addr = ctx->dram_default_hops[total_hops - 1];
	hop2_addr = ctx->dram_default_hops[total_hops - 2];

	for (i = 0 ; i < num_of_hop3 ; i++) {
		hop3_pte_addr = ctx->dram_default_hops[i];
		for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
			clear_pte(ctx, hop3_pte_addr);
			put_pte(ctx, ctx->dram_default_hops[i]);
			hop3_pte_addr += HL_PTE_SIZE;
		}
	}

	hop2_pte_addr = hop2_addr;
	for (i = 0 ; i < num_of_hop3 ; i++) {
		clear_pte(ctx, hop2_pte_addr);
		put_pte(ctx, hop2_addr);
		hop2_pte_addr += HL_PTE_SIZE;
	}

	clear_pte(ctx, hop1_addr);
	put_pte(ctx, hop1_addr);
	clear_pte(ctx, hop0_addr);

	kfree(ctx->dram_default_hops);

	flush(ctx);
}

/**
 * hl_mmu_init() - initialize the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Create a pool of pages for pgt_info structures.
 * - Create a shadow table for the pgts.
 *
 * Return: 0 for success, non-zero for failure.
 */
int hl_mmu_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	if (!hdev->mmu_enable)
		return 0;

	hdev->mmu_pgt_pool =
			gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);

	if (!hdev->mmu_pgt_pool) {
		dev_err(hdev->dev, "Failed to create page gen pool\n");
		return -ENOMEM;
	}

	rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
			prop->mmu_hop0_tables_total_size,
			prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
			-1);
	if (rc) {
		dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
		goto err_pool_add;
	}

	hdev->mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
					prop->mmu_hop_table_size,
					GFP_KERNEL | __GFP_ZERO);
	if (ZERO_OR_NULL_PTR(hdev->mmu_shadow_hop0)) {
		rc = -ENOMEM;
		goto err_pool_add;
	}

	/* MMU H/W init will be done in device hw_init() */

	return 0;

err_pool_add:
	gen_pool_destroy(hdev->mmu_pgt_pool);

	return rc;
}

/**
 * hl_mmu_fini() - release the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
void hl_mmu_fini(struct hl_device *hdev)
{
	if (!hdev->mmu_enable)
		return;

	/* MMU H/W fini was already done in device hw_fini() */

	kvfree(hdev->mmu_shadow_hop0);
	gen_pool_destroy(hdev->mmu_pgt_pool);
}

/**
 * hl_mmu_ctx_init() - initialize a context for using the MMU module.
 * @ctx: pointer to the context structure to initialize.
 *
 * Initialize a mutex to protect the concurrent mapping flow and a hash to
 * hold all pgts related to this context.
 * Return: 0 on success, non-zero otherwise.
 */
int hl_mmu_ctx_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->mmu_enable)
		return 0;

	mutex_init(&ctx->mmu_lock);
	hash_init(ctx->mmu_shadow_hash);

	return dram_default_mapping_init(ctx);
}

/*
 * hl_mmu_ctx_fini - disable a ctx from using the mmu module
 *
 * @ctx: pointer to the context structure
 *
 * This function does the following:
 * - Free any pgts which were not freed yet
 * - Free the mutex
 * - Free DRAM default page mapping hops
 */
void hl_mmu_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct pgt_info *pgt_info;
	struct hlist_node *tmp;
	int i;

	if (!hdev->mmu_enable)
		return;

	dram_default_mapping_fini(ctx);

	if (!hash_empty(ctx->mmu_shadow_hash))
		dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
			ctx->asid);

	hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
		dev_err_ratelimited(hdev->dev,
			"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
			pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
		_free_hop(ctx, pgt_info);
	}

	mutex_destroy(&ctx->mmu_lock);
}

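/*
 * Unmap a single page: walk the hops from hop0, clear the final PTE and free
 * hops that are left with no PTEs. With DRAM default page mapping, the final
 * PTE is restored to point at the default page instead.
 */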
static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 hop0_addr = 0, hop0_pte_addr = 0,
		hop1_addr = 0, hop1_pte_addr = 0,
		hop2_addr = 0, hop2_pte_addr = 0,
		hop3_addr = 0, hop3_pte_addr = 0,
		hop4_addr = 0, hop4_pte_addr = 0,
		curr_pte;
	bool is_huge, clear_hop3 = true;

	/* shifts and masks are the same in PMMU and HPMMU, use one of them */
	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;

	hop0_addr = get_hop0_addr(ctx);
	hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;

	hop1_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop1_addr == ULLONG_MAX)
		goto not_mapped;

	hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;

	hop2_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop2_addr == ULLONG_MAX)
		goto not_mapped;

	hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;

	hop3_addr = get_next_hop_addr(ctx, curr_pte);

	if (hop3_addr == ULLONG_MAX)
		goto not_mapped;

	hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);

	curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;

	is_huge = curr_pte & LAST_MASK;

	if (is_dram_addr && !is_huge) {
		dev_err(hdev->dev,
			"DRAM unmapping should use huge pages only\n");
		return -EFAULT;
	}

	if (!is_huge) {
		hop4_addr = get_next_hop_addr(ctx, curr_pte);

		if (hop4_addr == ULLONG_MAX)
			goto not_mapped;

		hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
							virt_addr);

		curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;

		clear_hop3 = false;
	}

	if (hdev->dram_default_page_mapping && is_dram_addr) {
		u64 default_pte = (prop->mmu_dram_default_page_addr &
				HOP_PHYS_ADDR_MASK) | LAST_MASK |
					PAGE_PRESENT_MASK;
		if (curr_pte == default_pte) {
			dev_err(hdev->dev,
				"DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
				virt_addr);
			goto not_mapped;
		}

		if (!(curr_pte & PAGE_PRESENT_MASK)) {
			dev_err(hdev->dev,
				"DRAM: hop3 PTE is cleared! can't unmap, va: 0x%llx\n",
				virt_addr);
			goto not_mapped;
		}

		write_final_pte(ctx, hop3_pte_addr, default_pte);
		put_pte(ctx, hop3_addr);
	} else {
		if (!(curr_pte & PAGE_PRESENT_MASK))
			goto not_mapped;

		if (hop4_addr)
			clear_pte(ctx, hop4_pte_addr);
		else
			clear_pte(ctx, hop3_pte_addr);

		if (hop4_addr && !put_pte(ctx, hop4_addr))
			clear_hop3 = true;

		if (!clear_hop3)
			goto mapped;

		clear_pte(ctx, hop3_pte_addr);

		if (put_pte(ctx, hop3_addr))
			goto mapped;

		clear_pte(ctx, hop2_pte_addr);

		if (put_pte(ctx, hop2_addr))
			goto mapped;

		clear_pte(ctx, hop1_pte_addr);

		if (put_pte(ctx, hop1_addr))
			goto mapped;

		clear_pte(ctx, hop0_pte_addr);
	}

mapped:
	return 0;

not_mapped:
	dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
		virt_addr);

	return -EINVAL;
}

/*
 * hl_mmu_unmap - unmaps a virtual addr
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to unmap
 * @page_size: size of the page to unmap
 * @flush_pte: whether to do a PCI flush
 *
 * This function does the following:
 * - Check that the virt addr is mapped
 * - Unmap the virt addr and frees pgts if possible
 * - Returns 0 on success, -EINVAL if the given addr is not mapped
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it unmaps only a single page, the lock should be
 * implemented in a higher level in order to protect the entire unmapping of
 * the memory area.
 *
 * For optimization reasons PCI flush may be requested once after unmapping of
 * large area.
 */
int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
		bool flush_pte)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 real_virt_addr;
	u32 real_page_size, npages;
	int i, rc = 0;
	bool is_dram_addr;

	if (!hdev->mmu_enable)
		return 0;

	is_dram_addr = is_dram_va(hdev, virt_addr);

	if (is_dram_addr)
		mmu_prop = &prop->dmmu;
	else if ((page_size % prop->pmmu_huge.page_size) == 0)
		mmu_prop = &prop->pmmu_huge;
	else
		mmu_prop = &prop->pmmu;

	/*
	 * The H/W handles mapping of specific page sizes. Hence if the page
	 * size is bigger, we break it to sub-pages and unmap them separately.
	 */
	if ((page_size % mmu_prop->page_size) == 0) {
		real_page_size = mmu_prop->page_size;
	} else {
		dev_err(hdev->dev,
			"page size of %u is not %uKB aligned, can't unmap\n",
			page_size, mmu_prop->page_size >> 10);

		return -EFAULT;
	}

	npages = page_size / real_page_size;
	real_virt_addr = virt_addr;

	for (i = 0 ; i < npages ; i++) {
		rc = _hl_mmu_unmap(ctx, real_virt_addr, is_dram_addr);
		if (rc)
			break;

		real_virt_addr += real_page_size;
	}

	if (flush_pte)
		flush(ctx);

	return rc;
}

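/*
 * Map a single page: walk (and allocate as needed) the hops from hop0, then
 * write the final PTE. Huge pages terminate at hop3, regular pages at hop4.
 */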
static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
		u32 page_size, bool is_dram_addr)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 hop0_addr = 0, hop0_pte_addr = 0,
		hop1_addr = 0, hop1_pte_addr = 0,
		hop2_addr = 0, hop2_pte_addr = 0,
		hop3_addr = 0, hop3_pte_addr = 0,
		hop4_addr = 0, hop4_pte_addr = 0,
		curr_pte = 0;
	bool hop1_new = false, hop2_new = false, hop3_new = false,
		hop4_new = false, is_huge;
	int rc = -ENOMEM;

	/*
	 * This mapping function can map a page or a huge page. For huge page
	 * there are only 3 hops rather than 4. Currently the DRAM allocation
	 * uses huge pages only but user memory could have been allocated with
	 * one of the two page sizes. Since this is a common code for all the
	 * three cases, we need this huge page check.
	 */
	if (is_dram_addr) {
		mmu_prop = &prop->dmmu;
		is_huge = true;
	} else if (page_size == prop->pmmu_huge.page_size) {
		mmu_prop = &prop->pmmu_huge;
		is_huge = true;
	} else {
		mmu_prop = &prop->pmmu;
		is_huge = false;
	}

	hop0_addr = get_hop0_addr(ctx);
	hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;

	hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
	if (hop1_addr == ULLONG_MAX)
		goto err;

	hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;

	hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
	if (hop2_addr == ULLONG_MAX)
		goto err;

	hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;

	hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
	if (hop3_addr == ULLONG_MAX)
		goto err;

	hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
	curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;

	if (!is_huge) {
		hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
		if (hop4_addr == ULLONG_MAX)
			goto err;

		hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
							virt_addr);
		curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
	}

	if (hdev->dram_default_page_mapping && is_dram_addr) {
		u64 default_pte = (prop->mmu_dram_default_page_addr &
					HOP_PHYS_ADDR_MASK) | LAST_MASK |
						PAGE_PRESENT_MASK;

		if (curr_pte != default_pte) {
			dev_err(hdev->dev,
				"DRAM: mapping already exists for virt_addr 0x%llx\n",
				virt_addr);
			rc = -EINVAL;
			goto err;
		}

		if (hop1_new || hop2_new || hop3_new || hop4_new) {
			dev_err(hdev->dev,
				"DRAM mapping should not allocate more hops\n");
			rc = -EFAULT;
			goto err;
		}
	} else if (curr_pte & PAGE_PRESENT_MASK) {
		dev_err(hdev->dev,
			"mapping already exists for virt_addr 0x%llx\n",
			virt_addr);

		dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop0_pte_addr, hop0_pte_addr);
		dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop1_pte_addr, hop1_pte_addr);
		dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop2_pte_addr, hop2_pte_addr);
		dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
			*(u64 *) (uintptr_t) hop3_pte_addr, hop3_pte_addr);

		if (!is_huge)
			dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
				*(u64 *) (uintptr_t) hop4_pte_addr,
				hop4_pte_addr);

		rc = -EINVAL;
		goto err;
	}

	curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | LAST_MASK
			| PAGE_PRESENT_MASK;

	if (is_huge)
		write_final_pte(ctx, hop3_pte_addr, curr_pte);
	else
		write_final_pte(ctx, hop4_pte_addr, curr_pte);

	if (hop1_new) {
		curr_pte =
			(hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
		write_pte(ctx, hop0_pte_addr, curr_pte);
	}
	if (hop2_new) {
		curr_pte =
			(hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
		write_pte(ctx, hop1_pte_addr, curr_pte);
		get_pte(ctx, hop1_addr);
	}
	if (hop3_new) {
		curr_pte =
			(hop3_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
		write_pte(ctx, hop2_pte_addr, curr_pte);
		get_pte(ctx, hop2_addr);
	}

	if (!is_huge) {
		if (hop4_new) {
			curr_pte = (hop4_addr & HOP_PHYS_ADDR_MASK) |
					PAGE_PRESENT_MASK;
			write_pte(ctx, hop3_pte_addr, curr_pte);
			get_pte(ctx, hop3_addr);
		}

		get_pte(ctx, hop4_addr);
	} else {
		get_pte(ctx, hop3_addr);
	}

	return 0;

err:
	if (hop4_new)
		free_hop(ctx, hop4_addr);
	if (hop3_new)
		free_hop(ctx, hop3_addr);
	if (hop2_new)
		free_hop(ctx, hop2_addr);
	if (hop1_new)
		free_hop(ctx, hop1_addr);

	return rc;
}

/*
 * hl_mmu_map - maps a virtual addr to physical addr
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to map from
 * @phys_addr: phys addr to map to
 * @page_size: physical page size
 * @flush_pte: whether to do a PCI flush
 *
 * This function does the following:
 * - Check that the virt addr is not mapped
 * - Allocate pgts as necessary in order to map the virt addr to the phys
 * - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM.
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it maps only a single page, the lock should be implemented
 * in a higher level in order to protect the entire mapping of the memory
 * area.
 *
 * For optimization reasons PCI flush may be requested once after mapping of
 * large area.
 */
int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
		bool flush_pte)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 real_virt_addr, real_phys_addr;
	u32 real_page_size, npages;
	int i, rc, mapped_cnt = 0;
	bool is_dram_addr;

	if (!hdev->mmu_enable)
		return 0;

	is_dram_addr = is_dram_va(hdev, virt_addr);

	if (is_dram_addr)
		mmu_prop = &prop->dmmu;
	else if ((page_size % prop->pmmu_huge.page_size) == 0)
		mmu_prop = &prop->pmmu_huge;
	else
		mmu_prop = &prop->pmmu;

	/*
	 * The H/W handles mapping of specific page sizes. Hence if the page
	 * size is bigger, we break it to sub-pages and map them separately.
	 */
	if ((page_size % mmu_prop->page_size) == 0) {
		real_page_size = mmu_prop->page_size;
	} else {
		dev_err(hdev->dev,
			"page size of %u is not %uKB aligned, can't map\n",
			page_size, mmu_prop->page_size >> 10);

		return -EFAULT;
	}

	WARN_ONCE((phys_addr & (real_page_size - 1)),
		"Mapping 0x%llx with page size of 0x%x is erroneous! Address must be divisible by page size",
		phys_addr, real_page_size);

	npages = page_size / real_page_size;
	real_virt_addr = virt_addr;
	real_phys_addr = phys_addr;

	for (i = 0 ; i < npages ; i++) {
		rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
				real_page_size, is_dram_addr);
		if (rc)
			goto err;

		real_virt_addr += real_page_size;
		real_phys_addr += real_page_size;
		mapped_cnt++;
	}

	if (flush_pte)
		flush(ctx);

	return 0;

err:
	real_virt_addr = virt_addr;
	for (i = 0 ; i < mapped_cnt ; i++) {
		if (_hl_mmu_unmap(ctx, real_virt_addr, is_dram_addr))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap va: 0x%llx\n", real_virt_addr);

		real_virt_addr += real_page_size;
	}

	flush(ctx);

	return rc;
}

/*
 * hl_mmu_swap_out - marks all mappings of the given ctx as swapped out
 *
 * @ctx: pointer to the context structure
 */
void hl_mmu_swap_out(struct hl_ctx *ctx)
{

}

/*
 * hl_mmu_swap_in - marks all mappings of the given ctx as swapped in
 *
 * @ctx: pointer to the context structure
 */
void hl_mmu_swap_in(struct hl_ctx *ctx)
{

}