// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the IOMMU SVA API for the ARM SMMUv3
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/slab.h>

#include "arm-smmu-v3.h"
#include "../../iommu-sva-lib.h"
#include "../../io-pgtable-arm.h"

struct arm_smmu_mmu_notifier {
	struct mmu_notifier		mn;
	struct arm_smmu_ctx_desc	*cd;
	bool				cleared;
	refcount_t			refs;
	struct list_head		list;
	struct arm_smmu_domain		*domain;
};

#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)

struct arm_smmu_bond {
	struct iommu_sva		sva;
	struct mm_struct		*mm;
	struct arm_smmu_mmu_notifier	*smmu_mn;
	struct list_head		list;
	refcount_t			refs;
};

#define sva_to_bond(handle) \
	container_of(handle, struct arm_smmu_bond, sva)

static DEFINE_MUTEX(sva_lock);

/*
 * Check if the CPU ASID is available on the SMMU side. If a private context
 * descriptor is using it, try to replace it.
 */
static struct arm_smmu_ctx_desc *
arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
{
	int ret;
	u32 new_asid;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain;

	cd = xa_load(&arm_smmu_asid_xa, asid);
	if (!cd)
		return NULL;

	if (cd->mm) {
		if (WARN_ON(cd->mm != mm))
			return ERR_PTR(-EINVAL);
		/* All devices bound to this mm use the same cd struct. */
		refcount_inc(&cd->refs);
		return cd;
	}

	smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
	smmu = smmu_domain->smmu;

	ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
	if (ret)
		return ERR_PTR(-ENOSPC);
	/*
	 * Race with unmap: TLB invalidations will start targeting the new
	 * ASID, which isn't assigned yet. We'll do an invalidate-all on the
	 * old ASID later, so it doesn't matter.
	 */
	cd->asid = new_asid;
	/*
	 * Update ASID and invalidate CD in all associated masters. There will
	 * be some overlap between use of both ASIDs, until we invalidate the
	 * TLB.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, 0, cd);

	/* Invalidate TLB entries previously associated with that context */
	arm_smmu_tlb_inv_asid(smmu, asid);

	xa_erase(&arm_smmu_asid_xa, asid);
	return NULL;
}

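/*
 * Allocate a context descriptor that shares @mm's page tables with the SMMU:
 * pin the CPU ASID, reallocate any private SMMU context that was using it,
 * and mirror the CPU's stage-1 configuration (TCR, TTBR, MAIR) into the CD.
 */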
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
	u16 asid;
	int err = 0;
	u64 tcr, par, reg;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_ctx_desc *ret = NULL;

	asid = arm64_mm_context_get(mm);
	if (!asid)
		return ERR_PTR(-ESRCH);

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		err = -ENOMEM;
		goto out_put_context;
	}

	refcount_set(&cd->refs, 1);

	mutex_lock(&arm_smmu_asid_lock);
	ret = arm_smmu_share_asid(mm, asid);
	if (ret) {
		mutex_unlock(&arm_smmu_asid_lock);
		goto out_free_cd;
	}

	err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
	mutex_unlock(&arm_smmu_asid_lock);

	if (err)
		goto out_free_asid;

	tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
	      CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;

	switch (PAGE_SIZE) {
	case SZ_4K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
		break;
	case SZ_16K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
		break;
	case SZ_64K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
		break;
	default:
		WARN_ON(1);
		err = -EINVAL;
		goto out_free_asid;
	}

	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
	tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);

	cd->ttbr = virt_to_phys(mm->pgd);
	cd->tcr = tcr;
	/*
	 * MAIR value is pretty much constant and global, so we can just get it
	 * from the current CPU register
	 */
	cd->mair = read_sysreg(mair_el1);
	cd->asid = asid;
	cd->mm = mm;

	return cd;

out_free_asid:
	arm_smmu_free_asid(cd);
out_free_cd:
	kfree(cd);
out_put_context:
	arm64_mm_context_put(mm);
	return err < 0 ? ERR_PTR(err) : ret;
}

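/*
 * Drop a reference to a shared CD. The last put unpins the CPU ASID and frees
 * the descriptor.
 */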
static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
{
	if (arm_smmu_free_asid(cd)) {
		/* Unpin ASID */
		arm64_mm_context_put(cd->mm);
		kfree(cd);
	}
}

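/*
 * mmu_notifier method: the CPU unmapped or changed [start, end) in @mm, so
 * remove any stale SMMU TLB and device ATC entries for that range.
 */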
static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
					 struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
	/* The notifier's end address is exclusive, so don't add 1 here */
	size_t size = end - start;

	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
		arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
					    PAGE_SIZE, false, smmu_domain);
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
}

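/*
 * mmu_notifier method: the mm is going away. Switch the CD to a quiet
 * descriptor so in-flight DMA aborts cleanly instead of raising C_BAD_CD,
 * then flush the TLB and ATC.
 */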
static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	mutex_lock(&sva_lock);
	if (smmu_mn->cleared) {
		mutex_unlock(&sva_lock);
		return;
	}

	/*
	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD
	 * events, but disable translation.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);

	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);

	smmu_mn->cleared = true;
	mutex_unlock(&sva_lock);
}

static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
{
	kfree(mn_to_smmu(mn));
}

static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
	.invalidate_range	= arm_smmu_mm_invalidate_range,
	.release		= arm_smmu_mm_release,
	.free_notifier		= arm_smmu_mmu_notifier_free,
};

/* Allocate or get existing MMU notifier for this {domain, mm} pair */
static struct arm_smmu_mmu_notifier *
arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
			  struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_mmu_notifier *smmu_mn;

	list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
		if (smmu_mn->mn.mm == mm) {
			refcount_inc(&smmu_mn->refs);
			return smmu_mn;
		}
	}

	cd = arm_smmu_alloc_shared_cd(mm);
	if (IS_ERR(cd))
		return ERR_CAST(cd);

	smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL);
	if (!smmu_mn) {
		ret = -ENOMEM;
		goto err_free_cd;
	}

	refcount_set(&smmu_mn->refs, 1);
	smmu_mn->cd = cd;
	smmu_mn->domain = smmu_domain;
	smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;

	ret = mmu_notifier_register(&smmu_mn->mn, mm);
	if (ret) {
		kfree(smmu_mn);
		goto err_free_cd;
	}

	ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
	if (ret)
		goto err_put_notifier;

	list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
	return smmu_mn;

err_put_notifier:
	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
err_free_cd:
	arm_smmu_free_shared_cd(cd);
	return ERR_PTR(ret);
}

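/*
 * Drop a reference to an MMU notifier. The last put clears the CD entry for
 * the PASID, invalidates the TLB and ATC unless release() already did, and
 * frees the notifier along with the shared CD.
 */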
static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
{
	struct mm_struct *mm = smmu_mn->mn.mm;
	struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	if (!refcount_dec_and_test(&smmu_mn->refs))
		return;

	list_del(&smmu_mn->list);
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);

	/*
	 * If we went through clear(), we've already invalidated, and no
	 * new TLB entry can have been formed.
	 */
	if (!smmu_mn->cleared) {
		arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
		arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
	}

	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
	arm_smmu_free_shared_cd(cd);
}

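/*
 * Bind @mm to @dev, called under sva_lock: reuse an existing bond if there is
 * one, otherwise allocate a PASID and a shared CD for the mm.
 */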
static struct iommu_sva *
__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_bond *bond;
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!master || !master->sva_enabled)
		return ERR_PTR(-ENODEV);

	/* If bind() was already called for this {dev, mm} pair, reuse it. */
	list_for_each_entry(bond, &master->bonds, list) {
		if (bond->mm == mm) {
			refcount_inc(&bond->refs);
			return &bond->sva;
		}
	}

	bond = kzalloc(sizeof(*bond), GFP_KERNEL);
	if (!bond)
		return ERR_PTR(-ENOMEM);

	/* Allocate a PASID for this mm if necessary */
	ret = iommu_sva_alloc_pasid(mm, 1, (1U << master->ssid_bits) - 1);
	if (ret)
		goto err_free_bond;

	bond->mm = mm;
	bond->sva.dev = dev;
	refcount_set(&bond->refs, 1);

	bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
	if (IS_ERR(bond->smmu_mn)) {
		ret = PTR_ERR(bond->smmu_mn);
		goto err_free_pasid;
	}

	list_add(&bond->list, &master->bonds);
	return &bond->sva;

err_free_pasid:
	iommu_sva_free_pasid(mm);
err_free_bond:
	kfree(bond);
	return ERR_PTR(ret);
}

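/*
 * Entry point for iommu_sva_bind_device(). Only stage-1 domains can share CPU
 * page tables with a device.
 */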
struct iommu_sva *
arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct iommu_sva *handle;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
		return ERR_PTR(-EINVAL);

	mutex_lock(&sva_lock);
	handle = __arm_smmu_sva_bind(dev, mm);
	mutex_unlock(&sva_lock);
	return handle;
}

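/*
 * Release a bond. The last unbind tears down the MMU notifier and frees the
 * PASID.
 */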
void arm_smmu_sva_unbind(struct iommu_sva *handle)
{
	struct arm_smmu_bond *bond = sva_to_bond(handle);

	mutex_lock(&sva_lock);
	if (refcount_dec_and_test(&bond->refs)) {
		list_del(&bond->list);
		arm_smmu_mmu_notifier_put(bond->smmu_mn);
		iommu_sva_free_pasid(bond->mm);
		kfree(bond);
	}
	mutex_unlock(&sva_lock);
}

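/* Return the PASID assigned to the bond's mm */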
u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle)
{
	struct arm_smmu_bond *bond = sva_to_bond(handle);

	return bond->mm->pasid;
}

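/*
 * To share page tables, the SMMU must be able to walk them exactly as the CPU
 * does: coherent table walks, the CPU's translation granule, at least the
 * CPU's output address size, and at least as many ASID bits.
 */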
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
	unsigned long reg, fld;
	unsigned long oas;
	unsigned long asid_bits;
	u32 feat_mask = ARM_SMMU_FEAT_COHERENCY;

	if (vabits_actual == 52)
		feat_mask |= ARM_SMMU_FEAT_VAX;

	if ((smmu->features & feat_mask) != feat_mask)
		return false;

	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
		return false;

	/*
	 * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
	 * not even pretending to support AArch32 here. Abort if the MMU outputs
	 * addresses larger than what we support.
	 */
	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
	oas = id_aa64mmfr0_parange_to_phys_shift(fld);
	if (smmu->oas < oas)
		return false;

	/* We can support bigger ASIDs than the CPU, but not smaller */
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);
	asid_bits = fld ? 16 : 8;
	if (smmu->asid_bits < asid_bits)
		return false;

	/*
	 * See max_pinned_asids in arch/arm64/mm/context.c. The following is
	 * generally the maximum number of bindable processes.
	 */
	if (arm64_kernel_unmapped_at_el0())
		asid_bits--;
	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
		num_possible_cpus() - 2);

	return true;
}

bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
{
	/* We're not keeping track of SIDs in fault events */
	if (master->num_streams != 1)
		return false;

	return master->stall_enabled;
}

bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
{
	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
		return false;

	/* SSID support is mandatory for the moment */
	return master->ssid_bits;
}

bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
{
	bool enabled;

	mutex_lock(&sva_lock);
	enabled = master->sva_enabled;
	mutex_unlock(&sva_lock);
	return enabled;
}

static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
{
	int ret;
	struct device *dev = master->dev;

	/*
	 * Drivers for devices supporting PRI or stall should enable IOPF first.
	 * Others have device-specific fault handlers and don't need IOPF.
	 */
	if (!arm_smmu_master_iopf_supported(master))
		return 0;

	if (!master->iopf_enabled)
		return -EINVAL;

	ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);
	if (ret)
		return ret;

	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
	if (ret) {
		iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
		return ret;
	}
	return 0;
}

static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
{
	struct device *dev = master->dev;

	if (!master->iopf_enabled)
		return;

	iommu_unregister_device_fault_handler(dev);
	iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
}

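/*
 * Enable SVA on @master. For stall-capable devices this also registers the
 * IOPF handler fed by the SMMU event queue.
 */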
int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
{
	int ret;

	mutex_lock(&sva_lock);
	ret = arm_smmu_master_sva_enable_iopf(master);
	if (!ret)
		master->sva_enabled = true;
	mutex_unlock(&sva_lock);

	return ret;
}

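/* Disable SVA on @master. Fails with -EBUSY while bonds are still active. */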
int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
	mutex_lock(&sva_lock);
	if (!list_empty(&master->bonds)) {
		dev_err(master->dev, "cannot disable SVA, device is bound\n");
		mutex_unlock(&sva_lock);
		return -EBUSY;
	}
	arm_smmu_master_sva_disable_iopf(master);
	master->sva_enabled = false;
	mutex_unlock(&sva_lock);

	return 0;
}

void arm_smmu_sva_notifier_synchronize(void)
{
	/*
	 * Some MMU notifiers may still be waiting to be freed through RCU
	 * callbacks that point into this module. Wait for them to finish
	 * before the module can be unloaded.
	 */
	mmu_notifier_synchronize();
}