#include "msm_drv.h"
#include "msm_mmu.h"

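/*
 * msm_mmu implementation backed by the IOMMU API: wraps an iommu_domain
 * behind the generic msm_mmu interface used by the rest of the driver.
 */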
struct msm_iommu {
	struct msm_mmu base;
	struct iommu_domain *domain;
};
#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)

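/*
 * Fault handler installed on the domain: log the faulting IOVA and fault
 * flags, rate-limited so a misbehaving GPU job cannot flood the kernel log.
 */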
static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
	return 0;
}

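/*
 * Attach/detach the device to/from the domain.  The context-bank names
 * and count from the msm_mmu interface are not needed here.
 */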
static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	return iommu_attach_device(iommu->domain, mmu->dev);
}

static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	iommu_detach_device(iommu->domain, mmu->dev);
}

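/*
 * Map each scatterlist entry contiguously into the domain starting at
 * 'iova'.  On failure, unwind any entries already mapped.
 */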
static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
		struct sg_table *sgt, unsigned len, int prot)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		/* map whole pages: start at the page boundary backing this
		 * entry and extend the length by the intra-page offset.
		 */
		u32 pa = sg_phys(sg) - sg->offset;
		size_t bytes = sg->length + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, da, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	/* unwind the entries that were already mapped */
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg->length + sg->offset;
		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

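/*
 * Unmap the same scatterlist layout previously mapped at 'iova', walking
 * the entries and advancing the IOVA by each entry's size.
 */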
static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
		struct sg_table *sgt, unsigned len)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length + sg->offset;
		size_t unmapped;

		/* a short unmap is treated as failure */
		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			return unmapped;

		VERB("unmap[%d]: %08x(%zx)", i, da, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}

	return 0;
}

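/*
 * The domain handed to msm_iommu_new() is owned by the msm_mmu object from
 * that point on, so it is freed here along with the wrapper.
 */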
static void msm_iommu_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	iommu_domain_free(iommu->domain);
	kfree(iommu);
}

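/* Plug the IOMMU-backed implementation into the generic msm_mmu interface. */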
static const struct msm_mmu_funcs funcs = {
	.attach = msm_iommu_attach,
	.detach = msm_iommu_detach,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.destroy = msm_iommu_destroy,
};

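/*
 * Wrap a caller-provided iommu_domain in an msm_mmu object and install the
 * fault handler.  Returns the msm_mmu base pointer, or an ERR_PTR on
 * allocation failure.
 */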
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
{
	struct msm_iommu *iommu;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	iommu->domain = domain;
	msm_mmu_init(&iommu->base, dev, &funcs);
	iommu_set_fault_handler(domain, msm_fault_handler, dev);

	return &iommu->base;
}