// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (c) 2012 Samsung Electronics Co., Ltd.
//	Author: Inki Dae <inki.dae@samsung.com>
//	Author: Andrzej Hajda <a.hajda@samsung.com>
// NOTE(review): header reconstructed from stripped comment lines — confirm
// against the original file's license block.
7#include <linux/dma-iommu.h>
8#include <linux/dma-map-ops.h>
9#include <linux/iommu.h>
10#include <linux/platform_device.h>
11
12#include <drm/drm_print.h>
13#include <drm/exynos_drm.h>
14
15#include "exynos_drm_drv.h"
16
17#if defined(CONFIG_ARM_DMA_USE_IOMMU)
18#include <asm/dma-iommu.h>
19#else
20#define arm_iommu_create_mapping(...) ({ NULL; })
21#define arm_iommu_attach_device(...) ({ -ENODEV; })
22#define arm_iommu_release_mapping(...) ({ })
23#define arm_iommu_detach_device(...) ({ })
24#define to_dma_iommu_mapping(dev) NULL
25#endif
26
27#if !defined(CONFIG_IOMMU_DMA)
28#define iommu_dma_init_domain(...) ({ -EINVAL; })
29#endif
30
31#define EXYNOS_DEV_ADDR_START 0x20000000
32#define EXYNOS_DEV_ADDR_SIZE 0x40000000
33
34
35
36
37
38
39
40
41
42
43static int drm_iommu_attach_device(struct drm_device *drm_dev,
44 struct device *subdrv_dev, void **dma_priv)
45{
46 struct exynos_drm_private *priv = drm_dev->dev_private;
47 int ret = 0;
48
49 if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
50 DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
51 dev_name(subdrv_dev));
52 return -EINVAL;
53 }
54
55 dma_set_max_seg_size(subdrv_dev, DMA_BIT_MASK(32));
56 if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
57
58
59
60
61
62
63 *dma_priv = to_dma_iommu_mapping(subdrv_dev);
64 if (*dma_priv)
65 arm_iommu_detach_device(subdrv_dev);
66
67 ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
68 } else if (IS_ENABLED(CONFIG_IOMMU_DMA)) {
69 ret = iommu_attach_device(priv->mapping, subdrv_dev);
70 }
71
72 return ret;
73}
74
75
76
77
78
79
80
81
82
83
84static void drm_iommu_detach_device(struct drm_device *drm_dev,
85 struct device *subdrv_dev, void **dma_priv)
86{
87 struct exynos_drm_private *priv = drm_dev->dev_private;
88
89 if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
90 arm_iommu_detach_device(subdrv_dev);
91 arm_iommu_attach_device(subdrv_dev, *dma_priv);
92 } else if (IS_ENABLED(CONFIG_IOMMU_DMA))
93 iommu_detach_device(priv->mapping, subdrv_dev);
94}
95
96int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
97 void **dma_priv)
98{
99 struct exynos_drm_private *priv = drm->dev_private;
100
101 if (!priv->dma_dev) {
102 priv->dma_dev = dev;
103 DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
104 dev_name(dev));
105 }
106
107 if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
108 return 0;
109
110 if (!priv->mapping) {
111 void *mapping;
112
113 if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
114 mapping = arm_iommu_create_mapping(&platform_bus_type,
115 EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
116 else if (IS_ENABLED(CONFIG_IOMMU_DMA))
117 mapping = iommu_get_domain_for_dev(priv->dma_dev);
118
119 if (IS_ERR(mapping))
120 return PTR_ERR(mapping);
121 priv->mapping = mapping;
122 }
123
124 return drm_iommu_attach_device(drm, dev, dma_priv);
125}
126
127void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
128 void **dma_priv)
129{
130 if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
131 drm_iommu_detach_device(drm, dev, dma_priv);
132}
133
134void exynos_drm_cleanup_dma(struct drm_device *drm)
135{
136 struct exynos_drm_private *priv = drm->dev_private;
137
138 if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
139 return;
140
141 arm_iommu_release_mapping(priv->mapping);
142 priv->mapping = NULL;
143 priv->dma_dev = NULL;
144}
145