1
2
3
4
5
6
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/iommu.h>

#include "exynos_drm_drv.h"
14
15#if defined(CONFIG_ARM_DMA_USE_IOMMU)
16#include <asm/dma-iommu.h>
17#else
18#define arm_iommu_create_mapping(...) ({ NULL; })
19#define arm_iommu_attach_device(...) ({ -ENODEV; })
20#define arm_iommu_release_mapping(...) ({ })
21#define arm_iommu_detach_device(...) ({ })
22#define to_dma_iommu_mapping(dev) NULL
23#endif
24
25#if !defined(CONFIG_IOMMU_DMA)
26#define iommu_dma_init_domain(...) ({ -EINVAL; })
27#endif
28
29#define EXYNOS_DEV_ADDR_START 0x20000000
30#define EXYNOS_DEV_ADDR_SIZE 0x40000000
31
32static inline int configure_dma_max_seg_size(struct device *dev)
33{
34 if (!dev->dma_parms)
35 dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
36 if (!dev->dma_parms)
37 return -ENOMEM;
38
39 dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
40 return 0;
41}
42
43static inline void clear_dma_max_seg_size(struct device *dev)
44{
45 kfree(dev->dma_parms);
46 dev->dma_parms = NULL;
47}
48
49
50
51
52
53
54
55
56
57
58static int drm_iommu_attach_device(struct drm_device *drm_dev,
59 struct device *subdrv_dev)
60{
61 struct exynos_drm_private *priv = drm_dev->dev_private;
62 int ret;
63
64 if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
65 DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
66 dev_name(subdrv_dev));
67 return -EINVAL;
68 }
69
70 ret = configure_dma_max_seg_size(subdrv_dev);
71 if (ret)
72 return ret;
73
74 if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
75 if (to_dma_iommu_mapping(subdrv_dev))
76 arm_iommu_detach_device(subdrv_dev);
77
78 ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
79 } else if (IS_ENABLED(CONFIG_IOMMU_DMA)) {
80 ret = iommu_attach_device(priv->mapping, subdrv_dev);
81 }
82
83 if (ret)
84 clear_dma_max_seg_size(subdrv_dev);
85
86 return 0;
87}
88
89
90
91
92
93
94
95
96
97
98static void drm_iommu_detach_device(struct drm_device *drm_dev,
99 struct device *subdrv_dev)
100{
101 struct exynos_drm_private *priv = drm_dev->dev_private;
102
103 if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
104 arm_iommu_detach_device(subdrv_dev);
105 else if (IS_ENABLED(CONFIG_IOMMU_DMA))
106 iommu_detach_device(priv->mapping, subdrv_dev);
107
108 clear_dma_max_seg_size(subdrv_dev);
109}
110
111int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
112{
113 struct exynos_drm_private *priv = drm->dev_private;
114
115 if (!priv->dma_dev) {
116 priv->dma_dev = dev;
117 DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
118 dev_name(dev));
119 }
120
121 if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
122 return 0;
123
124 if (!priv->mapping) {
125 void *mapping;
126
127 if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
128 mapping = arm_iommu_create_mapping(&platform_bus_type,
129 EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
130 else if (IS_ENABLED(CONFIG_IOMMU_DMA))
131 mapping = iommu_get_domain_for_dev(priv->dma_dev);
132
133 if (IS_ERR(mapping))
134 return PTR_ERR(mapping);
135 priv->mapping = mapping;
136 }
137
138 return drm_iommu_attach_device(drm, dev);
139}
140
141void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev)
142{
143 if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
144 drm_iommu_detach_device(drm, dev);
145}
146
147void exynos_drm_cleanup_dma(struct drm_device *drm)
148{
149 struct exynos_drm_private *priv = drm->dev_private;
150
151 if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
152 return;
153
154 arm_iommu_release_mapping(priv->mapping);
155 priv->mapping = NULL;
156 priv->dma_dev = NULL;
157}
158