1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30#include <drm/drm_gem_vram_helper.h>
31#include <drm/drm_managed.h>
32
33#include "ast_drv.h"
34
35static void ast_cursor_fini(struct ast_private *ast)
36{
37 size_t i;
38 struct drm_gem_vram_object *gbo;
39
40 for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
41 gbo = ast->cursor.gbo[i];
42 drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]);
43 drm_gem_vram_unpin(gbo);
44 drm_gem_vram_put(gbo);
45 }
46}
47
/* Managed-release action: frees the cursor state when the DRM device goes away. */
static void ast_cursor_release(struct drm_device *dev, void *ptr)
{
	ast_cursor_fini(to_ast_private(dev));
}
54
55
56
57
/*
 * Allocate, pin and map the cursor buffer objects in VRAM.
 *
 * One VRAM BO per entry of ast->cursor.gbo is created, pinned at the
 * top of VRAM, and kept permanently mapped; the mappings are stored in
 * ast->cursor.map. Cleanup is registered as a drm-managed action so it
 * runs automatically on device release.
 *
 * Returns 0 on success, or a negative errno code on failure. On failure
 * all buffers created so far are unmapped, unpinned and released.
 */
int ast_cursor_init(struct ast_private *ast)
{
	struct drm_device *dev = &ast->base;
	size_t size, i;
	struct drm_gem_vram_object *gbo;
	struct dma_buf_map map;
	int ret;

	/* Image plus trailing signature area, rounded up to whole pages. */
	size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);

	for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
		gbo = drm_gem_vram_create(dev, size, 0);
		if (IS_ERR(gbo)) {
			ret = PTR_ERR(gbo);
			goto err_drm_gem_vram_put;
		}
		/* TOPDOWN keeps the cursor out of the way of scanout buffers. */
		ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
					    DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
		if (ret) {
			drm_gem_vram_put(gbo);
			goto err_drm_gem_vram_put;
		}
		ret = drm_gem_vram_vmap(gbo, &map);
		if (ret) {
			drm_gem_vram_unpin(gbo);
			drm_gem_vram_put(gbo);
			goto err_drm_gem_vram_put;
		}

		/* Only store fully initialized entries; the error path
		 * below relies on entries [0, i) being complete. */
		ast->cursor.gbo[i] = gbo;
		ast->cursor.map[i] = map;
	}

	return drmm_add_action_or_reset(dev, ast_cursor_release, NULL);

err_drm_gem_vram_put:
	/* Roll back the entries completed so far, newest first. */
	while (i) {
		--i;
		gbo = ast->cursor.gbo[i];
		drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]);
		drm_gem_vram_unpin(gbo);
		drm_gem_vram_put(gbo);
	}
	return ret;
}
103
/*
 * Convert a 32-bit source cursor image into the hardware cursor format
 * and write it into the cursor buffer at dst (I/O memory).
 *
 * Each source pixel contributes the high nibble of each of its 4 bytes
 * (mask 0xf0f0f0f0), packed into 2 bytes per pixel; pairs of pixels are
 * written with a single writel(), a trailing odd pixel with writew().
 * The image is offset within the AST_MAX_HWC_WIDTH x AST_MAX_HWC_HEIGHT
 * cursor area (presumably so it ends at the area's bottom-right corner —
 * NOTE(review): confirm against hardware docs). A checksum of all data
 * written, plus width/height and zero hotspot, is stored in the
 * signature area that follows the image.
 */
static void update_cursor_image(u8 __iomem *dst, const u8 *src, int width, int height)
{
	union {
		u32 ul;
		u8 b[4];
	} srcdata32[2], data32;
	union {
		u16 us;
		u8 b[2];
	} data16;
	u32 csum = 0;
	s32 alpha_dst_delta, last_alpha_dst_delta;
	u8 __iomem *dstxor;
	const u8 *srcxor;
	int i, j;
	u32 per_pixel_copy, two_pixel_copy;

	/* Destination row stride: 2 bytes per pixel at maximum width. */
	alpha_dst_delta = AST_MAX_HWC_WIDTH << 1;
	/* Bytes left over at the end of each row for a width-pixel image. */
	last_alpha_dst_delta = alpha_dst_delta - (width << 1);

	srcxor = src;
	/* NOTE(review): cast drops the __iomem qualifier (sparse warning). */
	dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta;
	per_pixel_copy = width & 1;	/* 1 iff an odd trailing pixel exists */
	two_pixel_copy = width >> 1;	/* number of pixel pairs per row */

	for (j = 0; j < height; j++) {
		for (i = 0; i < two_pixel_copy; i++) {
			/* Keep only the high nibble of every source byte. */
			srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
			srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
			/* Pack each pixel's 4 nibbles into 2 output bytes. */
			data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
			data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
			data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
			data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);

			writel(data32.ul, dstxor);
			csum += data32.ul;

			dstxor += 4;
			srcxor += 8;

		}

		/* Handle the odd trailing pixel, if any. */
		for (i = 0; i < per_pixel_copy; i++) {
			srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
			data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
			data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
			writew(data16.us, dstxor);
			csum += (u32)data16.us;

			dstxor += 2;
			srcxor += 4;
		}
		/* Skip the unused remainder of the destination row. */
		dstxor += last_alpha_dst_delta;
	}


	/* Signature area follows the image: checksum, geometry, hotspot. */
	dst += AST_HWC_SIZE;
	writel(csum, dst);
	writel(width, dst + AST_HWC_SIGNATURE_SizeX);
	writel(height, dst + AST_HWC_SIGNATURE_SizeY);
	writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
	writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
}
167
168int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb)
169{
170 struct drm_device *dev = &ast->base;
171 struct drm_gem_vram_object *gbo;
172 struct dma_buf_map map;
173 int ret;
174 void *src;
175 void __iomem *dst;
176
177 if (drm_WARN_ON_ONCE(dev, fb->width > AST_MAX_HWC_WIDTH) ||
178 drm_WARN_ON_ONCE(dev, fb->height > AST_MAX_HWC_HEIGHT))
179 return -EINVAL;
180
181 gbo = drm_gem_vram_of_gem(fb->obj[0]);
182
183 ret = drm_gem_vram_pin(gbo, 0);
184 if (ret)
185 return ret;
186 ret = drm_gem_vram_vmap(gbo, &map);
187 if (ret)
188 goto err_drm_gem_vram_unpin;
189 src = map.vaddr;
190
191 dst = ast->cursor.map[ast->cursor.next_index].vaddr_iomem;
192
193
194 update_cursor_image(dst, src, fb->width, fb->height);
195
196 drm_gem_vram_vunmap(gbo, &map);
197 drm_gem_vram_unpin(gbo);
198
199 return 0;
200
201err_drm_gem_vram_unpin:
202 drm_gem_vram_unpin(gbo);
203 return ret;
204}
205
206static void ast_cursor_set_base(struct ast_private *ast, u64 address)
207{
208 u8 addr0 = (address >> 3) & 0xff;
209 u8 addr1 = (address >> 11) & 0xff;
210 u8 addr2 = (address >> 19) & 0xff;
211
212 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, addr0);
213 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, addr1);
214 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2);
215}
216
217void ast_cursor_page_flip(struct ast_private *ast)
218{
219 struct drm_device *dev = &ast->base;
220 struct drm_gem_vram_object *gbo;
221 s64 off;
222
223 gbo = ast->cursor.gbo[ast->cursor.next_index];
224
225 off = drm_gem_vram_offset(gbo);
226 if (drm_WARN_ON_ONCE(dev, off < 0))
227 return;
228
229 ast_cursor_set_base(ast, off);
230
231 ++ast->cursor.next_index;
232 ast->cursor.next_index %= ARRAY_SIZE(ast->cursor.gbo);
233}
234
235static void ast_cursor_set_location(struct ast_private *ast, u16 x, u16 y,
236 u8 x_offset, u8 y_offset)
237{
238 u8 x0 = (x & 0x00ff);
239 u8 x1 = (x & 0x0f00) >> 8;
240 u8 y0 = (y & 0x00ff);
241 u8 y1 = (y & 0x0700) >> 8;
242
243 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset);
244 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset);
245 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, x0);
246 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, x1);
247 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, y0);
248 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, y1);
249}
250
251void ast_cursor_show(struct ast_private *ast, int x, int y,
252 unsigned int offset_x, unsigned int offset_y)
253{
254 u8 x_offset, y_offset;
255 u8 __iomem *dst;
256 u8 __iomem *sig;
257 u8 jreg;
258
259 dst = ast->cursor.map[ast->cursor.next_index].vaddr;
260
261 sig = dst + AST_HWC_SIZE;
262 writel(x, sig + AST_HWC_SIGNATURE_X);
263 writel(y, sig + AST_HWC_SIGNATURE_Y);
264
265 if (x < 0) {
266 x_offset = (-x) + offset_x;
267 x = 0;
268 } else {
269 x_offset = offset_x;
270 }
271 if (y < 0) {
272 y_offset = (-y) + offset_y;
273 y = 0;
274 } else {
275 y_offset = offset_y;
276 }
277
278 ast_cursor_set_location(ast, x, y, x_offset, y_offset);
279
280
281 jreg = 0x02 |
282 0x01;
283 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
284}
285
/* Disable the hardware cursor by clearing the enable bits in CRCB[1:0]. */
void ast_cursor_hide(struct ast_private *ast)
{
	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
}
290