#include <linux/fb.h>
#include "nv_type.h"
#include "nv_proto.h"
#include "nv_dma.h"
#include "nv_local.h"

/* Number of no-op slots reserved at the start of the DMA push buffer. */
#define SKIPS 8

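/*
 * Hardware raster-op codes, indexed by the fb raster op (ROP_COPY or
 * ROP_XOR).  NVCopyROP_PM is used instead when a plane mask is in
 * effect and has been loaded as the pattern (see NVSetRopSolid()).
 */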
static const int NVCopyROP[16] = {
        0xCC,                   /* copy   */
        0x55                    /* invert */
};

static const int NVCopyROP_PM[16] = {
        0xCA,                   /* copy   */
        0x5A,                   /* invert */
};

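/*
 * The engine appears to be hung: disable acceleration so that all further
 * drawing goes through the software cfb_* paths, and poke the soft-lockup
 * watchdog since we may just have spun for a very long time.
 */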
static inline void nvidiafb_safe_mode(struct fb_info *info)
{
        struct nvidia_par *par = info->par;

        touch_softlockup_watchdog();
        info->pixmap.scan_align = 1;
        par->lockup = 1;
}

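/*
 * Wait for the DMA engine to consume everything submitted so far, i.e.
 * for the GET pointer to catch up with PUT.  On timeout, assume a lockup
 * and drop back to unaccelerated mode.
 */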
static inline void NVFlush(struct fb_info *info)
{
        struct nvidia_par *par = info->par;
        int count = 1000000000;

        while (--count && READ_GET(par) != par->dmaPut) ;

        if (!count) {
                printk(KERN_ERR "nvidiafb: DMA Flush lockup\n");
                nvidiafb_safe_mode(info);
        }
}

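/*
 * Wait for the graphics engine itself to go idle (PGRAPH status register
 * reads zero).  On timeout, assume a lockup and drop back to
 * unaccelerated mode.
 */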
static inline void NVSync(struct fb_info *info)
{
        struct nvidia_par *par = info->par;
        int count = 1000000000;

        while (--count && NV_RD32(par->PGRAPH, 0x0700)) ;

        if (!count) {
                printk(KERN_ERR "nvidiafb: DMA Sync lockup\n");
                nvidiafb_safe_mode(info);
        }
}

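/*
 * Make the commands written so far visible to the hardware by advancing
 * the PUT pointer to the current write position.
 */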
static void NVDmaKickoff(struct nvidia_par *par)
{
        if (par->dmaCurrent != par->dmaPut) {
                par->dmaPut = par->dmaCurrent;
                WRITE_PUT(par, par->dmaPut);
        }
}

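/*
 * Make sure there is room for 'size' more words in the push buffer.
 * When the end of the buffer is reached, a jump command (0x20000000,
 * jump to offset 0) wraps back to the start, skipping the reserved
 * SKIPS slots, and we wait for the hardware GET pointer to move out of
 * the way.  A timeout marks the engine as locked up.
 */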
static void NVDmaWait(struct fb_info *info, int size)
{
        struct nvidia_par *par = info->par;
        int dmaGet;
        int count = 1000000000, cnt;
        size++;

        while (par->dmaFree < size && --count && !par->lockup) {
                dmaGet = READ_GET(par);

                if (par->dmaPut >= dmaGet) {
                        par->dmaFree = par->dmaMax - par->dmaCurrent;
                        if (par->dmaFree < size) {
                                NVDmaNext(par, 0x20000000);
                                if (dmaGet <= SKIPS) {
                                        if (par->dmaPut <= SKIPS)
                                                WRITE_PUT(par, SKIPS + 1);
                                        cnt = 1000000000;
                                        do {
                                                dmaGet = READ_GET(par);
                                        } while (--cnt && dmaGet <= SKIPS);
                                        if (!cnt) {
                                                printk(KERN_ERR "nvidiafb: DMA Get lockup\n");
                                                par->lockup = 1;
                                        }
                                }
                                WRITE_PUT(par, SKIPS);
                                par->dmaCurrent = par->dmaPut = SKIPS;
                                par->dmaFree = dmaGet - (SKIPS + 1);
                        }
                } else
                        par->dmaFree = dmaGet - par->dmaCurrent - 1;
        }

        if (!count) {
                printk(KERN_ERR "nvidiafb: DMA Wait lockup\n");
                nvidiafb_safe_mode(info);
        }
}

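/*
 * Load the two pattern colors and the 64-bit monochrome pattern into
 * the pattern object.
 */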
static void NVSetPattern(struct fb_info *info, u32 clr0, u32 clr1,
                         u32 pat0, u32 pat1)
{
        struct nvidia_par *par = info->par;

        NVDmaStart(info, par, PATTERN_COLOR_0, 4);
        NVDmaNext(par, clr0);
        NVDmaNext(par, clr1);
        NVDmaNext(par, pat0);
        NVDmaNext(par, pat1);
}

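/*
 * Select the raster operation for subsequent solid fills.  With a plane
 * mask, the mask is loaded as the pattern and a pattern-aware ROP is
 * used; par->currentRop caches the programmed state (offset by 32 for
 * the plane-mask variant) so redundant ROP_SET commands are avoided.
 */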
static void NVSetRopSolid(struct fb_info *info, u32 rop, u32 planemask)
{
        struct nvidia_par *par = info->par;

        if (planemask != ~0) {
                NVSetPattern(info, 0, planemask, ~0, ~0);
                if (par->currentRop != (rop + 32)) {
                        NVDmaStart(info, par, ROP_SET, 1);
                        NVDmaNext(par, NVCopyROP_PM[rop]);
                        par->currentRop = rop + 32;
                }
        } else if (par->currentRop != rop) {
                if (par->currentRop >= 16)
                        NVSetPattern(info, ~0, ~0, ~0, ~0);
                NVDmaStart(info, par, ROP_SET, 1);
                NVDmaNext(par, NVCopyROP[rop]);
                par->currentRop = rop;
        }
}

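/*
 * Program the clipping rectangle; the hardware takes the upper-left
 * corner followed by width and height.
 */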
static void NVSetClippingRectangle(struct fb_info *info, int x1, int y1,
                                   int x2, int y2)
{
        struct nvidia_par *par = info->par;
        int h = y2 - y1 + 1;
        int w = x2 - x1 + 1;

        NVDmaStart(info, par, CLIP_POINT, 2);
        NVDmaNext(par, (y1 << 16) | x1);
        NVDmaNext(par, (h << 16) | w);
}

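/*
 * (Re)initialize the acceleration engine: place the DMA push buffer at
 * the end of the usable framebuffer memory, bind the drawing objects to
 * their subchannels, program the surface, pattern, rectangle and line
 * formats for the current color depth, and reset the cached ROP and
 * clipping state.
 */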
void NVResetGraphics(struct fb_info *info)
{
        struct nvidia_par *par = info->par;
        u32 surfaceFormat, patternFormat, rectFormat, lineFormat;
        int pitch, i;

        pitch = info->fix.line_length;

        par->dmaBase = (u32 __iomem *) (&par->FbStart[par->FbUsableSize]);

        /* Fill the reserved slots at the start of the buffer with no-ops. */
        for (i = 0; i < SKIPS; i++)
                NV_WR32(&par->dmaBase[i], 0, 0x00000000);

        /* Bind object handles 0x80000010..0x80000017 to subchannels 0-7. */
        NV_WR32(&par->dmaBase[0x0 + SKIPS], 0, 0x00040000);
        NV_WR32(&par->dmaBase[0x1 + SKIPS], 0, 0x80000010);
        NV_WR32(&par->dmaBase[0x2 + SKIPS], 0, 0x00042000);
        NV_WR32(&par->dmaBase[0x3 + SKIPS], 0, 0x80000011);
        NV_WR32(&par->dmaBase[0x4 + SKIPS], 0, 0x00044000);
        NV_WR32(&par->dmaBase[0x5 + SKIPS], 0, 0x80000012);
        NV_WR32(&par->dmaBase[0x6 + SKIPS], 0, 0x00046000);
        NV_WR32(&par->dmaBase[0x7 + SKIPS], 0, 0x80000013);
        NV_WR32(&par->dmaBase[0x8 + SKIPS], 0, 0x00048000);
        NV_WR32(&par->dmaBase[0x9 + SKIPS], 0, 0x80000014);
        NV_WR32(&par->dmaBase[0xA + SKIPS], 0, 0x0004A000);
        NV_WR32(&par->dmaBase[0xB + SKIPS], 0, 0x80000015);
        NV_WR32(&par->dmaBase[0xC + SKIPS], 0, 0x0004C000);
        NV_WR32(&par->dmaBase[0xD + SKIPS], 0, 0x80000016);
        NV_WR32(&par->dmaBase[0xE + SKIPS], 0, 0x0004E000);
        NV_WR32(&par->dmaBase[0xF + SKIPS], 0, 0x80000017);

        par->dmaPut = 0;
        par->dmaCurrent = 16 + SKIPS;
        par->dmaMax = 8191;
        par->dmaFree = par->dmaMax - par->dmaCurrent;

        switch (info->var.bits_per_pixel) {
        case 32:
        case 24:
                surfaceFormat = SURFACE_FORMAT_DEPTH24;
                patternFormat = PATTERN_FORMAT_DEPTH24;
                rectFormat = RECT_FORMAT_DEPTH24;
                lineFormat = LINE_FORMAT_DEPTH24;
                break;
        case 16:
                surfaceFormat = SURFACE_FORMAT_DEPTH16;
                patternFormat = PATTERN_FORMAT_DEPTH16;
                rectFormat = RECT_FORMAT_DEPTH16;
                lineFormat = LINE_FORMAT_DEPTH16;
                break;
        default:
                surfaceFormat = SURFACE_FORMAT_DEPTH8;
                patternFormat = PATTERN_FORMAT_DEPTH8;
                rectFormat = RECT_FORMAT_DEPTH8;
                lineFormat = LINE_FORMAT_DEPTH8;
                break;
        }

        NVDmaStart(info, par, SURFACE_FORMAT, 4);
        NVDmaNext(par, surfaceFormat);
        NVDmaNext(par, pitch | (pitch << 16));
        NVDmaNext(par, 0);
        NVDmaNext(par, 0);

        NVDmaStart(info, par, PATTERN_FORMAT, 1);
        NVDmaNext(par, patternFormat);

        NVDmaStart(info, par, RECT_FORMAT, 1);
        NVDmaNext(par, rectFormat);

        NVDmaStart(info, par, LINE_FORMAT, 1);
        NVDmaNext(par, lineFormat);

        par->currentRop = ~0;   /* set to something invalid */
        NVSetRopSolid(info, ROP_COPY, ~0);

        NVSetClippingRectangle(info, 0, 0, info->var.xres_virtual,
                               info->var.yres_virtual);

        NVDmaKickoff(par);
}

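/*
 * fb_sync hook: wait until the accelerator has finished all outstanding
 * work before the framebuffer is accessed directly.
 */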
int nvidiafb_sync(struct fb_info *info)
{
        struct nvidia_par *par = info->par;

        if (info->state != FBINFO_STATE_RUNNING)
                return 0;

        if (!par->lockup)
                NVFlush(info);

        if (!par->lockup)
                NVSync(info);

        return 0;
}

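/*
 * fb_copyarea hook: screen-to-screen copy through the blit object.
 * Falls back to the software cfb_copyarea() once the engine has been
 * marked as locked up.
 */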
void nvidiafb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
        struct nvidia_par *par = info->par;

        if (info->state != FBINFO_STATE_RUNNING)
                return;

        if (par->lockup) {
                cfb_copyarea(info, region);
                return;
        }

        NVDmaStart(info, par, BLIT_POINT_SRC, 3);
        NVDmaNext(par, (region->sy << 16) | region->sx);
        NVDmaNext(par, (region->dy << 16) | region->dx);
        NVDmaNext(par, (region->height << 16) | region->width);

        NVDmaKickoff(par);
}

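/*
 * fb_fillrect hook: solid rectangle fill.  A non-copy raster op is
 * programmed for the duration of the fill and ROP_COPY is restored
 * afterwards.  Falls back to cfb_fillrect() on lockup.
 */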
void nvidiafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
        struct nvidia_par *par = info->par;
        u32 color;

        if (info->state != FBINFO_STATE_RUNNING)
                return;

        if (par->lockup) {
                cfb_fillrect(info, rect);
                return;
        }

        if (info->var.bits_per_pixel == 8)
                color = rect->color;
        else
                color = ((u32 *) info->pseudo_palette)[rect->color];

        if (rect->rop != ROP_COPY)
                NVSetRopSolid(info, rect->rop, ~0);

        NVDmaStart(info, par, RECT_SOLID_COLOR, 1);
        NVDmaNext(par, color);

        NVDmaStart(info, par, RECT_SOLID_RECTS(0), 2);
        NVDmaNext(par, (rect->dx << 16) | rect->dy);
        NVDmaNext(par, (rect->width << 16) | rect->height);

        NVDmaKickoff(par);

        if (rect->rop != ROP_COPY)
                NVSetRopSolid(info, ROP_COPY, ~0);
}

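/*
 * Expand a monochrome (1 bpp) image to the screen with the two-color
 * rectangle expansion object.  The source bitmap is fed to the hardware
 * 32-bit words at a time, padded to a 32-pixel-aligned width, with the
 * bit order of each word adjusted by reverse_order().
 */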
static void nvidiafb_mono_color_expand(struct fb_info *info,
                                       const struct fb_image *image)
{
        struct nvidia_par *par = info->par;
        u32 fg, bg, mask = ~(~0 >> (32 - info->var.bits_per_pixel));
        u32 dsize, width, *data = (u32 *) image->data, tmp;
        int j, k = 0;

        width = (image->width + 31) & ~31;
        dsize = (width * image->height) >> 5;

        if (info->var.bits_per_pixel == 8) {
                fg = image->fg_color | mask;
                bg = image->bg_color | mask;
        } else {
                fg = ((u32 *) info->pseudo_palette)[image->fg_color] | mask;
                bg = ((u32 *) info->pseudo_palette)[image->bg_color] | mask;
        }

        NVDmaStart(info, par, RECT_EXPAND_TWO_COLOR_CLIP, 7);
        NVDmaNext(par, (image->dy << 16) | (image->dx & 0xffff));
        NVDmaNext(par, ((image->dy + image->height) << 16) |
                  ((image->dx + image->width) & 0xffff));
        NVDmaNext(par, bg);
        NVDmaNext(par, fg);
        NVDmaNext(par, (image->height << 16) | width);
        NVDmaNext(par, (image->height << 16) | width);
        NVDmaNext(par, (image->dy << 16) | (image->dx & 0xffff));

        while (dsize >= RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS) {
                NVDmaStart(info, par, RECT_EXPAND_TWO_COLOR_DATA(0),
                           RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS);

                for (j = RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS; j--;) {
                        tmp = data[k++];
                        reverse_order(&tmp);
                        NVDmaNext(par, tmp);
                }

                dsize -= RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS;
        }

        if (dsize) {
                NVDmaStart(info, par, RECT_EXPAND_TWO_COLOR_DATA(0), dsize);

                for (j = dsize; j--;) {
                        tmp = data[k++];
                        reverse_order(&tmp);
                        NVDmaNext(par, tmp);
                }
        }

        NVDmaKickoff(par);
}

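/*
 * fb_imageblit hook: only monochrome images are accelerated; color
 * images, or any image once the engine has locked up, are drawn by the
 * software cfb_imageblit().
 */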
void nvidiafb_imageblit(struct fb_info *info, const struct fb_image *image)
{
        struct nvidia_par *par = info->par;

        if (info->state != FBINFO_STATE_RUNNING)
                return;

        if (image->depth == 1 && !par->lockup)
                nvidiafb_mono_color_expand(info, image);
        else
                cfb_imageblit(info, image);
}