#include <drm/drmP.h>
#include "gma_display.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#include "psb_drv.h"
#include "framebuffer.h"

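/*
 * Return true if any connector currently routed to this CRTC drives an
 * output of the given type (e.g. INTEL_OUTPUT_LVDS).
 */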
bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *l_entry;

	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
			struct gma_encoder *gma_encoder =
						gma_attached_encoder(l_entry);
			if (gma_encoder->type == type)
				return true;
		}
	}

	return false;
}

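/*
 * Crude vblank wait: sleep for 20ms, roughly one full frame at 50Hz,
 * instead of polling the pipe status registers.
 */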
void gma_wait_for_vblank(struct drm_device *dev)
{
	mdelay(20);
}

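/*
 * Point the display plane at a new framebuffer: pin its GTT range, then
 * program the stride, pixel format and base/surface registers of the pipe.
 */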
int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_framebuffer *psbfb = to_psb_fb(crtc->primary->fb);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	unsigned long start, offset;
	u32 dspcntr;
	int ret = 0;

	if (!gma_power_begin(dev, true))
		return 0;

	/* No framebuffer bound to this CRTC; nothing to program */
	if (!crtc->primary->fb) {
		dev_err(dev->dev, "No FB bound\n");
		goto gma_pipe_cleaner;
	}

	/* Pin the framebuffer into the GTT so the scanout address stays valid */
	ret = psb_gtt_pin(psbfb->gtt);
	if (ret < 0)
		goto gma_pipe_set_base_exit;
	start = psbfb->gtt->offset;
	offset = y * crtc->primary->fb->pitches[0] +
		 x * (crtc->primary->fb->bits_per_pixel / 8);

	REG_WRITE(map->stride, crtc->primary->fb->pitches[0]);

	dspcntr = REG_READ(map->cntr);
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;

	switch (crtc->primary->fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (crtc->primary->fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		dev_err(dev->dev, "Unknown color depth\n");
		ret = -EINVAL;
		goto gma_pipe_set_base_exit;
	}
	REG_WRITE(map->cntr, dspcntr);

	dev_dbg(dev->dev,
		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);

	/*
	 * Poulsbo takes a single linear base address; later chips split it
	 * into a plane offset and a surface base register.
	 */
	if (IS_PSB(dev)) {
		REG_WRITE(map->base, offset + start);
		REG_READ(map->base);
	} else {
		REG_WRITE(map->base, offset);
		REG_READ(map->base);
		REG_WRITE(map->surf, start);
		REG_READ(map->surf);
	}

gma_pipe_cleaner:
	/* If there was a previous framebuffer, drop its pin */
	if (old_fb)
		psb_gtt_unpin(to_psb_fb(old_fb)->gtt);

gma_pipe_set_base_exit:
	gma_power_end(dev);
	return ret;
}

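/*
 * Load the 256-entry gamma LUT of this CRTC's pipe. If the display is
 * powered down, the values are written to the shadow register state
 * instead of the hardware.
 */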
void gma_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	int palreg = map->palette;
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled)
		return;

	if (gma_power_begin(dev, false)) {
		for (i = 0; i < 256; i++) {
			REG_WRITE(palreg + 4 * i,
				  ((gma_crtc->lut_r[i] +
				    gma_crtc->lut_adj[i]) << 16) |
				  ((gma_crtc->lut_g[i] +
				    gma_crtc->lut_adj[i]) << 8) |
				  (gma_crtc->lut_b[i] +
				   gma_crtc->lut_adj[i]));
		}
		gma_power_end(dev);
	} else {
		for (i = 0; i < 256; i++) {
			/* FIXME: this always updates the pipe 0 shadow copy */
			dev_priv->regs.pipe[0].palette[i] =
				((gma_crtc->lut_r[i] +
				  gma_crtc->lut_adj[i]) << 16) |
				((gma_crtc->lut_g[i] +
				  gma_crtc->lut_adj[i]) << 8) |
				(gma_crtc->lut_b[i] +
				 gma_crtc->lut_adj[i]);
		}
	}
}

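/*
 * Legacy gamma_set hook: keep the top 8 bits of each 16-bit component in
 * the per-CRTC LUT and push the result to the hardware.
 */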
int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
		       u32 size)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int i;

	for (i = 0; i < size; i++) {
		gma_crtc->lut_r[i] = red[i] >> 8;
		gma_crtc->lut_g[i] = green[i] >> 8;
		gma_crtc->lut_b[i] = blue[i] >> 8;
	}

	gma_crtc_load_lut(crtc);

	return 0;
}

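/*
 * Set the power management mode of the pipe and plane. For DPMS on, the
 * DPLL, plane and pipe are enabled in that order; for DPMS off they are
 * torn down in reverse, with vblank waits so the disables take effect.
 */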
void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	u32 temp;

	/* Cedarview: disable self-refresh while the pipe is reconfigured */
	if (IS_CDV(dev))
		dev_priv->ops->disable_sr(dev);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		if (gma_crtc->active)
			break;

		gma_crtc->active = true;

		/* Enable the DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) == 0) {
			REG_WRITE(map->dpll, temp);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
		}

		/* Enable the plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			REG_WRITE(map->cntr,
				  temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
		}

		udelay(150);

		/* Enable the pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) == 0)
			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);

		/* Clear any stale status bits and the FIFO underrun flag */
		temp = REG_READ(map->status);
		temp &= ~(0xFFFF);
		temp |= PIPE_FIFO_UNDERRUN;
		REG_WRITE(map->status, temp);
		REG_READ(map->status);

		gma_crtc_load_lut(crtc);

		break;
	case DRM_MODE_DPMS_OFF:
		if (!gma_crtc->active)
			break;

		gma_crtc->active = false;

		/* Disable the VGA plane that we never use */
		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

		/* Turn off vblank interrupts */
		drm_crtc_vblank_off(crtc);

		/* Wait for vblank for the disable to take effect */
		gma_wait_for_vblank(dev);

		/* Disable the plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			REG_WRITE(map->cntr,
				  temp & ~DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
			REG_READ(map->base);
		}

		/* Disable the pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
			REG_READ(map->conf);
		}

		/* Wait for vblank for the disable to take effect. */
		gma_wait_for_vblank(dev);

		udelay(150);

		/* Disable the DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) != 0) {
			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
		}

		/* Wait for the clocks to turn off. */
		udelay(150);
		break;
	}

	if (IS_CDV(dev))
		dev_priv->ops->update_wm(dev, crtc);

	/* Set FIFO watermarks */
	REG_WRITE(DSPARB, 0x3F3E);
}

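/*
 * Legacy cursor_set hook: pin the GEM object backing a 64x64 ARGB cursor
 * (copying it into the dedicated cursor memory when the hardware needs a
 * physical address) and point the cursor registers at it.
 */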
int gma_crtc_cursor_set(struct drm_crtc *crtc,
			struct drm_file *file_priv,
			uint32_t handle,
			uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
	uint32_t temp;
	size_t addr = 0;
	struct gtt_range *gt;
	struct gtt_range *cursor_gt = gma_crtc->cursor_gt;
	struct drm_gem_object *obj;
	void *tmp_dst, *tmp_src;
	int ret = 0, i, cursor_pages;

	/* If we don't have a handle, turn the cursor off */
	if (!handle) {
		temp = CURSOR_MODE_DISABLE;
		if (gma_power_begin(dev, false)) {
			REG_WRITE(control, temp);
			REG_WRITE(base, 0);
			gma_power_end(dev);
		}

		/* Unpin and drop the old cursor object */
		if (gma_crtc->cursor_obj) {
			gt = container_of(gma_crtc->cursor_obj,
					  struct gtt_range, gem);
			psb_gtt_unpin(gt);
			drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
			gma_crtc->cursor_obj = NULL;
		}
		return 0;
	}

	/* Only 64x64 cursors are supported */
	if (width != 64 || height != 64) {
		dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->size < width * height * 4) {
		dev_dbg(dev->dev, "Buffer is too small\n");
		ret = -ENOMEM;
		goto unref_cursor;
	}

	gt = container_of(obj, struct gtt_range, gem);

	/* Pin the memory into the GTT */
	ret = psb_gtt_pin(gt);
	if (ret) {
		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
		goto unref_cursor;
	}

	if (dev_priv->ops->cursor_needs_phys) {
		if (cursor_gt == NULL) {
			dev_err(dev->dev, "No hardware cursor mem available");
			ret = -ENOMEM;
			goto unref_cursor;
		}

		/* Copy at most four pages into the dedicated cursor memory */
		if (gt->npage > 4)
			cursor_pages = 4;
		else
			cursor_pages = gt->npage;

		tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
		for (i = 0; i < cursor_pages; i++) {
			tmp_src = kmap(gt->pages[i]);
			memcpy(tmp_dst, tmp_src, PAGE_SIZE);
			kunmap(gt->pages[i]);
			tmp_dst += PAGE_SIZE;
		}

		addr = gma_crtc->cursor_addr;
	} else {
		addr = gt->offset;
		gma_crtc->cursor_addr = addr;
	}

	temp = 0;
	/* Select the pipe for the cursor */
	temp |= (pipe << 28);
	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;

	if (gma_power_begin(dev, false)) {
		REG_WRITE(control, temp);
		REG_WRITE(base, addr);
		gma_power_end(dev);
	}

	/* Unpin and drop the old cursor object */
	if (gma_crtc->cursor_obj) {
		gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
		psb_gtt_unpin(gt);
		drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
	}

	gma_crtc->cursor_obj = obj;
unlock:
	return ret;

unref_cursor:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

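/*
 * Legacy cursor_move hook: the position registers take a sign bit plus
 * magnitude per axis, so negative coordinates are encoded explicitly.
 */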
int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t temp = 0;
	uint32_t addr;

	if (x < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
		x = -x;
	}
	if (y < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
		y = -y;
	}

	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);

	addr = gma_crtc->cursor_addr;

	if (gma_power_begin(dev, false)) {
		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
		gma_power_end(dev);
	}
	return 0;
}

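/*
 * CRTC helper hooks: prepare turns the pipe off before a mode set, commit
 * turns it back on afterwards, and disable additionally unpins the
 * framebuffer that was being scanned out.
 */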
void gma_crtc_prepare(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}

void gma_crtc_commit(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}

void gma_crtc_disable(struct drm_crtc *crtc)
{
	struct gtt_range *gt;
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

	if (crtc->primary->fb) {
		gt = to_psb_fb(crtc->primary->fb)->gtt;
		psb_gtt_unpin(gt);
	}
}

void gma_crtc_destroy(struct drm_crtc *crtc)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);

	kfree(gma_crtc->crtc_state);
	drm_crtc_cleanup(crtc);
	kfree(gma_crtc);
}

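/*
 * Wrap drm_crtc_helper_set_config() so the device cannot be runtime
 * suspended in the middle of a mode set.
 */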
int gma_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev = set->crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int ret;

	if (!dev_priv->rpm_enabled)
		return drm_crtc_helper_set_config(set);

	pm_runtime_forbid(&dev->pdev->dev);
	ret = drm_crtc_helper_set_config(set);
	pm_runtime_allow(&dev->pdev->dev);

	return ret;
}

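/* Save the hardware state of the given CRTC so it can be restored later */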
void gma_crtc_save(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No CRTC state found\n");
		return;
	}

	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
	crtc_state->savePIPECONF = REG_READ(map->conf);
	crtc_state->savePIPESRC = REG_READ(map->src);
	crtc_state->saveFP0 = REG_READ(map->fp0);
	crtc_state->saveFP1 = REG_READ(map->fp1);
	crtc_state->saveDPLL = REG_READ(map->dpll);
	crtc_state->saveHTOTAL = REG_READ(map->htotal);
	crtc_state->saveHBLANK = REG_READ(map->hblank);
	crtc_state->saveHSYNC = REG_READ(map->hsync);
	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
	crtc_state->saveVBLANK = REG_READ(map->vblank);
	crtc_state->saveVSYNC = REG_READ(map->vsync);
	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);

	crtc_state->saveDSPSIZE = REG_READ(map->size);
	crtc_state->saveDSPPOS = REG_READ(map->pos);

	crtc_state->saveDSPBASE = REG_READ(map->base);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
}

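/* Restore the hardware state of the given CRTC saved by gma_crtc_save() */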
void gma_crtc_restore(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No crtc state\n");
		return;
	}

	/* Turn the DPLL off before reprogramming it */
	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
		REG_WRITE(map->dpll,
			  crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
		REG_READ(map->dpll);
		udelay(150);
	}

	REG_WRITE(map->fp0, crtc_state->saveFP0);
	REG_READ(map->fp0);

	REG_WRITE(map->fp1, crtc_state->saveFP1);
	REG_READ(map->fp1);

	REG_WRITE(map->dpll, crtc_state->saveDPLL);
	REG_READ(map->dpll);
	udelay(150);

	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);

	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
	REG_WRITE(map->pos, crtc_state->saveDSPPOS);

	REG_WRITE(map->src, crtc_state->savePIPESRC);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);
	REG_WRITE(map->conf, crtc_state->savePIPECONF);

	gma_wait_for_vblank(dev);

	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);

	gma_wait_for_vblank(dev);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
}

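/* Encoder helper hooks: DPMS off before a mode set, back on at commit */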
void gma_encoder_prepare(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
		encoder->helper_private;

	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}

void gma_encoder_commit(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
		encoder->helper_private;

	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}

void gma_encoder_destroy(struct drm_encoder *encoder)
{
	struct gma_encoder *intel_encoder = to_gma_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

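/*
 * Encoders and connectors are wired 1:1 on this hardware, so the attached
 * encoder is always the best one.
 */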
struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
{
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);

	return &gma_encoder->base;
}

void gma_connector_attach_encoder(struct gma_connector *connector,
				  struct gma_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

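/*
 * GMA_PLL_INVALID() bails out of gma_pll_is_valid(); the string argument
 * is only a human-readable reason and is currently discarded.
 */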
#define GMA_PLL_INVALID(s) { return false; }

bool gma_pll_is_valid(struct drm_crtc *crtc,
		      const struct gma_limit_t *limit,
		      struct gma_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		GMA_PLL_INVALID("p1 out of range");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		GMA_PLL_INVALID("p out of range");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		GMA_PLL_INVALID("m2 out of range");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		GMA_PLL_INVALID("m1 out of range");
	/* m1 == 0 is allowed; only reject m1 <= m2 when m1 is actually used */
	if (clock->m1 <= clock->m2 && clock->m1 != 0)
		GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		GMA_PLL_INVALID("m out of range");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		GMA_PLL_INVALID("n out of range");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		GMA_PLL_INVALID("vco out of range");
	/* XXX: the dot clock check may need to depend on the output type */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		GMA_PLL_INVALID("dot out of range");

	return true;
}

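/*
 * Brute-force search of the m1/m2/n/p1 divider space for the PLL setting
 * whose resulting dot clock is closest to the requested target. Returns
 * true if any valid configuration was found.
 */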
bool gma_find_best_pll(const struct gma_limit_t *limit,
		       struct drm_crtc *crtc, int target, int refclk,
		       struct gma_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	const struct gma_clock_funcs *clock_funcs =
		to_gma_crtc(crtc)->clock_funcs;
	struct gma_clock_t clock;
	int err = target;

	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
		/*
		 * If the LVDS port is already enabled, follow its current
		 * clock configuration: a powered-up B clock selects the
		 * fast p2 divider, otherwise the slow one.
		 */
		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     (clock.m2 < clock.m1 || clock.m1 == 0) &&
		      clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max;
				     clock.p1++) {
					int this_err;

					clock_funcs->clock(refclk, &clock);

					if (!clock_funcs->pll_is_valid(crtc,
								limit, &clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return err != target;
}