linux/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/export.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#include <drm/ttm/ttm_placement.h>

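/* Coalesce dirty-region flushes to at most about 30 updates per second. */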
#define VMW_DIRTY_DELAY (HZ / 30)

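/*
 * Per-framebuffer driver state: the shadow buffer in system memory
 * (vmalloc), the TTM buffer object backing the visible VRAM surface,
 * and the dirty rectangle tracking which region still needs to be
 * copied from the shadow buffer into VRAM.
 */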
struct vmw_fb_par {
        struct vmw_private *vmw_priv;

        void *vmalloc;

        struct vmw_dma_buffer *vmw_bo;
        struct ttm_bo_kmap_obj map;

        u32 pseudo_palette[17];

        unsigned depth;
        unsigned bpp;

        unsigned max_width;
        unsigned max_height;

        void *bo_ptr;
        unsigned bo_size;
        bool bo_iowrite;

        struct {
                spinlock_t lock;
                bool active;
                unsigned x1;
                unsigned y1;
                unsigned x2;
                unsigned y2;
        } dirty;
};

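/*
 * Store a palette entry in the truecolor pseudo palette used by fbcon.
 * Only the first 16 entries and 24/32-bit depths are supported.
 */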
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
                            unsigned blue, unsigned transp,
                            struct fb_info *info)
{
        struct vmw_fb_par *par = info->par;
        u32 *pal = par->pseudo_palette;

        if (regno > 15) {
                DRM_ERROR("Bad regno %u.\n", regno);
                return 1;
        }

        switch (par->depth) {
        case 24:
        case 32:
                pal[regno] = ((red & 0xff00) << 8) |
                              (green & 0xff00) |
                             ((blue  & 0xff00) >> 8);
                break;
        default:
                DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
                return 1;
        }

        return 0;
}

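/*
 * Validate a requested mode: only 32 bpp (at 24- or 32-bit depth) is
 * accepted, panning requires display topology support, and the geometry
 * must fit both the framebuffer and the available VRAM.
 */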
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
                            struct fb_info *info)
{
        int depth = var->bits_per_pixel;
        struct vmw_fb_par *par = info->par;
        struct vmw_private *vmw_priv = par->vmw_priv;

        switch (var->bits_per_pixel) {
        case 32:
                depth = (var->transp.length > 0) ? 32 : 24;
                break;
        default:
                DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
                return -EINVAL;
        }

        switch (depth) {
        case 24:
                var->red.offset = 16;
                var->green.offset = 8;
                var->blue.offset = 0;
                var->red.length = 8;
                var->green.length = 8;
                var->blue.length = 8;
                var->transp.length = 0;
                var->transp.offset = 0;
                break;
        case 32:
                var->red.offset = 16;
                var->green.offset = 8;
                var->blue.offset = 0;
                var->red.length = 8;
                var->green.length = 8;
                var->blue.length = 8;
                var->transp.length = 8;
                var->transp.offset = 24;
                break;
        default:
                DRM_ERROR("Bad depth %u.\n", depth);
                return -EINVAL;
        }

        if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            (var->xoffset != 0 || var->yoffset != 0)) {
                DRM_ERROR("Cannot handle panning without display topology.\n");
                return -EINVAL;
        }

        if ((var->xoffset + var->xres) > par->max_width ||
            (var->yoffset + var->yres) > par->max_height) {
                DRM_ERROR("Requested geometry does not fit in the framebuffer.\n");
                return -EINVAL;
        }

        if (!vmw_kms_validate_mode_vram(vmw_priv,
                                        info->fix.line_length,
                                        var->yoffset + var->yres)) {
                DRM_ERROR("Requested geometry does not fit in VRAM.\n");
                return -EINVAL;
        }

        return 0;
}

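/*
 * Program the SVGA device with the current mode, and set up a single
 * primary display when the device supports display topology.
 */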
static int vmw_fb_set_par(struct fb_info *info)
{
        struct vmw_fb_par *par = info->par;
        struct vmw_private *vmw_priv = par->vmw_priv;
        int ret;

        ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
                                 info->fix.line_length,
                                 par->bpp, par->depth);
        if (ret)
                return ret;

        if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
                /* TODO: check whether pitch or offset changed */
                vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
        }

        /*
         * Warn loudly here: if this check fails, the user can probably
         * not see anything on the screen.
         */
        WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);

        return 0;
}

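/*
 * Panning offsets are validated in vmw_fb_check_var() and programmed in
 * vmw_fb_set_par(), so there is nothing more to do here.
 */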
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
                              struct fb_info *info)
{
        return 0;
}

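/* Blanking is not implemented; report success so fbcon carries on. */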
static int vmw_fb_blank(int blank, struct fb_info *info)
{
        return 0;
}

/*
 * Dirty code
 */

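/*
 * Copy dirty rows from the system-memory shadow buffer into VRAM, then
 * emit an SVGA_CMD_UPDATE command so the host redraws the region.
 * Called from the deferred-IO work item.
 */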
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
        struct vmw_private *vmw_priv = par->vmw_priv;
        struct fb_info *info = vmw_priv->fb_info;
        int stride = (info->fix.line_length / 4);
        int *src = (int *)info->screen_base;
        __le32 __iomem *vram_mem = par->bo_ptr;
        unsigned long flags;
        unsigned x, y, w, h;
        int i, k;
        struct {
                uint32_t header;
                SVGAFifoCmdUpdate body;
        } *cmd;

        if (vmw_priv->suspended)
                return;

        spin_lock_irqsave(&par->dirty.lock, flags);
        if (!par->dirty.active) {
                spin_unlock_irqrestore(&par->dirty.lock, flags);
                return;
        }
        x = par->dirty.x1;
        y = par->dirty.y1;
        w = min(par->dirty.x2, info->var.xres) - x;
        h = min(par->dirty.y2, info->var.yres) - y;
        par->dirty.x1 = par->dirty.x2 = 0;
        par->dirty.y1 = par->dirty.y2 = 0;
        spin_unlock_irqrestore(&par->dirty.lock, flags);

        for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
                for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
                        iowrite32(src[k], vram_mem + k);
        }

#if 0
        DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

        cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                return;
        }

        cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
        cmd->body.x = cpu_to_le32(x);
        cmd->body.y = cpu_to_le32(y);
        cmd->body.width = cpu_to_le32(w);
        cmd->body.height = cpu_to_le32(h);
        vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}

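/*
 * Grow the dirty rectangle to include the given area, and kick off the
 * flush work if this is the first damage since the last flush.
 */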
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
                              unsigned x1, unsigned y1,
                              unsigned width, unsigned height)
{
        struct fb_info *info = par->vmw_priv->fb_info;
        unsigned long flags;
        unsigned x2 = x1 + width;
        unsigned y2 = y1 + height;

        spin_lock_irqsave(&par->dirty.lock, flags);
        if (par->dirty.x1 == par->dirty.x2) {
                par->dirty.x1 = x1;
                par->dirty.y1 = y1;
                par->dirty.x2 = x2;
                par->dirty.y2 = y2;
                /*
                 * If dirty tracking is active, schedule the flush work.
                 * The work item is shared with the defio system.
                 */
                if (par->dirty.active)
                        schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
        } else {
                if (x1 < par->dirty.x1)
                        par->dirty.x1 = x1;
                if (y1 < par->dirty.y1)
                        par->dirty.y1 = y1;
                if (x2 > par->dirty.x2)
                        par->dirty.x2 = x2;
                if (y2 > par->dirty.y2)
                        par->dirty.y2 = y2;
        }
        spin_unlock_irqrestore(&par->dirty.lock, flags);
}

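/*
 * Deferred-IO callback: convert the list of touched pages into a span
 * of dirty scanlines, then flush the result to VRAM.
 */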
static void vmw_deferred_io(struct fb_info *info,
                            struct list_head *pagelist)
{
        struct vmw_fb_par *par = info->par;
        unsigned long start, end, min, max;
        unsigned long flags;
        struct page *page;
        int y1, y2;

        min = ULONG_MAX;
        max = 0;
        list_for_each_entry(page, pagelist, lru) {
                start = page->index << PAGE_SHIFT;
                end = start + PAGE_SIZE - 1;
                min = min(min, start);
                max = max(max, end);
        }

        if (min < max) {
                y1 = min / info->fix.line_length;
                y2 = (max / info->fix.line_length) + 1;

                spin_lock_irqsave(&par->dirty.lock, flags);
                par->dirty.x1 = 0;
                par->dirty.y1 = y1;
                par->dirty.x2 = info->var.xres;
                par->dirty.y2 = y2;
                spin_unlock_irqrestore(&par->dirty.lock, flags);
        }

        vmw_fb_dirty_flush(par);
}

struct fb_deferred_io vmw_defio = {
        .delay          = VMW_DIRTY_DELAY,
        .deferred_io    = vmw_deferred_io,
};

/*
 * Draw code
 */

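/*
 * The drawing ops delegate to the generic cfb_* helpers operating on the
 * shadow buffer, then mark the affected rectangle dirty so it gets
 * flushed to VRAM.
 */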
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
        cfb_fillrect(info, rect);
        vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
                          rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
        cfb_copyarea(info, region);
        vmw_fb_dirty_mark(info->par, region->dx, region->dy,
                          region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
        cfb_imageblit(info, image);
        vmw_fb_dirty_mark(info->par, image->dx, image->dy,
                          image->width, image->height);
}

/*
 * Bring up code
 */

static struct fb_ops vmw_fb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = vmw_fb_check_var,
        .fb_set_par = vmw_fb_set_par,
        .fb_setcolreg = vmw_fb_setcolreg,
        .fb_fillrect = vmw_fb_fillrect,
        .fb_copyarea = vmw_fb_copyarea,
        .fb_imageblit = vmw_fb_imageblit,
        .fb_pan_display = vmw_fb_pan_display,
        .fb_blank = vmw_fb_blank,
};

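/*
 * Allocate a no-evict DMA buffer for the framebuffer, restricted to the
 * first pages of VRAM, while holding the fbdev master lock.
 */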
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
                            size_t size, struct vmw_dma_buffer **out)
{
        struct vmw_dma_buffer *vmw_bo;
        struct ttm_placement ne_placement = vmw_vram_ne_placement;
        int ret;

        ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        /* interruptible? */
        ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
        if (unlikely(ret != 0))
                return ret;

        vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
        if (!vmw_bo) {
                ret = -ENOMEM;
                goto err_unlock;
        }

        ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
                              &ne_placement,
                              false,
                              &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto err_unlock; /* init frees the buffer on failure */

        *out = vmw_bo;

        ttm_write_unlock(&vmw_priv->fbdev_master.lock);

        return 0;

err_unlock:
        ttm_write_unlock(&vmw_priv->fbdev_master.lock);
        return ret;
}

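/*
 * Create the fbdev: allocate the shadow buffer and the VRAM buffer
 * object, fill in the fixed and variable screen info, hook up deferred
 * IO, and register the framebuffer.
 */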
int vmw_fb_init(struct vmw_private *vmw_priv)
{
        struct device *device = &vmw_priv->dev->pdev->dev;
        struct vmw_fb_par *par;
        struct fb_info *info;
        unsigned initial_width, initial_height;
        unsigned fb_width, fb_height;
        unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
        int ret;

        fb_bpp = 32;
        fb_depth = 24;

        /* XXX As shouldn't these be as well. */
        fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
        fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

        initial_width = min(vmw_priv->initial_width, fb_width);
        initial_height = min(vmw_priv->initial_height, fb_height);

        fb_pitch = fb_width * fb_bpp / 8;
        fb_size = fb_pitch * fb_height;
        fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

        info = framebuffer_alloc(sizeof(*par), device);
        if (!info)
                return -ENOMEM;

        /*
         * Par
         */
        vmw_priv->fb_info = info;
        par = info->par;
        par->vmw_priv = vmw_priv;
        par->depth = fb_depth;
        par->bpp = fb_bpp;
        par->vmalloc = NULL;
        par->max_width = fb_width;
        par->max_height = fb_height;

        /*
         * Create buffers and alloc memory
         */
        par->vmalloc = vmalloc(fb_size);
        if (unlikely(par->vmalloc == NULL)) {
                ret = -ENOMEM;
                goto err_free;
        }

        ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
        if (unlikely(ret != 0))
                goto err_free;

        ret = ttm_bo_kmap(&par->vmw_bo->base,
                          0,
                          par->vmw_bo->base.num_pages,
                          &par->map);
        if (unlikely(ret != 0))
                goto err_unref;
        par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
        par->bo_size = fb_size;

        /*
         * Fixed and var
         */
        strcpy(info->fix.id, "svgadrmfb");
        info->fix.type = FB_TYPE_PACKED_PIXELS;
        info->fix.visual = FB_VISUAL_TRUECOLOR;
        info->fix.type_aux = 0;
        info->fix.xpanstep = 1; /* doing it in hw */
        info->fix.ypanstep = 1; /* doing it in hw */
        info->fix.ywrapstep = 0;
        info->fix.accel = FB_ACCEL_NONE;
        info->fix.line_length = fb_pitch;

        info->fix.smem_start = 0;
        info->fix.smem_len = fb_size;

        info->pseudo_palette = par->pseudo_palette;
        info->screen_base = par->vmalloc;
        info->screen_size = fb_size;

        info->flags = FBINFO_DEFAULT;
        info->fbops = &vmw_fb_ops;

        /* 24-bit depth by default */
        info->var.red.offset = 16;
        info->var.green.offset = 8;
        info->var.blue.offset = 0;
        info->var.red.length = 8;
        info->var.green.length = 8;
        info->var.blue.length = 8;
        info->var.transp.offset = 0;
        info->var.transp.length = 0;

        info->var.xres_virtual = fb_width;
        info->var.yres_virtual = fb_height;
        info->var.bits_per_pixel = par->bpp;
        info->var.xoffset = 0;
        info->var.yoffset = 0;
        info->var.activate = FB_ACTIVATE_NOW;
        info->var.height = -1;
        info->var.width = -1;

        info->var.xres = initial_width;
        info->var.yres = initial_height;

        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

        info->apertures = alloc_apertures(1);
        if (!info->apertures) {
                ret = -ENOMEM;
                goto err_aper;
        }
        info->apertures->ranges[0].base = vmw_priv->vram_start;
        info->apertures->ranges[0].size = vmw_priv->vram_size;

        /*
         * Dirty & Deferred IO
         */
        par->dirty.x1 = par->dirty.x2 = 0;
        par->dirty.y1 = par->dirty.y2 = 0;
        par->dirty.active = true;
        spin_lock_init(&par->dirty.lock);
        info->fbdefio = &vmw_defio;
        fb_deferred_io_init(info);

        ret = register_framebuffer(info);
        if (unlikely(ret != 0))
                goto err_defio;

        return 0;

err_defio:
        fb_deferred_io_cleanup(info);
err_aper:
        ttm_bo_kunmap(&par->map);
err_unref:
        ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
        vfree(par->vmalloc);
        framebuffer_release(info);
        vmw_priv->fb_info = NULL;

        return ret;
}

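/* Tear down the fbdev: unregister, unmap, and release all buffers. */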
int vmw_fb_close(struct vmw_private *vmw_priv)
{
        struct fb_info *info;
        struct vmw_fb_par *par;
        struct ttm_buffer_object *bo;

        if (!vmw_priv->fb_info)
                return 0;

        info = vmw_priv->fb_info;
        par = info->par;
        bo = &par->vmw_bo->base;
        par->vmw_bo = NULL;

        /* FIXME: is this the right teardown order? */
        fb_deferred_io_cleanup(info);
        unregister_framebuffer(info);

        ttm_bo_kunmap(&par->map);
        ttm_bo_unref(&bo);

        vfree(par->vmalloc);
        framebuffer_release(info);

        return 0;
}

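/*
 * Disable the fbdev: stop dirty tracking, drain pending deferred work,
 * then unmap and unpin the VRAM buffer so the VRAM can be reused.
 */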
int vmw_fb_off(struct vmw_private *vmw_priv)
{
        struct fb_info *info;
        struct vmw_fb_par *par;
        unsigned long flags;

        if (!vmw_priv->fb_info)
                return -EINVAL;

        info = vmw_priv->fb_info;
        par = info->par;

        spin_lock_irqsave(&par->dirty.lock, flags);
        par->dirty.active = false;
        spin_unlock_irqrestore(&par->dirty.lock, flags);

        flush_delayed_work(&info->deferred_work);

        par->bo_ptr = NULL;
        ttm_bo_kunmap(&par->map);

        vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);

        return 0;
}

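/*
 * Re-enable the fbdev: stop all overlays, move the buffer back to the
 * start of VRAM, re-map it, restart dirty tracking, and force a full
 * refresh.
 */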
int vmw_fb_on(struct vmw_private *vmw_priv)
{
        struct fb_info *info;
        struct vmw_fb_par *par;
        unsigned long flags;
        bool dummy;
        int ret;

        if (!vmw_priv->fb_info)
                return -EINVAL;

        info = vmw_priv->fb_info;
        par = info->par;

        /* We are already active. */
        if (par->bo_ptr != NULL)
                return 0;

        /* Make sure that all overlays are stopped when we take over. */
        vmw_overlay_stop_all(vmw_priv);

        ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("could not move buffer to start of VRAM\n");
                goto err_no_buffer;
        }

        ret = ttm_bo_kmap(&par->vmw_bo->base,
                          0,
                          par->vmw_bo->base.num_pages,
                          &par->map);
        BUG_ON(ret != 0);
        par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

        spin_lock_irqsave(&par->dirty.lock, flags);
        par->dirty.active = true;
        spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
        vmw_fb_set_par(info);

        vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

        /*
         * If there already was something dirty, we won't schedule new
         * work, so let's do it now.
         */
        schedule_delayed_work(&info->deferred_work, 0);

        return 0;
}