linux/drivers/gpu/drm/gud/gud_pipe.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2020 Noralf Trønnes
 */

#include <linux/lz4.h>
#include <linux/usb.h>
#include <linux/workqueue.h>

#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>

#include "gud_internal.h"

/*
 * Some userspace rendering loops run all displays in the same loop.
 * This means that a fast display will have to wait for a slow one.
 * For this reason gud does flushing asynchronously by default.
 * The downside is that in e.g. a single display setup userspace thinks
 * the display is insanely fast since the driver reports back immediately
 * that the flush/pageflip is done. This wastes CPU and power.
 * Such users might want to set this module parameter to false.
 */
static bool gud_async_flush = true;
module_param_named(async_flush, gud_async_flush, bool, 0644);
MODULE_PARM_DESC(async_flush, "Enable asynchronous flushing [default=true]");
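
/*
 * The parameter can also be changed at runtime, e.g. to select synchronous
 * flushing (assuming the module is named gud):
 *
 *   echo 0 > /sys/module/gud/parameters/async_flush
 */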

/*
 * FIXME: The driver is probably broken on Big Endian machines.
 * See discussion:
 * https://lore.kernel.org/dri-devel/CAKb7UvihLX0hgBOP3VBG7O+atwZcUVCPVuBdfmDMpg0NjXe-cQ@mail.gmail.com/
 */

static bool gud_is_big_endian(void)
{
#if defined(__BIG_ENDIAN)
        return true;
#else
        return false;
#endif
}

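/*
 * Pack 8-bit grayscale pixels into 1, 2 or 4 bits per pixel, MSB first.
 * For R1 (block_width = 8, bits_per_pixel = 1) the pixel at x = 0 ends up
 * in bit 7 of the first byte and the pixel at x = 7 in bit 0. The XRGB8888
 * source is converted to 8-bit grayscale first and only the top bits of
 * each gray value are kept.
 */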
static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format,
                                   void *src, struct drm_framebuffer *fb,
                                   struct drm_rect *rect)
{
        unsigned int block_width = drm_format_info_block_width(format, 0);
        unsigned int bits_per_pixel = 8 / block_width;
        unsigned int x, y, width, height;
        u8 pix, *pix8, *block = dst; /* Assign to silence compiler warning */
        size_t len;
        void *buf;

        WARN_ON_ONCE(format->char_per_block[0] != 1);

        /* Start on a byte boundary */
        rect->x1 = ALIGN_DOWN(rect->x1, block_width);
        width = drm_rect_width(rect);
        height = drm_rect_height(rect);
        len = drm_format_info_min_pitch(format, 0, width) * height;

        buf = kmalloc(width * height, GFP_KERNEL);
        if (!buf)
                return 0;

        drm_fb_xrgb8888_to_gray8(buf, 0, src, fb, rect);
        pix8 = buf;

        for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                        unsigned int pixpos = x % block_width; /* within byte from the left */
                        unsigned int pixshift = (block_width - pixpos - 1) * bits_per_pixel;

                        if (!pixpos) {
                                block = dst++;
                                *block = 0;
                        }

                        pix = (*pix8++) >> (8 - bits_per_pixel);
                        *block |= pix << pixshift;
                }
        }

        kfree(buf);

        return len;
}

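/*
 * Pack pixels for low bit depth color formats, MSB first within each byte.
 * Only GUD_DRM_FORMAT_XRGB1111 is handled: each XRGB8888 pixel is reduced
 * to the most significant bit of its R, G and B channels, so e.g.
 * 0x00ff8000 (orange) becomes the 3-bit pixel 0b110.
 */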
static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *format,
                                    void *src, struct drm_framebuffer *fb,
                                    struct drm_rect *rect)
{
        unsigned int block_width = drm_format_info_block_width(format, 0);
        unsigned int bits_per_pixel = 8 / block_width;
        u8 r, g, b, pix, *block = dst; /* Assign to silence compiler warning */
        unsigned int x, y, width;
        u32 *pix32;
        size_t len;

        /* Start on a byte boundary */
        rect->x1 = ALIGN_DOWN(rect->x1, block_width);
        width = drm_rect_width(rect);
        len = drm_format_info_min_pitch(format, 0, width) * drm_rect_height(rect);

        for (y = rect->y1; y < rect->y2; y++) {
                pix32 = src + (y * fb->pitches[0]);
                pix32 += rect->x1;

                for (x = 0; x < width; x++) {
                        unsigned int pixpos = x % block_width; /* within byte from the left */
                        unsigned int pixshift = (block_width - pixpos - 1) * bits_per_pixel;

                        if (!pixpos) {
                                block = dst++;
                                *block = 0;
                        }

                        r = *pix32 >> 16;
                        g = *pix32 >> 8;
                        b = *pix32++;

                        switch (format->format) {
                        case GUD_DRM_FORMAT_XRGB1111:
                                pix = ((r >> 7) << 2) | ((g >> 7) << 1) | (b >> 7);
                                break;
                        default:
                                WARN_ON_ONCE(1);
                                return len;
                        }

                        *block |= pix << pixshift;
                }
        }

        return len;
}

static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
                          const struct drm_format_info *format, struct drm_rect *rect,
                          struct gud_set_buffer_req *req)
{
        struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
        u8 compression = gdrm->compression;
        struct iosys_map map[DRM_FORMAT_MAX_PLANES];
        struct iosys_map map_data[DRM_FORMAT_MAX_PLANES];
        void *vaddr, *buf;
        size_t pitch, len;
        int ret = 0;

        pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(rect));
        len = pitch * drm_rect_height(rect);
        if (len > gdrm->bulk_len)
                return -E2BIG;

        ret = drm_gem_fb_vmap(fb, map, map_data);
        if (ret)
                return ret;

        vaddr = map_data[0].vaddr;

        ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
        if (ret)
                goto vunmap;
retry:
        if (compression)
                buf = gdrm->compress_buf;
        else
                buf = gdrm->bulk_buf;

        /*
         * Imported buffers are assumed to be write-combined and thus uncached
         * with slow reads (at least on ARM).
         */
        if (format != fb->format) {
                if (format->format == GUD_DRM_FORMAT_R1) {
                        len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect);
                        if (!len) {
                                ret = -ENOMEM;
                                goto end_cpu_access;
                        }
                } else if (format->format == DRM_FORMAT_R8) {
                        drm_fb_xrgb8888_to_gray8(buf, 0, vaddr, fb, rect);
                } else if (format->format == DRM_FORMAT_RGB332) {
                        drm_fb_xrgb8888_to_rgb332(buf, 0, vaddr, fb, rect);
                } else if (format->format == DRM_FORMAT_RGB565) {
                        drm_fb_xrgb8888_to_rgb565(buf, 0, vaddr, fb, rect, gud_is_big_endian());
                } else if (format->format == DRM_FORMAT_RGB888) {
                        drm_fb_xrgb8888_to_rgb888(buf, 0, vaddr, fb, rect);
                } else {
                        len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
                }
        } else if (gud_is_big_endian() && format->cpp[0] > 1) {
                drm_fb_swab(buf, 0, vaddr, fb, rect, !import_attach);
        } else if (compression && !import_attach && pitch == fb->pitches[0]) {
                /* can compress directly from the framebuffer */
                buf = vaddr + rect->y1 * pitch;
        } else {
                drm_fb_memcpy(buf, 0, vaddr, fb, rect);
        }

        memset(req, 0, sizeof(*req));
        req->x = cpu_to_le32(rect->x1);
        req->y = cpu_to_le32(rect->y1);
        req->width = cpu_to_le32(drm_rect_width(rect));
        req->height = cpu_to_le32(drm_rect_height(rect));
        req->length = cpu_to_le32(len);

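        /*
         * Compress into the bulk buffer. If LZ4 can't shrink the data
         * (complen <= 0), retry with compression cleared so the pixels are
         * prepared directly in the bulk buffer and sent uncompressed.
         */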
        if (compression & GUD_COMPRESSION_LZ4) {
                int complen;

                complen = LZ4_compress_default(buf, gdrm->bulk_buf, len, len, gdrm->lz4_comp_mem);
                if (complen <= 0) {
                        compression = 0;
                        goto retry;
                }

                req->compression = GUD_COMPRESSION_LZ4;
                req->compressed_length = cpu_to_le32(complen);
        }

end_cpu_access:
        drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
vunmap:
        drm_gem_fb_vunmap(fb, map);

        return ret;
}

struct gud_usb_bulk_context {
        struct timer_list timer;
        struct usb_sg_request sgr;
};

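/*
 * A timer on the stack bounds the scatter-gather transfer: if it fires after
 * 3 seconds it cancels the in-flight request, which makes usb_sg_wait()
 * return. del_timer_sync() returning 0 in gud_usb_bulk() then means the
 * timer already ran, i.e. the transfer timed out.
 */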
static void gud_usb_bulk_timeout(struct timer_list *t)
{
        struct gud_usb_bulk_context *ctx = from_timer(ctx, t, timer);

        usb_sg_cancel(&ctx->sgr);
}

static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
{
        struct gud_usb_bulk_context ctx;
        int ret;

        ret = usb_sg_init(&ctx.sgr, gud_to_usb_device(gdrm), gdrm->bulk_pipe, 0,
                          gdrm->bulk_sgt.sgl, gdrm->bulk_sgt.nents, len, GFP_KERNEL);
        if (ret)
                return ret;

        timer_setup_on_stack(&ctx.timer, gud_usb_bulk_timeout, 0);
        mod_timer(&ctx.timer, jiffies + msecs_to_jiffies(3000));

        usb_sg_wait(&ctx.sgr);

        if (!del_timer_sync(&ctx.timer))
                ret = -ETIMEDOUT;
        else if (ctx.sgr.status < 0)
                ret = ctx.sgr.status;
        else if (ctx.sgr.bytes != len)
                ret = -EIO;

        destroy_timer_on_stack(&ctx.timer);

        return ret;
}

static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
                          const struct drm_format_info *format, struct drm_rect *rect)
{
        struct gud_set_buffer_req req;
        size_t len, trlen;
        int ret;

        drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));

        ret = gud_prep_flush(gdrm, fb, format, rect, &req);
        if (ret)
                return ret;

        len = le32_to_cpu(req.length);

        if (req.compression)
                trlen = le32_to_cpu(req.compressed_length);
        else
                trlen = len;

        gdrm->stats_length += len;
        /* Did it wrap around? */
        if (gdrm->stats_length <= len && gdrm->stats_actual_length) {
                gdrm->stats_length = len;
                gdrm->stats_actual_length = 0;
        }
        gdrm->stats_actual_length += trlen;

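        /*
         * Devices that always receive the full frame (GUD_DISPLAY_FLAG_FULL_UPDATE)
         * don't need a SET_BUFFER request per flush; it is only sent to
         * resynchronize after a failed flush.
         */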
        if (!(gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE) || gdrm->prev_flush_failed) {
                ret = gud_usb_set(gdrm, GUD_REQ_SET_BUFFER, 0, &req, sizeof(req));
                if (ret)
                        return ret;
        }

        ret = gud_usb_bulk(gdrm, trlen);
        if (ret)
                gdrm->stats_num_errors++;

        return ret;
}

void gud_clear_damage(struct gud_device *gdrm)
{
        gdrm->damage.x1 = INT_MAX;
        gdrm->damage.y1 = INT_MAX;
        gdrm->damage.x2 = 0;
        gdrm->damage.y2 = 0;
}

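/*
 * Damage is accumulated with min/max in gud_add_damage(), so "no damage" is
 * represented by this inverted rectangle (x1/y1 at INT_MAX, x2/y2 at 0):
 * the first real damage rectangle merged in replaces it entirely.
 */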
static void gud_add_damage(struct gud_device *gdrm, struct drm_rect *damage)
{
        gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1);
        gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1);
        gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2);
        gdrm->damage.y2 = max(gdrm->damage.y2, damage->y2);
}

static void gud_retry_failed_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
                                   struct drm_rect *damage)
{
        /*
         * pipe_update waits for the worker when the display mode is going to
         * change. This ensures that the width and height are still the same,
         * making it safe to add back the damage.
         */

        mutex_lock(&gdrm->damage_lock);
        if (!gdrm->fb) {
                drm_framebuffer_get(fb);
                gdrm->fb = fb;
        }
        gud_add_damage(gdrm, damage);
        mutex_unlock(&gdrm->damage_lock);

        /* Retry only once to avoid a possible storm in case of continuous errors. */
        if (!gdrm->prev_flush_failed)
                queue_work(system_long_wq, &gdrm->work);
        gdrm->prev_flush_failed = true;
}

void gud_flush_work(struct work_struct *work)
{
        struct gud_device *gdrm = container_of(work, struct gud_device, work);
        const struct drm_format_info *format;
        struct drm_framebuffer *fb;
        struct drm_rect damage;
        unsigned int i, lines;
        int idx, ret = 0;
        size_t pitch;

        if (!drm_dev_enter(&gdrm->drm, &idx))
                return;

        mutex_lock(&gdrm->damage_lock);
        fb = gdrm->fb;
        gdrm->fb = NULL;
        damage = gdrm->damage;
        gud_clear_damage(gdrm);
        mutex_unlock(&gdrm->damage_lock);

        if (!fb)
                goto out;

        format = fb->format;
        if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
                format = gdrm->xrgb8888_emulation_format;

        /* Split update if it's too big */
        pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(&damage));
        lines = drm_rect_height(&damage);

        if (gdrm->bulk_len < lines * pitch)
                lines = gdrm->bulk_len / pitch;

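        /*
         * Example: if the bulk buffer only holds 100 lines at this pitch, a
         * 250-line damage rectangle is flushed as three rectangles of 100,
         * 100 and 50 lines.
         */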
        for (i = 0; i < DIV_ROUND_UP(drm_rect_height(&damage), lines); i++) {
                struct drm_rect rect = damage;

                rect.y1 += i * lines;
                rect.y2 = min_t(u32, rect.y1 + lines, damage.y2);

                ret = gud_flush_rect(gdrm, fb, format, &rect);
                if (ret) {
                        if (ret != -ENODEV && ret != -ECONNRESET &&
                            ret != -ESHUTDOWN && ret != -EPROTO) {
                                bool prev_flush_failed = gdrm->prev_flush_failed;

                                gud_retry_failed_flush(gdrm, fb, &damage);
                                if (!prev_flush_failed)
                                        dev_err_ratelimited(fb->dev->dev,
                                                            "Failed to flush framebuffer: error=%d\n", ret);
                        }
                        break;
                }

                gdrm->prev_flush_failed = false;
        }

        drm_framebuffer_put(fb);
out:
        drm_dev_exit(idx);
}

static void gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
                                struct drm_rect *damage)
{
        struct drm_framebuffer *old_fb = NULL;

        mutex_lock(&gdrm->damage_lock);

        if (fb != gdrm->fb) {
                old_fb = gdrm->fb;
                drm_framebuffer_get(fb);
                gdrm->fb = fb;
        }

        gud_add_damage(gdrm, damage);

        mutex_unlock(&gdrm->damage_lock);

        queue_work(system_long_wq, &gdrm->work);

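        /* Drop the reference to a replaced framebuffer outside the lock. */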
        if (old_fb)
                drm_framebuffer_put(old_fb);
}

int gud_pipe_check(struct drm_simple_display_pipe *pipe,
                   struct drm_plane_state *new_plane_state,
                   struct drm_crtc_state *new_crtc_state)
{
        struct gud_device *gdrm = to_gud_device(pipe->crtc.dev);
        struct drm_plane_state *old_plane_state = pipe->plane.state;
        const struct drm_display_mode *mode = &new_crtc_state->mode;
        struct drm_atomic_state *state = new_plane_state->state;
        struct drm_framebuffer *old_fb = old_plane_state->fb;
        struct drm_connector_state *connector_state = NULL;
        struct drm_framebuffer *fb = new_plane_state->fb;
        const struct drm_format_info *format;
        struct drm_connector *connector;
        unsigned int i, num_properties;
        struct gud_state_req *req;
        int idx, ret;
        size_t len;

        if (WARN_ON_ONCE(!fb))
                return -EINVAL;

        format = fb->format;

        if (old_plane_state->rotation != new_plane_state->rotation)
                new_crtc_state->mode_changed = true;

        if (old_fb && old_fb->format != format)
                new_crtc_state->mode_changed = true;

        if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
                return 0;

        /* Only one connector is supported */
        if (hweight32(new_crtc_state->connector_mask) != 1)
                return -EINVAL;

        if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
                format = gdrm->xrgb8888_emulation_format;

        for_each_new_connector_in_state(state, connector, connector_state, i) {
                if (connector_state->crtc)
                        break;
        }

        /*
         * DRM_IOCTL_MODE_OBJ_SETPROPERTY on the rotation property will not have
         * the connector included in the state.
         */
        if (!connector_state) {
                struct drm_connector_list_iter conn_iter;

                drm_connector_list_iter_begin(pipe->crtc.dev, &conn_iter);
                drm_for_each_connector_iter(connector, &conn_iter) {
                        if (connector->state->crtc) {
                                connector_state = connector->state;
                                break;
                        }
                }
                drm_connector_list_iter_end(&conn_iter);
        }

        if (WARN_ON_ONCE(!connector_state))
                return -ENOENT;

        len = struct_size(req, properties,
                          GUD_PROPERTIES_MAX_NUM + GUD_CONNECTOR_PROPERTIES_MAX_NUM);
        req = kzalloc(len, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        gud_from_display_mode(&req->mode, mode);

        req->format = gud_from_fourcc(format->format);
        if (WARN_ON_ONCE(!req->format)) {
                ret = -EINVAL;
                goto out;
        }

        req->connector = drm_connector_index(connector_state->connector);

        ret = gud_connector_fill_properties(connector_state, req->properties);
        if (ret < 0)
                goto out;

        num_properties = ret;
        for (i = 0; i < gdrm->num_properties; i++) {
                u16 prop = gdrm->properties[i];
                u64 val;

                switch (prop) {
                case GUD_PROPERTY_ROTATION:
                        /* DRM UAPI matches the protocol so use value directly */
                        val = new_plane_state->rotation;
                        break;
                default:
                        WARN_ON_ONCE(1);
                        ret = -EINVAL;
                        goto out;
                }

                req->properties[num_properties + i].prop = cpu_to_le16(prop);
                req->properties[num_properties + i].val = cpu_to_le64(val);
                num_properties++;
        }

        if (drm_dev_enter(fb->dev, &idx)) {
                len = struct_size(req, properties, num_properties);
                ret = gud_usb_set(gdrm, GUD_REQ_SET_STATE_CHECK, 0, req, len);
                drm_dev_exit(idx);
        } else {
                ret = -ENODEV;
        }
out:
        kfree(req);

        return ret;
}

void gud_pipe_update(struct drm_simple_display_pipe *pipe,
                     struct drm_plane_state *old_state)
{
        struct drm_device *drm = pipe->crtc.dev;
        struct gud_device *gdrm = to_gud_device(drm);
        struct drm_plane_state *state = pipe->plane.state;
        struct drm_framebuffer *fb = state->fb;
        struct drm_crtc *crtc = &pipe->crtc;
        struct drm_rect damage;
        int idx;

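        /*
         * On a mode change (or disable), wait for the flush worker and drop
         * queued damage. gud_retry_failed_flush() relies on this wait to
         * keep the damage dimensions valid across mode changes.
         */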
        if (crtc->state->mode_changed || !crtc->state->enable) {
                cancel_work_sync(&gdrm->work);
                mutex_lock(&gdrm->damage_lock);
                if (gdrm->fb) {
                        drm_framebuffer_put(gdrm->fb);
                        gdrm->fb = NULL;
                }
                gud_clear_damage(gdrm);
                mutex_unlock(&gdrm->damage_lock);
        }

        if (!drm_dev_enter(drm, &idx))
                return;

        if (!old_state->fb)
                gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 1);

        if (fb && (crtc->state->mode_changed || crtc->state->connectors_changed))
                gud_usb_set(gdrm, GUD_REQ_SET_STATE_COMMIT, 0, NULL, 0);

        if (crtc->state->active_changed)
                gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, crtc->state->active);

        if (drm_atomic_helper_damage_merged(old_state, state, &damage)) {
                if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE)
                        drm_rect_init(&damage, 0, 0, fb->width, fb->height);
                gud_fb_queue_damage(gdrm, fb, &damage);
                if (!gud_async_flush)
                        flush_work(&gdrm->work);
        }

        if (!crtc->state->enable)
                gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0);

        drm_dev_exit(idx);
}