linux/drivers/gpu/drm/gud/gud_pipe.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2020 Noralf Trønnes
 */

#include <linux/lz4.h>
#include <linux/usb.h>
#include <linux/workqueue.h>

#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>

#include "gud_internal.h"

/*
 * Some userspace rendering loops run all displays in the same loop.
 * This means that a fast display has to wait for a slow one.
 * For this reason gud flushes asynchronously by default.
 * The downside is that in e.g. a single-display setup userspace thinks
 * the display is insanely fast, since the driver reports back immediately
 * that the flush/pageflip is done. This wastes CPU and power.
 * Such users might want to set this module parameter to false.
 */
static bool gud_async_flush = true;
module_param_named(async_flush, gud_async_flush, bool, 0644);
MODULE_PARM_DESC(async_flush, "Enable asynchronous flushing [default=true]");

/*
 * FIXME: The driver is probably broken on Big Endian machines.
 * See discussion:
 * https://lore.kernel.org/dri-devel/CAKb7UvihLX0hgBOP3VBG7O+atwZcUVCPVuBdfmDMpg0NjXe-cQ@mail.gmail.com/
 */

static bool gud_is_big_endian(void)
{
#if defined(__BIG_ENDIAN)
	return true;
#else
	return false;
#endif
}

static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format,
				   void *src, struct drm_framebuffer *fb,
				   struct drm_rect *rect)
{
	unsigned int block_width = drm_format_info_block_width(format, 0);
	unsigned int bits_per_pixel = 8 / block_width;
	unsigned int x, y, width, height;
	u8 pix, *pix8, *block = dst; /* Assign to silence compiler warning */
	size_t len;
	void *buf;

	WARN_ON_ONCE(format->char_per_block[0] != 1);

	/* Start on a byte boundary */
	rect->x1 = ALIGN_DOWN(rect->x1, block_width);
	width = drm_rect_width(rect);
	height = drm_rect_height(rect);
	len = drm_format_info_min_pitch(format, 0, width) * height;

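	/*
	 * Two-step conversion: XRGB8888 is first converted to 8-bit
	 * grayscale in a temporary buffer, then the upper bits_per_pixel
	 * bits of each gray value are packed into the destination below.
	 */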
	buf = kmalloc(width * height, GFP_KERNEL);
	if (!buf)
		return 0;

	drm_fb_xrgb8888_to_gray8(buf, src, fb, rect);
	pix8 = buf;

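	/*
	 * Pack pixels MSB-first: the leftmost pixel of a block lands in the
	 * most significant bits of the byte, e.g. for R1 that's 8 pixels
	 * per byte with the leftmost pixel in bit 7.
	 */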
	for (y = 0; y < height; y++) {
		for (x = 0; x < width; x++) {
			unsigned int pixpos = x % block_width; /* within byte from the left */
			unsigned int pixshift = (block_width - pixpos - 1) * bits_per_pixel;

			if (!pixpos) {
				block = dst++;
				*block = 0;
			}

			pix = (*pix8++) >> (8 - bits_per_pixel);
			*block |= pix << pixshift;
		}
	}

	kfree(buf);

	return len;
}

static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *format,
				    void *src, struct drm_framebuffer *fb,
				    struct drm_rect *rect)
{
	unsigned int block_width = drm_format_info_block_width(format, 0);
	unsigned int bits_per_pixel = 8 / block_width;
	u8 r, g, b, pix, *block = dst; /* Assign to silence compiler warning */
	unsigned int x, y, width;
	u32 *pix32;
	size_t len;

	/* Start on a byte boundary */
	rect->x1 = ALIGN_DOWN(rect->x1, block_width);
	width = drm_rect_width(rect);
	len = drm_format_info_min_pitch(format, 0, width) * drm_rect_height(rect);

	for (y = rect->y1; y < rect->y2; y++) {
		pix32 = src + (y * fb->pitches[0]);
		pix32 += rect->x1;

		for (x = 0; x < width; x++) {
			unsigned int pixpos = x % block_width; /* within byte from the left */
			unsigned int pixshift = (block_width - pixpos - 1) * bits_per_pixel;

			if (!pixpos) {
				block = dst++;
				*block = 0;
			}

			r = *pix32 >> 16;
			g = *pix32 >> 8;
			b = *pix32++;

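			/* Pack down to the target format; XRGB1111 keeps only the top bit of each channel */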
			switch (format->format) {
			case GUD_DRM_FORMAT_XRGB1111:
				pix = ((r >> 7) << 2) | ((g >> 7) << 1) | (b >> 7);
				break;
			default:
				WARN_ON_ONCE(1);
				return len;
			}

			*block |= pix << pixshift;
		}
	}

	return len;
}

static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
			  const struct drm_format_info *format, struct drm_rect *rect,
			  struct gud_set_buffer_req *req)
{
	struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
	u8 compression = gdrm->compression;
	struct dma_buf_map map[DRM_FORMAT_MAX_PLANES];
	struct dma_buf_map map_data[DRM_FORMAT_MAX_PLANES];
	void *vaddr, *buf;
	size_t pitch, len;
	int ret = 0;

	pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(rect));
	len = pitch * drm_rect_height(rect);
	if (len > gdrm->bulk_len)
		return -E2BIG;

	ret = drm_gem_fb_vmap(fb, map, map_data);
	if (ret)
		return ret;

	vaddr = map_data[0].vaddr;

	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
	if (ret)
		goto vunmap;
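	/*
	 * If LZ4 can't shrink the data below, compression is disabled and
	 * we jump back here to redo the conversion/copy directly into the
	 * bulk buffer.
	 */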
retry:
	if (compression)
		buf = gdrm->compress_buf;
	else
		buf = gdrm->bulk_buf;

	/*
	 * Imported buffers are assumed to be write-combined and thus uncached
	 * with slow reads (at least on ARM).
	 */
	if (format != fb->format) {
		if (format->format == GUD_DRM_FORMAT_R1) {
			len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect);
			if (!len) {
				ret = -ENOMEM;
				goto end_cpu_access;
			}
		} else if (format->format == DRM_FORMAT_RGB565) {
			drm_fb_xrgb8888_to_rgb565(buf, vaddr, fb, rect, gud_is_big_endian());
		} else {
			len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
		}
	} else if (gud_is_big_endian() && format->cpp[0] > 1) {
		drm_fb_swab(buf, vaddr, fb, rect, !import_attach);
	} else if (compression && !import_attach && pitch == fb->pitches[0]) {
		/* can compress directly from the framebuffer */
		buf = vaddr + rect->y1 * pitch;
	} else {
		drm_fb_memcpy(buf, vaddr, fb, rect);
	}

	memset(req, 0, sizeof(*req));
	req->x = cpu_to_le32(rect->x1);
	req->y = cpu_to_le32(rect->y1);
	req->width = cpu_to_le32(drm_rect_width(rect));
	req->height = cpu_to_le32(drm_rect_height(rect));
	req->length = cpu_to_le32(len);

	if (compression & GUD_COMPRESSION_LZ4) {
		int complen;

		complen = LZ4_compress_default(buf, gdrm->bulk_buf, len, len, gdrm->lz4_comp_mem);
		if (complen <= 0) {
			compression = 0;
			goto retry;
		}

		req->compression = GUD_COMPRESSION_LZ4;
		req->compressed_length = cpu_to_le32(complen);
	}

end_cpu_access:
	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
vunmap:
	drm_gem_fb_vunmap(fb, map);

	return ret;
}

struct gud_usb_bulk_context {
	struct timer_list timer;
	struct usb_sg_request sgr;
};

static void gud_usb_bulk_timeout(struct timer_list *t)
{
	struct gud_usb_bulk_context *ctx = from_timer(ctx, t, timer);

	usb_sg_cancel(&ctx->sgr);
}

static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
{
	struct gud_usb_bulk_context ctx;
	int ret;

	ret = usb_sg_init(&ctx.sgr, gud_to_usb_device(gdrm), gdrm->bulk_pipe, 0,
			  gdrm->bulk_sgt.sgl, gdrm->bulk_sgt.nents, len, GFP_KERNEL);
	if (ret)
		return ret;

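	/*
	 * usb_sg_wait() has no timeout of its own, so arm a timer that
	 * cancels the transfer if the device hasn't completed it within
	 * 3 seconds.
	 */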
	timer_setup_on_stack(&ctx.timer, gud_usb_bulk_timeout, 0);
	mod_timer(&ctx.timer, jiffies + msecs_to_jiffies(3000));

	usb_sg_wait(&ctx.sgr);

	if (!del_timer_sync(&ctx.timer))
		ret = -ETIMEDOUT;
	else if (ctx.sgr.status < 0)
		ret = ctx.sgr.status;
	else if (ctx.sgr.bytes != len)
		ret = -EIO;

	destroy_timer_on_stack(&ctx.timer);

	return ret;
}

static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
			  const struct drm_format_info *format, struct drm_rect *rect)
{
	struct gud_set_buffer_req req;
	size_t len, trlen;
	int ret;

	drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));

	ret = gud_prep_flush(gdrm, fb, format, rect, &req);
	if (ret)
		return ret;

	len = le32_to_cpu(req.length);

	if (req.compression)
		trlen = le32_to_cpu(req.compressed_length);
	else
		trlen = len;

	gdrm->stats_length += len;
	/* Did it wrap around? */
	if (gdrm->stats_length <= len && gdrm->stats_actual_length) {
		gdrm->stats_length = len;
		gdrm->stats_actual_length = 0;
	}
	gdrm->stats_actual_length += trlen;

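	/*
	 * Devices that always receive the full frame don't need the buffer
	 * description, except to resynchronise after a failed flush.
	 */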
	if (!(gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE) || gdrm->prev_flush_failed) {
		ret = gud_usb_set(gdrm, GUD_REQ_SET_BUFFER, 0, &req, sizeof(req));
		if (ret)
			return ret;
	}

	ret = gud_usb_bulk(gdrm, trlen);
	if (ret)
		gdrm->stats_num_errors++;

	return ret;
}

void gud_clear_damage(struct gud_device *gdrm)
{
	gdrm->damage.x1 = INT_MAX;
	gdrm->damage.y1 = INT_MAX;
	gdrm->damage.x2 = 0;
	gdrm->damage.y2 = 0;
}

static void gud_add_damage(struct gud_device *gdrm, struct drm_rect *damage)
{
	gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1);
	gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1);
	gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2);
	gdrm->damage.y2 = max(gdrm->damage.y2, damage->y2);
}

static void gud_retry_failed_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
				   struct drm_rect *damage)
{
	/*
	 * pipe_update waits for the worker when the display mode is going to change.
	 * This ensures that the width and height are still the same, making it safe
	 * to add back the damage.
	 */

	mutex_lock(&gdrm->damage_lock);
	if (!gdrm->fb) {
		drm_framebuffer_get(fb);
		gdrm->fb = fb;
	}
	gud_add_damage(gdrm, damage);
	mutex_unlock(&gdrm->damage_lock);

	/* Retry only once to avoid a possible storm in case of continuous errors. */
	if (!gdrm->prev_flush_failed)
		queue_work(system_long_wq, &gdrm->work);
	gdrm->prev_flush_failed = true;
}

void gud_flush_work(struct work_struct *work)
{
	struct gud_device *gdrm = container_of(work, struct gud_device, work);
	const struct drm_format_info *format;
	struct drm_framebuffer *fb;
	struct drm_rect damage;
	unsigned int i, lines;
	int idx, ret = 0;
	size_t pitch;

	if (!drm_dev_enter(&gdrm->drm, &idx))
		return;

	mutex_lock(&gdrm->damage_lock);
	fb = gdrm->fb;
	gdrm->fb = NULL;
	damage = gdrm->damage;
	gud_clear_damage(gdrm);
	mutex_unlock(&gdrm->damage_lock);

	if (!fb)
		goto out;

	format = fb->format;
	if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
		format = gdrm->xrgb8888_emulation_format;

	/* Split update if it's too big */
	pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(&damage));
	lines = drm_rect_height(&damage);

	if (gdrm->bulk_len < lines * pitch)
		lines = gdrm->bulk_len / pitch;

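	/* Flush the damage in strips of at most 'lines' scanlines each */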
	for (i = 0; i < DIV_ROUND_UP(drm_rect_height(&damage), lines); i++) {
		struct drm_rect rect = damage;

		rect.y1 += i * lines;
		rect.y2 = min_t(u32, rect.y1 + lines, damage.y2);

		ret = gud_flush_rect(gdrm, fb, format, &rect);
		if (ret) {
			if (ret != -ENODEV && ret != -ECONNRESET &&
			    ret != -ESHUTDOWN && ret != -EPROTO) {
				bool prev_flush_failed = gdrm->prev_flush_failed;

				gud_retry_failed_flush(gdrm, fb, &damage);
				if (!prev_flush_failed)
					dev_err_ratelimited(fb->dev->dev,
							    "Failed to flush framebuffer: error=%d\n", ret);
			}
			break;
		}

		gdrm->prev_flush_failed = false;
	}

	drm_framebuffer_put(fb);
out:
	drm_dev_exit(idx);
}

static void gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
				struct drm_rect *damage)
{
	struct drm_framebuffer *old_fb = NULL;

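	/* Hand a framebuffer reference over to the worker; drop the old one outside the lock */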
	mutex_lock(&gdrm->damage_lock);

	if (fb != gdrm->fb) {
		old_fb = gdrm->fb;
		drm_framebuffer_get(fb);
		gdrm->fb = fb;
	}

	gud_add_damage(gdrm, damage);

	mutex_unlock(&gdrm->damage_lock);

	queue_work(system_long_wq, &gdrm->work);

	if (old_fb)
		drm_framebuffer_put(old_fb);
}

int gud_pipe_check(struct drm_simple_display_pipe *pipe,
		   struct drm_plane_state *new_plane_state,
		   struct drm_crtc_state *new_crtc_state)
{
	struct gud_device *gdrm = to_gud_device(pipe->crtc.dev);
	struct drm_plane_state *old_plane_state = pipe->plane.state;
	const struct drm_display_mode *mode = &new_crtc_state->mode;
	struct drm_atomic_state *state = new_plane_state->state;
	struct drm_framebuffer *old_fb = old_plane_state->fb;
	struct drm_connector_state *connector_state = NULL;
	struct drm_framebuffer *fb = new_plane_state->fb;
	const struct drm_format_info *format;
	struct drm_connector *connector;
	unsigned int i, num_properties;
	struct gud_state_req *req;
	int idx, ret;
	size_t len;

	if (WARN_ON_ONCE(!fb))
		return -EINVAL;

	/* Only dereference fb after the NULL check above */
	format = fb->format;

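	/*
	 * Rotation and pixel format are sent to the device as part of the
	 * display state, so a change must go through a full state
	 * check/commit cycle.
	 */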
	if (old_plane_state->rotation != new_plane_state->rotation)
		new_crtc_state->mode_changed = true;

	if (old_fb && old_fb->format != format)
		new_crtc_state->mode_changed = true;

	if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
		return 0;

	/* Only one connector is supported */
	if (hweight32(new_crtc_state->connector_mask) != 1)
		return -EINVAL;

	if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
		format = gdrm->xrgb8888_emulation_format;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc)
			break;
	}

	/*
	 * DRM_IOCTL_MODE_OBJ_SETPROPERTY on the rotation property will not have
	 * the connector included in the state.
	 */
	if (!connector_state) {
		struct drm_connector_list_iter conn_iter;

		drm_connector_list_iter_begin(pipe->crtc.dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (connector->state->crtc) {
				connector_state = connector->state;
				break;
			}
		}
		drm_connector_list_iter_end(&conn_iter);
	}

	if (WARN_ON_ONCE(!connector_state))
		return -ENOENT;

	len = struct_size(req, properties,
			  GUD_PROPERTIES_MAX_NUM + GUD_CONNECTOR_PROPERTIES_MAX_NUM);
	req = kzalloc(len, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	gud_from_display_mode(&req->mode, mode);

	req->format = gud_from_fourcc(format->format);
	if (WARN_ON_ONCE(!req->format)) {
		ret = -EINVAL;
		goto out;
	}

	req->connector = drm_connector_index(connector_state->connector);

	ret = gud_connector_fill_properties(connector_state, req->properties);
	if (ret < 0)
		goto out;

	num_properties = ret;
	for (i = 0; i < gdrm->num_properties; i++) {
		u16 prop = gdrm->properties[i];
		u64 val;

		switch (prop) {
		case GUD_PROPERTY_ROTATION:
			/* The DRM UAPI values match the protocol, so use the value directly */
			val = new_plane_state->rotation;
			break;
		default:
			WARN_ON_ONCE(1);
			ret = -EINVAL;
			goto out;
		}

		req->properties[num_properties + i].prop = cpu_to_le16(prop);
		req->properties[num_properties + i].val = cpu_to_le64(val);
		num_properties++;
	}

	if (drm_dev_enter(fb->dev, &idx)) {
		len = struct_size(req, properties, num_properties);
		ret = gud_usb_set(gdrm, GUD_REQ_SET_STATE_CHECK, 0, req, len);
		drm_dev_exit(idx);
	} else {
		ret = -ENODEV;
	}
out:
	kfree(req);

	return ret;
}

void gud_pipe_update(struct drm_simple_display_pipe *pipe,
		     struct drm_plane_state *old_state)
{
	struct drm_device *drm = pipe->crtc.dev;
	struct gud_device *gdrm = to_gud_device(drm);
	struct drm_plane_state *state = pipe->plane.state;
	struct drm_framebuffer *fb = state->fb;
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_rect damage;
	int idx;

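	/*
	 * A mode change invalidates any queued damage, so wait for the
	 * worker to finish and drop the pending framebuffer and damage.
	 */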
	if (crtc->state->mode_changed || !crtc->state->enable) {
		cancel_work_sync(&gdrm->work);
		mutex_lock(&gdrm->damage_lock);
		if (gdrm->fb) {
			drm_framebuffer_put(gdrm->fb);
			gdrm->fb = NULL;
		}
		gud_clear_damage(gdrm);
		mutex_unlock(&gdrm->damage_lock);
	}

	if (!drm_dev_enter(drm, &idx))
		return;

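	/* No fb in the old state means the pipe was just enabled: power up the controller */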
	if (!old_state->fb)
		gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 1);

	if (fb && (crtc->state->mode_changed || crtc->state->connectors_changed))
		gud_usb_set(gdrm, GUD_REQ_SET_STATE_COMMIT, 0, NULL, 0);

	if (crtc->state->active_changed)
		gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, crtc->state->active);

	if (drm_atomic_helper_damage_merged(old_state, state, &damage)) {
		if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE)
			drm_rect_init(&damage, 0, 0, fb->width, fb->height);
		gud_fb_queue_damage(gdrm, fb, &damage);
		if (!gud_async_flush)
			flush_work(&gdrm->work);
	}

	if (!crtc->state->enable)
		gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0);

	drm_dev_exit(idx);
}