linux/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */


#include "mdp5_kms.h"
#include "mdp5_smp.h"

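/*
 * struct mdp5_smp - bookkeeping for the MDP5 Shared Memory Pool (SMP)
 *
 * The SMP is a pool of fixed-size memory blocks (MMBs) that the source
 * pipes use to buffer the data they fetch for scanout.  MMBs are either
 * statically reserved for a client (the reserved[] table from the config)
 * or handed out dynamically from a global bitmap via mdp5_smp_assign().
 * alloc_w/alloc_r and the pipe_reqprio_fifo_wm* arrays cache register
 * values that are flushed to hardware at commit time.
 */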
struct mdp5_smp {
	struct drm_device *dev;

	uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */

	int blk_cnt;
	int blk_size;

	/* register cache */
	u32 alloc_w[22];
	u32 alloc_r[22];
	u32 pipe_reqprio_fifo_wm0[SSPP_MAX];
	u32 pipe_reqprio_fifo_wm1[SSPP_MAX];
	u32 pipe_reqprio_fifo_wm2[SSPP_MAX];
};

static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
	struct msm_drm_private *priv = smp->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
{
#define CID_UNUSED	0

	if (WARN_ON(plane >= pipe2nclients(pipe)))
		return CID_UNUSED;

	/*
	 * Note on SMP clients:
	 * For ViG pipes, the fetch clients for the Y/Cr/Cb components are
	 * always consecutive, and in that order.
	 *
	 * e.g.:
	 * if mdp5_cfg->smp.clients[SSPP_VIG0] = N,
	 *	Y  plane's client ID is N
	 *	Cr plane's client ID is N + 1
	 *	Cb plane's client ID is N + 2
	 */

	return mdp5_cfg->smp.clients[pipe] + plane;
}

/* allocate blocks for the specified request: */
static int smp_request_block(struct mdp5_smp *smp,
		struct mdp5_smp_state *state,
		u32 cid, int nblks)
{
	void *cs = state->client_state[cid];
	int i, avail, cnt = smp->blk_cnt;
	uint8_t reserved;

	/* we shouldn't be requesting blocks for an in-use client: */
	WARN_ON(bitmap_weight(cs, cnt) > 0);

	reserved = smp->reserved[cid];

	if (reserved) {
		nblks = max(0, nblks - reserved);
		DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
	}

	avail = cnt - bitmap_weight(state->state, cnt);
	if (nblks > avail) {
		dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
				nblks, avail);
		return -ENOSPC;
	}

	for (i = 0; i < nblks; i++) {
		int blk = find_first_zero_bit(state->state, cnt);
		set_bit(blk, cs);
		set_bit(blk, state->state);
	}

	return 0;
}

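/*
 * Update the cached REQPRIO FIFO watermarks for a pipe that has @nblks
 * MMBs assigned.  Each MMB holds blk_size / 16 128-bit SMP entries, and
 * the three watermarks sit at 1/4, 2/4 and 3/4 of the entries available
 * to the pipe.  The values are only written to hardware later, by
 * write_smp_fifo_regs().
 */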
static void set_fifo_thresholds(struct mdp5_smp *smp,
		enum mdp5_pipe pipe, int nblks)
{
	u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
	u32 val;

	/* 1/4 of the SMP pool that is being fetched */
	val = (nblks * smp_entries_per_blk) / 4;

	smp->pipe_reqprio_fifo_wm0[pipe] = val * 1;
	smp->pipe_reqprio_fifo_wm1[pipe] = val * 2;
	smp->pipe_reqprio_fifo_wm2[pipe] = val * 3;
}

/*
 * NOTE: it looks like if horizontal decimation is used (if we supported
 * that), the width used to calculate SMP block requirements is the post-
 * decimated width, i.e. SMP buffering sits downstream of decimation (which
 * presumably happens during the DMA from the scanout buffer).
 */
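/*
 * Worked example (values are illustrative): for a single-plane XRGB8888
 * framebuffer with width = 1920, cpp = 4 and nlines = 2, each line pair
 * fetches 1920 * 4 * 2 = 15360 bytes.  Assuming a 4096-byte MMB, that
 * needs DIV_ROUND_UP(15360, 4096) = 4 blocks, so blkcfg = 0x04.  For
 * multi-planar YUV, each plane's block count lands in its own byte of
 * blkcfg (plane i in bits 8*i..8*i+7), which is how mdp5_smp_assign()
 * consumes it below.
 */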
uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
		const struct mdp_format *format,
		u32 width, bool hdecim)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
	int i, hsub, nplanes, nlines;
	u32 fmt = format->base.pixel_format;
	uint32_t blkcfg = 0;

	nplanes = drm_format_num_planes(fmt);
	hsub = drm_format_horz_chroma_subsampling(fmt);

	/* this would be different if BWC (compressed framebuffer?) were enabled: */
	nlines = 2;

	/* Newer MDPs have split/packing logic, which fetches the sub-sampled
	 * U and V components (splits them from Y if necessary), packs them
	 * together, and writes to SMP using a single client.
	 */
	if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
		fmt = DRM_FORMAT_NV24;
		nplanes = 2;

		/* if decimation is enabled, the HW decimates less on the
		 * sub-sampled chroma components
		 */
		if (hdecim && (hsub > 1))
			hsub = 1;
	}

	for (i = 0; i < nplanes; i++) {
		int n, fetch_stride, cpp;

		cpp = drm_format_plane_cpp(fmt, i);
		fetch_stride = width * cpp / (i ? hsub : 1);

		n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);

		/* for hw rev v1.00 */
		if (rev == 0)
			n = roundup_pow_of_two(n);

		blkcfg |= (n << (8 * i));
	}

	return blkcfg;
}

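/*
 * Reserve MMBs for each client of @pipe according to @blkcfg (one byte,
 * i.e. one block count, per plane).  This only updates the software
 * allocation state; the allocation registers are programmed later, from
 * mdp5_smp_prepare_commit().
 */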
int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
		enum mdp5_pipe pipe, uint32_t blkcfg)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	struct drm_device *dev = mdp5_kms->dev;
	int i, ret;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		u32 cid = pipe2client(pipe, i);
		int n = blkcfg & 0xff;

		if (!n)
			continue;

		DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
		ret = smp_request_block(smp, state, cid, n);
		if (ret) {
			dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
					n, ret);
			return ret;
		}

		blkcfg >>= 8;
	}

	state->assigned |= (1 << pipe);

	return 0;
}

/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
		enum mdp5_pipe pipe)
{
	int i;
	int cnt = smp->blk_cnt;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		u32 cid = pipe2client(pipe, i);
		void *cs = state->client_state[cid];

		/* update global state: */
		bitmap_andnot(state->state, state->state, cs, cnt);

		/* clear client's state */
		bitmap_zero(cs, cnt);
	}

	state->released |= (1 << pipe);
}

/* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to
 * happen after scanout completes.
 */
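/*
 * Each SMP_ALLOC_W/R register carries three client-id fields (CLIENT0..2),
 * one per MMB, so MMB n lives in field (n % 3) of register (n / 3).  The
 * same value is mirrored into both the write-client and read-client caches.
 */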
static unsigned update_smp_state(struct mdp5_smp *smp,
		u32 cid, mdp5_smp_state_t *assigned)
{
	int cnt = smp->blk_cnt;
	unsigned nblks = 0;
	u32 blk, val;

	for_each_set_bit(blk, *assigned, cnt) {
		int idx = blk / 3;
		int fld = blk % 3;

		val = smp->alloc_w[idx];

		switch (fld) {
		case 0:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
			break;
		case 1:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
			break;
		case 2:
			val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
			val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
			break;
		}

		smp->alloc_w[idx] = val;
		smp->alloc_r[idx] = val;

		nblks++;
	}

	return nblks;
}

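/*
 * Flush the cached client assignments to the SMP_ALLOC_W/R registers;
 * blk_cnt / 3 + 1 registers cover every MMB at three fields per register.
 */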
static void write_smp_alloc_regs(struct mdp5_smp *smp)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int i, num_regs;

	num_regs = smp->blk_cnt / 3 + 1;

	for (i = 0; i < num_regs; i++) {
		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(i),
			   smp->alloc_w[i]);
		mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(i),
			   smp->alloc_r[i]);
	}
}

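/* Flush the cached REQPRIO FIFO watermarks for every hw pipe */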
static void write_smp_fifo_regs(struct mdp5_smp *smp)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int i;

	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
		enum mdp5_pipe pipe = hwpipe->pipe;

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe),
			   smp->pipe_reqprio_fifo_wm0[pipe]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe),
			   smp->pipe_reqprio_fifo_wm1[pipe]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe),
			   smp->pipe_reqprio_fifo_wm2[pipe]);
	}
}

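/*
 * Program the new client assignments and watermarks for every pipe that
 * was assigned blocks in this state, ahead of the frame being flushed,
 * then clear the pending "assigned" mask.
 */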
void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
	enum mdp5_pipe pipe;

	for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) {
		unsigned i, nblks = 0;

		for (i = 0; i < pipe2nclients(pipe); i++) {
			u32 cid = pipe2client(pipe, i);
			void *cs = state->client_state[cid];

			nblks += update_smp_state(smp, cid, cs);

			DBG("assign %s:%u, %u blks",
				pipe2name(pipe), i, nblks);
		}

		set_fifo_thresholds(smp, pipe, nblks);
	}

	write_smp_alloc_regs(smp);
	write_smp_fifo_regs(smp);

	state->assigned = 0;
}

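/*
 * Called once scanout of the old frame is done: drop the watermarks of any
 * pipe released in this state back to zero (the MMBs themselves were
 * already returned to the pool in mdp5_smp_release()).
 */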
void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
	enum mdp5_pipe pipe;

	for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) {
		DBG("release %s", pipe2name(pipe));
		set_fifo_thresholds(smp, pipe, 0);
	}

	write_smp_fifo_regs(smp);

	state->released = 0;
}

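/* Dump the per-client MMB usage and pool totals (e.g. for debugfs) */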
void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	struct mdp5_hw_pipe_state *hwpstate;
	struct mdp5_smp_state *state;
	struct mdp5_global_state *global_state;
	int total = 0, i, j;

	drm_printf(p, "name\tinuse\tplane\n");
	drm_printf(p, "----\t-----\t-----\n");

	if (drm_can_sleep())
		drm_modeset_lock(&mdp5_kms->glob_state_lock, NULL);

	global_state = mdp5_get_existing_global_state(mdp5_kms);

	/* grab these *after* we hold the state_lock */
	hwpstate = &global_state->hwpipe;
	state = &global_state->smp;

	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
		struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx];
		enum mdp5_pipe pipe = hwpipe->pipe;
		for (j = 0; j < pipe2nclients(pipe); j++) {
			u32 cid = pipe2client(pipe, j);
			void *cs = state->client_state[cid];
			int inuse = bitmap_weight(cs, smp->blk_cnt);

			drm_printf(p, "%s:%d\t%d\t%s\n",
				pipe2name(pipe), j, inuse,
				plane ? plane->name : NULL);

			total += inuse;
		}
	}

	drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt);
	drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt -
			bitmap_weight(state->state, smp->blk_cnt));

	if (drm_can_sleep())
		drm_modeset_unlock(&mdp5_kms->glob_state_lock);
}

void mdp5_smp_destroy(struct mdp5_smp *smp)
{
	kfree(smp);
}

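/*
 * Allocate and initialize the SMP bookkeeping from the per-SoC config:
 * pull in the MMB count/size and mark the statically reserved MMBs as
 * already taken in the global allocation state.
 */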
struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
{
	struct mdp5_smp_state *state;
	struct mdp5_global_state *global_state;
	struct mdp5_smp *smp = NULL;
	int ret;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (unlikely(!smp)) {
		ret = -ENOMEM;
		goto fail;
	}

	smp->dev = mdp5_kms->dev;
	smp->blk_cnt = cfg->mmb_count;
	smp->blk_size = cfg->mmb_size;

	global_state = mdp5_get_existing_global_state(mdp5_kms);
	state = &global_state->smp;

	/* statically tied MMBs cannot be re-allocated: */
	bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt);
	memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));

	return smp;
fail:
	if (smp)
		mdp5_smp_destroy(smp);

	return ERR_PTR(ret);
}