linux/drivers/gpu/drm/sti/sti_gdp.c
/*
 * Copyright (C) STMicroelectronics SA 2014
 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
 *          Fabien Dessenne <fabien.dessenne@st.com>
 *          for STMicroelectronics.
 * License terms: GNU General Public License (GPL), version 2
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>

#include "sti_compositor.h"
#include "sti_gdp.h"
#include "sti_layer.h"
#include "sti_vtg.h"

#define ALPHASWITCH     BIT(6)
#define ENA_COLOR_FILL  BIT(8)
#define BIGNOTLITTLE    BIT(23)
#define WAIT_NEXT_VSYNC BIT(31)

/* GDP color formats */
#define GDP_RGB565      0x00
#define GDP_RGB888      0x01
#define GDP_RGB888_32   0x02
#define GDP_XBGR8888    (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB8565    0x04
#define GDP_ARGB8888    0x05
#define GDP_ABGR8888    (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB1555    0x06
#define GDP_ARGB4444    0x07
#define GDP_CLUT8       0x0B
#define GDP_YCBR888     0x10
#define GDP_YCBR422R    0x12
#define GDP_AYCBR8888   0x15

#define GAM_GDP_CTL_OFFSET      0x00
#define GAM_GDP_AGC_OFFSET      0x04
#define GAM_GDP_VPO_OFFSET      0x0C
#define GAM_GDP_VPS_OFFSET      0x10
#define GAM_GDP_PML_OFFSET      0x14
#define GAM_GDP_PMP_OFFSET      0x18
#define GAM_GDP_SIZE_OFFSET     0x1C
#define GAM_GDP_NVN_OFFSET      0x24
#define GAM_GDP_KEY1_OFFSET     0x28
#define GAM_GDP_KEY2_OFFSET     0x2C
#define GAM_GDP_PPT_OFFSET      0x34
#define GAM_GDP_CML_OFFSET      0x3C
#define GAM_GDP_MST_OFFSET      0x68

#define GAM_GDP_ALPHARANGE_255  BIT(5)
#define GAM_GDP_AGC_FULL_RANGE  0x00808080
#define GAM_GDP_PPT_IGNORE      (BIT(1) | BIT(0))
#define GAM_GDP_SIZE_MAX        0x7FF

#define GDP_NODE_NB_BANK        2
#define GDP_NODE_PER_FIELD      2

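/*
 * Layout note (derived from the GAM_GDP_*_OFFSET list above and the NVN
 * chaining done below): struct sti_gdp_node mirrors the node as the GDP
 * hardware fetches it from memory, CTL at offset 0x00 up to CML at 0x3C,
 * with the reserved words filling the unused offsets. The hardware moves
 * from one node to the next through the gam_gdp_nvn field.
 */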
struct sti_gdp_node {
        u32 gam_gdp_ctl;
        u32 gam_gdp_agc;
        u32 reserved1;
        u32 gam_gdp_vpo;
        u32 gam_gdp_vps;
        u32 gam_gdp_pml;
        u32 gam_gdp_pmp;
        u32 gam_gdp_size;
        u32 reserved2;
        u32 gam_gdp_nvn;
        u32 gam_gdp_key1;
        u32 gam_gdp_key2;
        u32 reserved3;
        u32 gam_gdp_ppt;
        u32 reserved4;
        u32 gam_gdp_cml;
};

struct sti_gdp_node_list {
        struct sti_gdp_node *top_field;
        dma_addr_t top_field_paddr;
        struct sti_gdp_node *btm_field;
        dma_addr_t btm_field_paddr;
};

/**
 * struct sti_gdp - STI GDP structure
 *
 * @layer:              layer structure
 * @clk_pix:            pixel clock for the current gdp
 * @clk_main_parent:    gdp parent clock if main path used
 * @clk_aux_parent:     gdp parent clock if aux path used
 * @vtg_field_nb:       callback for VTG FIELD (top or bottom) notification
 * @is_curr_top:        true if the current node processed is the top field
 * @node_list:          array of node lists
 */
struct sti_gdp {
        struct sti_layer layer;
        struct clk *clk_pix;
        struct clk *clk_main_parent;
        struct clk *clk_aux_parent;
        struct notifier_block vtg_field_nb;
        bool is_curr_top;
        struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
};

#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer)

static const uint32_t gdp_supported_formats[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_ABGR8888,
        DRM_FORMAT_ARGB4444,
        DRM_FORMAT_ARGB1555,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_RGB888,
        DRM_FORMAT_AYUV,
        DRM_FORMAT_YUV444,
        DRM_FORMAT_VYUY,
        DRM_FORMAT_C8,
};
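
/*
 * Every DRM fourcc listed above has a matching GDP_* hardware format;
 * the translation is done by sti_gdp_fourcc2format() below.
 */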

static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer)
{
        return gdp_supported_formats;
}

static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer)
{
        return ARRAY_SIZE(gdp_supported_formats);
}

static int sti_gdp_fourcc2format(int fourcc)
{
        switch (fourcc) {
        case DRM_FORMAT_XRGB8888:
                return GDP_RGB888_32;
        case DRM_FORMAT_XBGR8888:
                return GDP_XBGR8888;
        case DRM_FORMAT_ARGB8888:
                return GDP_ARGB8888;
        case DRM_FORMAT_ABGR8888:
                return GDP_ABGR8888;
        case DRM_FORMAT_ARGB4444:
                return GDP_ARGB4444;
        case DRM_FORMAT_ARGB1555:
                return GDP_ARGB1555;
        case DRM_FORMAT_RGB565:
                return GDP_RGB565;
        case DRM_FORMAT_RGB888:
                return GDP_RGB888;
        case DRM_FORMAT_AYUV:
                return GDP_AYCBR8888;
        case DRM_FORMAT_YUV444:
                return GDP_YCBR888;
        case DRM_FORMAT_VYUY:
                return GDP_YCBR422R;
        case DRM_FORMAT_C8:
                return GDP_CLUT8;
        }
        return -1;
}

static int sti_gdp_get_alpharange(int format)
{
        switch (format) {
        case GDP_ARGB8565:
        case GDP_ARGB8888:
        case GDP_AYCBR8888:
        case GDP_ABGR8888:
                return GAM_GDP_ALPHARANGE_255;
        }
        return 0;
}

/**
 * sti_gdp_get_free_nodes
 * @layer: gdp layer
 *
 * Look for a GDP node list that is not currently read by the HW.
 *
 * RETURNS:
 * Pointer to the free GDP node list
 */
static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
{
        int hw_nvn;
        struct sti_gdp *gdp = to_sti_gdp(layer);
        unsigned int i;

        hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
        if (!hw_nvn)
                goto end;

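        /*
         * The NVN register holds the address of the node currently in use
         * by the hardware (see sti_gdp_get_current_nodes()); a bank whose
         * two field nodes both differ from it is safe to rewrite.
         */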
        for (i = 0; i < GDP_NODE_NB_BANK; i++)
                if ((hw_nvn != gdp->node_list[i].btm_field_paddr) &&
                    (hw_nvn != gdp->node_list[i].top_field_paddr))
                        return &gdp->node_list[i];

        /* in hazardous cases restart with the first node */
        DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
                        sti_layer_to_str(layer), hw_nvn);

end:
        return &gdp->node_list[0];
}

/**
 * sti_gdp_get_current_nodes
 * @layer: GDP layer
 *
 * Look for GDP nodes that are currently read by the HW.
 *
 * RETURNS:
 * Pointer to the current GDP node list
 */
static
struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
{
        int hw_nvn;
        struct sti_gdp *gdp = to_sti_gdp(layer);
        unsigned int i;

        hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
        if (!hw_nvn)
                goto end;

        for (i = 0; i < GDP_NODE_NB_BANK; i++)
                if ((hw_nvn == gdp->node_list[i].btm_field_paddr) ||
                    (hw_nvn == gdp->node_list[i].top_field_paddr))
                        return &gdp->node_list[i];

end:
        DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
                                hw_nvn, sti_layer_to_str(layer));

        return NULL;
}

/**
 * sti_gdp_prepare_layer
 * @layer: gdp layer
 * @first_prepare: true if it is the first time this function is called
 *
 * Update the free GDP node list according to the layer properties.
 *
 * RETURNS:
 * 0 on success.
 */
static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
{
        struct sti_gdp_node_list *list;
        struct sti_gdp_node *top_field, *btm_field;
        struct drm_display_mode *mode = layer->mode;
        struct device *dev = layer->dev;
        struct sti_gdp *gdp = to_sti_gdp(layer);
        struct sti_compositor *compo = dev_get_drvdata(dev);
        int format;
        unsigned int depth, bpp;
        int rate = mode->clock * 1000;
        int res;
        u32 ydo, xdo, yds, xds;

        list = sti_gdp_get_free_nodes(layer);
        top_field = list->top_field;
        btm_field = list->btm_field;

        dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
                        sti_layer_to_str(layer), top_field, btm_field);

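        /*
         * The bank returned by sti_gdp_get_free_nodes() is not being read
         * by the hardware, so its two field nodes can be rebuilt from the
         * current layer state; they only take effect once
         * sti_gdp_commit_layer() points the hardware NVN at them.
         */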
        /* Build the top field from layer params */
        top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
        top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
        format = sti_gdp_fourcc2format(layer->format);
        if (format == -1) {
                DRM_ERROR("Format not supported by GDP %.4s\n",
                          (char *)&layer->format);
                return 1;
        }
        top_field->gam_gdp_ctl |= format;
        top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
        top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;

        /* pixel memory location */
        drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
        top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
        top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
        top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];

        /* input parameters */
        top_field->gam_gdp_pmp = layer->pitches[0];
        top_field->gam_gdp_size =
            clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
            clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);

        /* output parameters */
        ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
        yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
        xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
        xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
        top_field->gam_gdp_vpo = (ydo << 16) | xdo;
        top_field->gam_gdp_vps = (yds << 16) | xds;

        /* Same content and chained together */
        memcpy(btm_field, top_field, sizeof(*btm_field));
        top_field->gam_gdp_nvn = list->btm_field_paddr;
        btm_field->gam_gdp_nvn = list->top_field_paddr;

        /* Interlaced mode: start the bottom field one pitch below the top */
        if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
                btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
                    layer->pitches[0];

        if (first_prepare) {
                /* Register gdp callback */
                if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ?
                                compo->vtg_main : compo->vtg_aux,
                                &gdp->vtg_field_nb, layer->mixer_id)) {
                        DRM_ERROR("Cannot register VTG notifier\n");
                        return 1;
                }

                /* Set and enable gdp clock */
                if (gdp->clk_pix) {
                        struct clk *clkp;
                        /* Depending on the mixer used, the gdp pixel clock
                         * takes a different parent clock. */
                        if (layer->mixer_id == STI_MIXER_MAIN)
                                clkp = gdp->clk_main_parent;
                        else
                                clkp = gdp->clk_aux_parent;

                        if (clkp)
                                clk_set_parent(gdp->clk_pix, clkp);

                        res = clk_set_rate(gdp->clk_pix, rate);
                        if (res < 0) {
                                DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
                                                rate);
                                return 1;
                        }

                        if (clk_prepare_enable(gdp->clk_pix)) {
                                DRM_ERROR("Failed to prepare/enable gdp\n");
                                return 1;
                        }
                }
        }

        return 0;
}

/**
 * sti_gdp_commit_layer
 * @layer: gdp layer
 *
 * Update the NVN field of the 'right' field of the current GDP node (being
 * used by the HW) with the address of the updated ('free') top field GDP node.
 * - In interlaced mode the 'right' field is the bottom field as we update
 *   frames starting from their top field
 * - In progressive mode, we update both bottom and top fields which are
 *   equal nodes.
 * At the next VSYNC, the updated node list will be used by the HW.
 *
 * RETURNS:
 * 0 on success.
 */
static int sti_gdp_commit_layer(struct sti_layer *layer)
{
        struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer);
        struct sti_gdp_node *updated_top_node = updated_list->top_field;
        struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
        struct sti_gdp *gdp = to_sti_gdp(layer);
        u32 dma_updated_top = updated_list->top_field_paddr;
        u32 dma_updated_btm = updated_list->btm_field_paddr;
        struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);

        dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
                        sti_layer_to_str(layer),
                        updated_top_node, updated_btm_node);
        dev_dbg(layer->dev, "Current NVN:0x%X\n",
                readl(layer->regs + GAM_GDP_NVN_OFFSET));
        dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n",
                (unsigned long)layer->paddr,
                readl(layer->regs + GAM_GDP_PML_OFFSET));

        if (curr_list == NULL) {
                /* First update or invalid node: write directly in the
                 * hw register */
                DRM_DEBUG_DRIVER("%s first update (or invalid node)",
                                sti_layer_to_str(layer));

                writel(gdp->is_curr_top ?
                                dma_updated_btm : dma_updated_top,
                                layer->regs + GAM_GDP_NVN_OFFSET);
                return 0;
        }

        if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) {
                if (gdp->is_curr_top) {
                        /* Do not update in the middle of the frame, but
                         * postpone the update until after the bottom field
                         * has been displayed */
                        curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
                } else {
                        /* Direct update to avoid one frame delay */
                        writel(dma_updated_top,
                                layer->regs + GAM_GDP_NVN_OFFSET);
                }
        } else {
                /* Direct update for progressive to avoid one frame delay */
                writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET);
        }

        return 0;
}

/**
 * sti_gdp_disable_layer
 * @layer: gdp layer
 *
 * Disable a GDP.
 *
 * RETURNS:
 * 0 on success.
 */
static int sti_gdp_disable_layer(struct sti_layer *layer)
{
        unsigned int i;
        struct sti_gdp *gdp = to_sti_gdp(layer);
        struct sti_compositor *compo = dev_get_drvdata(layer->dev);

        DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));

        /* Set the nodes as 'to be ignored on mixer' */
        for (i = 0; i < GDP_NODE_NB_BANK; i++) {
                gdp->node_list[i].top_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
                gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
        }

        if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ?
                        compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
                DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");

        if (gdp->clk_pix)
                clk_disable_unprepare(gdp->clk_pix);

        return 0;
}

/**
 * sti_gdp_field_cb
 * @nb: notifier block
 * @event: event message
 * @data: private data
 *
 * Handle VTG top field and bottom field event.
 *
 * RETURNS:
 * 0 on success.
 */
int sti_gdp_field_cb(struct notifier_block *nb,
                unsigned long event, void *data)
{
        struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);

        switch (event) {
        case VTG_TOP_FIELD_EVENT:
                gdp->is_curr_top = true;
                break;
        case VTG_BOTTOM_FIELD_EVENT:
                gdp->is_curr_top = false;
                break;
        default:
                DRM_ERROR("unsupported event: %lu\n", event);
                break;
        }

        return 0;
}

static void sti_gdp_init(struct sti_layer *layer)
{
        struct sti_gdp *gdp = to_sti_gdp(layer);
        struct device_node *np = layer->dev->of_node;
        dma_addr_t dma_addr;
        void *base;
        unsigned int i, size;

        /* Allocate all the nodes within a single memory page */
        size = sizeof(struct sti_gdp_node) *
            GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
        base = dma_alloc_writecombine(layer->dev,
                        size, &dma_addr, GFP_KERNEL | GFP_DMA);

        if (!base) {
                DRM_ERROR("Failed to allocate memory for GDP node\n");
                return;
        }
        memset(base, 0, size);

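        /*
         * Carve the allocation into GDP_NODE_NB_BANK banks of one top-field
         * and one bottom-field node each. The alignment checks below assume
         * the hardware wants node addresses aligned on 16 bytes.
         */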
        for (i = 0; i < GDP_NODE_NB_BANK; i++) {
                if (dma_addr & 0xF) {
                        DRM_ERROR("Mem alignment failed\n");
                        return;
                }
                gdp->node_list[i].top_field = base;
                gdp->node_list[i].top_field_paddr = dma_addr;

                DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
                base += sizeof(struct sti_gdp_node);
                dma_addr += sizeof(struct sti_gdp_node);

                if (dma_addr & 0xF) {
                        DRM_ERROR("Mem alignment failed\n");
                        return;
                }
                gdp->node_list[i].btm_field = base;
                gdp->node_list[i].btm_field_paddr = dma_addr;
                DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
                base += sizeof(struct sti_gdp_node);
                dma_addr += sizeof(struct sti_gdp_node);
        }

        if (of_device_is_compatible(np, "st,stih407-compositor")) {
                /* GDPs of the STiH407 chip have their own pixel clock */
                char *clk_name;

                switch (layer->desc) {
                case STI_GDP_0:
                        clk_name = "pix_gdp1";
                        break;
                case STI_GDP_1:
                        clk_name = "pix_gdp2";
                        break;
                case STI_GDP_2:
                        clk_name = "pix_gdp3";
                        break;
                case STI_GDP_3:
                        clk_name = "pix_gdp4";
                        break;
                default:
                        DRM_ERROR("GDP id not recognized\n");
                        return;
                }

                gdp->clk_pix = devm_clk_get(layer->dev, clk_name);
                if (IS_ERR(gdp->clk_pix))
                        DRM_ERROR("Cannot get %s clock\n", clk_name);

                gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent");
                if (IS_ERR(gdp->clk_main_parent))
                        DRM_ERROR("Cannot get main_parent clock\n");

                gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent");
                if (IS_ERR(gdp->clk_aux_parent))
                        DRM_ERROR("Cannot get aux_parent clock\n");
        }
}

static const struct sti_layer_funcs gdp_ops = {
        .get_formats = sti_gdp_get_formats,
        .get_nb_formats = sti_gdp_get_nb_formats,
        .init = sti_gdp_init,
        .prepare = sti_gdp_prepare_layer,
        .commit = sti_gdp_commit_layer,
        .disable = sti_gdp_disable_layer,
};

struct sti_layer *sti_gdp_create(struct device *dev, int id)
{
        struct sti_gdp *gdp;

        gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
        if (!gdp) {
                DRM_ERROR("Failed to allocate memory for GDP\n");
                return NULL;
        }

        gdp->layer.ops = &gdp_ops;
        gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;

        return (struct sti_layer *)gdp;
}