/*
 * linux/drivers/video/mmp/hw/mmp_ctrl.c
 * Marvell MMP series Display Controller support
 *
 * Copyright (C) 2012 Marvell Technology Group Ltd.
 * Authors:  Guoqing Li <ligq@marvell.com>
 *          Lisa Du <cldu@marvell.com>
 *          Zhou Zhu <zzhu3@marvell.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>
#include <linux/io.h>

#include "mmp_ctrl.h"

/*
 * Display controller interrupt handler.
 *
 * Acknowledges every status bit that was pending in SPU_IRQ_ISR and keeps
 * re-reading the status until no enabled interrupt (per SPU_IRQ_ENA)
 * remains asserted, so nothing raised while we were clearing is lost.
 */
static irqreturn_t ctrl_handle_irq(int irq, void *dev_id)
{
	struct mmphw_ctrl *ctrl = (struct mmphw_ctrl *)dev_id;
	u32 isr, imask, tmp;

	isr = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR);
	imask = readl_relaxed(ctrl->reg_base + SPU_IRQ_ENA);

	do {
		/* clear clock only */
		tmp = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR);
		if (tmp & isr)
			/*
			 * NOTE(review): writing ~isr appears to rely on
			 * write-to-clear semantics of SPU_IRQ_ISR --
			 * confirm against the register manual.
			 */
			writel_relaxed(~isr, ctrl->reg_base + SPU_IRQ_ISR);
		/* non-relaxed readl here orders the clear before the re-check */
	} while ((isr = readl(ctrl->reg_base + SPU_IRQ_ISR)) & imask);

	return IRQ_HANDLED;
}
  60
  61static u32 fmt_to_reg(struct mmp_overlay *overlay, int pix_fmt)
  62{
  63        u32 link_config = path_to_path_plat(overlay->path)->link_config;
  64        u32 rbswap, uvswap = 0, yuvswap = 0,
  65                csc_en = 0, val = 0,
  66                vid = overlay_is_vid(overlay);
  67
  68        switch (pix_fmt) {
  69        case PIXFMT_RGB565:
  70        case PIXFMT_RGB1555:
  71        case PIXFMT_RGB888PACK:
  72        case PIXFMT_RGB888UNPACK:
  73        case PIXFMT_RGBA888:
  74                rbswap = !(link_config & 0x1);
  75                break;
  76        case PIXFMT_VYUY:
  77        case PIXFMT_YVU422P:
  78        case PIXFMT_YVU420P:
  79                rbswap = link_config & 0x1;
  80                uvswap = 1;
  81                break;
  82        case PIXFMT_YUYV:
  83                rbswap = link_config & 0x1;
  84                yuvswap = 1;
  85                break;
  86        default:
  87                rbswap = link_config & 0x1;
  88                break;
  89        }
  90
  91        switch (pix_fmt) {
  92        case PIXFMT_RGB565:
  93        case PIXFMT_BGR565:
  94                val = 0;
  95                break;
  96        case PIXFMT_RGB1555:
  97        case PIXFMT_BGR1555:
  98                val = 0x1;
  99                break;
 100        case PIXFMT_RGB888PACK:
 101        case PIXFMT_BGR888PACK:
 102                val = 0x2;
 103                break;
 104        case PIXFMT_RGB888UNPACK:
 105        case PIXFMT_BGR888UNPACK:
 106                val = 0x3;
 107                break;
 108        case PIXFMT_RGBA888:
 109        case PIXFMT_BGRA888:
 110                val = 0x4;
 111                break;
 112        case PIXFMT_UYVY:
 113        case PIXFMT_VYUY:
 114        case PIXFMT_YUYV:
 115                val = 0x5;
 116                csc_en = 1;
 117                break;
 118        case PIXFMT_YUV422P:
 119        case PIXFMT_YVU422P:
 120                val = 0x6;
 121                csc_en = 1;
 122                break;
 123        case PIXFMT_YUV420P:
 124        case PIXFMT_YVU420P:
 125                val = 0x7;
 126                csc_en = 1;
 127                break;
 128        default:
 129                break;
 130        }
 131
 132        return (dma_palette(0) | dma_fmt(vid, val) |
 133                dma_swaprb(vid, rbswap) | dma_swapuv(vid, uvswap) |
 134                dma_swapyuv(vid, yuvswap) | dma_csc(vid, csc_en));
 135}
 136
 137static void dmafetch_set_fmt(struct mmp_overlay *overlay)
 138{
 139        u32 tmp;
 140        struct mmp_path *path = overlay->path;
 141        tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
 142        tmp &= ~dma_mask(overlay_is_vid(overlay));
 143        tmp |= fmt_to_reg(overlay, overlay->win.pix_fmt);
 144        writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
 145}
 146
 147static void overlay_set_win(struct mmp_overlay *overlay, struct mmp_win *win)
 148{
 149        struct lcd_regs *regs = path_regs(overlay->path);
 150        u32 pitch;
 151
 152        /* assert win supported */
 153        memcpy(&overlay->win, win, sizeof(struct mmp_win));
 154
 155        mutex_lock(&overlay->access_ok);
 156        pitch = win->xsrc * pixfmt_to_stride(win->pix_fmt);
 157        writel_relaxed(pitch, &regs->g_pitch);
 158        writel_relaxed((win->ysrc << 16) | win->xsrc, &regs->g_size);
 159        writel_relaxed((win->ydst << 16) | win->xdst, &regs->g_size_z);
 160        writel_relaxed(0, &regs->g_start);
 161
 162        dmafetch_set_fmt(overlay);
 163        mutex_unlock(&overlay->access_ok);
 164}
 165
 166static void dmafetch_onoff(struct mmp_overlay *overlay, int on)
 167{
 168        u32 mask = overlay_is_vid(overlay) ? CFG_GRA_ENA_MASK :
 169                   CFG_DMA_ENA_MASK;
 170        u32 enable = overlay_is_vid(overlay) ? CFG_GRA_ENA(1) : CFG_DMA_ENA(1);
 171        u32 tmp;
 172        struct mmp_path *path = overlay->path;
 173
 174        mutex_lock(&overlay->access_ok);
 175        tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
 176        tmp &= ~mask;
 177        tmp |= (on ? enable : 0);
 178        writel(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
 179        mutex_unlock(&overlay->access_ok);
 180}
 181
 182static void path_enabledisable(struct mmp_path *path, int on)
 183{
 184        u32 tmp;
 185        mutex_lock(&path->access_ok);
 186        tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
 187        if (on)
 188                tmp &= ~SCLK_DISABLE;
 189        else
 190                tmp |= SCLK_DISABLE;
 191        writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));
 192        mutex_unlock(&path->access_ok);
 193}
 194
 195static void path_onoff(struct mmp_path *path, int on)
 196{
 197        if (path->status == on) {
 198                dev_info(path->dev, "path %s is already %s\n",
 199                                path->name, stat_name(path->status));
 200                return;
 201        }
 202
 203        if (on) {
 204                path_enabledisable(path, 1);
 205
 206                if (path->panel && path->panel->set_onoff)
 207                        path->panel->set_onoff(path->panel, 1);
 208        } else {
 209                if (path->panel && path->panel->set_onoff)
 210                        path->panel->set_onoff(path->panel, 0);
 211
 212                path_enabledisable(path, 0);
 213        }
 214        path->status = on;
 215}
 216
/*
 * Enable/disable one overlay's DMA fetch, then bring the whole path
 * up/down when the hardware state disagrees with the cached status.
 */
static void overlay_set_onoff(struct mmp_overlay *overlay, int on)
{
	/* no-op if already in the requested state */
	if (overlay->status == on) {
		dev_info(overlay_to_ctrl(overlay)->dev, "overlay %s is already %s\n",
			overlay->path->name, stat_name(overlay->status));
		return;
	}
	overlay->status = on;
	dmafetch_onoff(overlay, on);
	/* sync path hardware state with its cached software status */
	if (overlay->path->ops.check_status(overlay->path)
			!= overlay->path->status)
		path_onoff(overlay->path, on);
}
 230
/* Record which DMA fetch engine this overlay uses; no hardware access. */
static void overlay_set_fetch(struct mmp_overlay *overlay, int fetch_id)
{
	overlay->dmafetch_id = fetch_id;
}
 235
 236static int overlay_set_addr(struct mmp_overlay *overlay, struct mmp_addr *addr)
 237{
 238        struct lcd_regs *regs = path_regs(overlay->path);
 239
 240        /* FIXME: assert addr supported */
 241        memcpy(&overlay->addr, addr, sizeof(struct mmp_win));
 242        writel(addr->phys[0], &regs->g_0);
 243
 244        return overlay->addr.phys[0];
 245}
 246
/*
 * Program a full video mode onto a path: interface polarity, active area,
 * porches, total scan size, vsync control and the pixel-clock divider.
 */
static void path_set_mode(struct mmp_path *path, struct mmp_mode *mode)
{
	struct lcd_regs *regs = path_regs(path);
	u32 total_x, total_y, vsync_ctrl, tmp, sclk_src, sclk_div,
		link_config = path_to_path_plat(path)->link_config;

	/* FIXME: assert videomode supported */
	memcpy(&path->mode, mode, sizeof(struct mmp_mode));

	mutex_lock(&path->access_ok);

	/* polarity of timing signals (bit0 of intf_ctrl is preserved) */
	tmp = readl_relaxed(ctrl_regs(path) + intf_ctrl(path->id)) & 0x1;
	tmp |= mode->vsync_invert ? 0 : 0x8;
	tmp |= mode->hsync_invert ? 0 : 0x4;
	tmp |= link_config & CFG_DUMBMODE_MASK;
	tmp |= CFG_DUMB_ENA(1);
	writel_relaxed(tmp, ctrl_regs(path) + intf_ctrl(path->id));

	/* active area and blanking porches (y in high half, x in low half) */
	writel_relaxed((mode->yres << 16) | mode->xres, &regs->screen_active);
	writel_relaxed((mode->left_margin << 16) | mode->right_margin,
		&regs->screen_h_porch);
	writel_relaxed((mode->upper_margin << 16) | mode->lower_margin,
		&regs->screen_v_porch);
	total_x = mode->xres + mode->left_margin + mode->right_margin +
		mode->hsync_len;
	total_y = mode->yres + mode->upper_margin + mode->lower_margin +
		mode->vsync_len;
	writel_relaxed((total_y << 16) | total_x, &regs->screen_size);

	/* vsync ctrl */
	if (path->output_type == PATH_OUT_DSI)
		/* NOTE(review): fixed magic value for DSI output -- confirm */
		vsync_ctrl = 0x01330133;
	else
		vsync_ctrl = ((mode->xres + mode->right_margin) << 16)
					| (mode->xres + mode->right_margin);
	writel_relaxed(vsync_ctrl, &regs->vsync_ctrl);

	/*
	 * Set pixclock divider: ceiling of source-clock / pixel-clock so
	 * the generated pixel clock never exceeds the requested one.
	 */
	sclk_src = clk_get_rate(path_to_ctrl(path)->clk);
	sclk_div = sclk_src / mode->pixclock_freq;
	if (sclk_div * mode->pixclock_freq < sclk_src)
		sclk_div++;

	dev_info(path->dev, "%s sclk_src %d sclk_div 0x%x pclk %d\n",
			__func__, sclk_src, sclk_div, mode->pixclock_freq);

	tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
	tmp &= ~CLK_INT_DIV_MASK;
	tmp |= sclk_div;
	writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));

	mutex_unlock(&path->access_ok);
}
 301
/* Overlay operations exported to the mmp core for every path we register. */
static struct mmp_overlay_ops mmphw_overlay_ops = {
	.set_fetch = overlay_set_fetch,
	.set_onoff = overlay_set_onoff,
	.set_win = overlay_set_win,
	.set_addr = overlay_set_addr,
};
 308
 309static void ctrl_set_default(struct mmphw_ctrl *ctrl)
 310{
 311        u32 tmp, irq_mask;
 312
 313        /*
 314         * LCD Global control(LCD_TOP_CTRL) should be configed before
 315         * any other LCD registers read/write, or there maybe issues.
 316         */
 317        tmp = readl_relaxed(ctrl->reg_base + LCD_TOP_CTRL);
 318        tmp |= 0xfff0;
 319        writel_relaxed(tmp, ctrl->reg_base + LCD_TOP_CTRL);
 320
 321
 322        /* disable all interrupts */
 323        irq_mask = path_imasks(0) | err_imask(0) |
 324                   path_imasks(1) | err_imask(1);
 325        tmp = readl_relaxed(ctrl->reg_base + SPU_IRQ_ENA);
 326        tmp &= ~irq_mask;
 327        tmp |= irq_mask;
 328        writel_relaxed(tmp, ctrl->reg_base + SPU_IRQ_ENA);
 329}
 330
 331static void path_set_default(struct mmp_path *path)
 332{
 333        struct lcd_regs *regs = path_regs(path);
 334        u32 dma_ctrl1, mask, tmp, path_config;
 335
 336        path_config = path_to_path_plat(path)->path_config;
 337
 338        /* Configure IOPAD: should be parallel only */
 339        if (PATH_OUT_PARALLEL == path->output_type) {
 340                mask = CFG_IOPADMODE_MASK | CFG_BURST_MASK | CFG_BOUNDARY_MASK;
 341                tmp = readl_relaxed(ctrl_regs(path) + SPU_IOPAD_CONTROL);
 342                tmp &= ~mask;
 343                tmp |= path_config;
 344                writel_relaxed(tmp, ctrl_regs(path) + SPU_IOPAD_CONTROL);
 345        }
 346
 347        /* Select path clock source */
 348        tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
 349        tmp &= ~SCLK_SRC_SEL_MASK;
 350        tmp |= path_config;
 351        writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));
 352
 353        /*
 354         * Configure default bits: vsync triggers DMA,
 355         * power save enable, configure alpha registers to
 356         * display 100% graphics, and set pixel command.
 357         */
 358        dma_ctrl1 = 0x2032ff81;
 359
 360        dma_ctrl1 |= CFG_VSYNC_INV_MASK;
 361        writel_relaxed(dma_ctrl1, ctrl_regs(path) + dma_ctrl(1, path->id));
 362
 363        /* Configure default register values */
 364        writel_relaxed(0x00000000, &regs->blank_color);
 365        writel_relaxed(0x00000000, &regs->g_1);
 366        writel_relaxed(0x00000000, &regs->g_start);
 367
 368        /*
 369         * 1.enable multiple burst request in DMA AXI
 370         * bus arbiter for faster read if not tv path;
 371         * 2.enable horizontal smooth filter;
 372         */
 373        if (PATH_PN == path->id) {
 374                mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK
 375                        | CFG_ARBFAST_ENA(1);
 376                tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
 377                tmp |= mask;
 378                writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
 379        } else if (PATH_TV == path->id) {
 380                mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK
 381                        | CFG_ARBFAST_ENA(1);
 382                tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
 383                tmp &= ~mask;
 384                tmp |= CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK;
 385                writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
 386        }
 387}
 388
 389static int path_init(struct mmphw_path_plat *path_plat,
 390                struct mmp_mach_path_config *config)
 391{
 392        struct mmphw_ctrl *ctrl = path_plat->ctrl;
 393        struct mmp_path_info *path_info;
 394        struct mmp_path *path = NULL;
 395
 396        dev_info(ctrl->dev, "%s: %s\n", __func__, config->name);
 397
 398        /* init driver data */
 399        path_info = kzalloc(sizeof(struct mmp_path_info), GFP_KERNEL);
 400        if (!path_info) {
 401                dev_err(ctrl->dev, "%s: unable to alloc path_info for %s\n",
 402                                __func__, config->name);
 403                return 0;
 404        }
 405        path_info->name = config->name;
 406        path_info->id = path_plat->id;
 407        path_info->dev = ctrl->dev;
 408        path_info->overlay_num = config->overlay_num;
 409        path_info->overlay_ops = &mmphw_overlay_ops;
 410        path_info->set_mode = path_set_mode;
 411        path_info->plat_data = path_plat;
 412
 413        /* create/register platform device */
 414        path = mmp_register_path(path_info);
 415        if (!path) {
 416                kfree(path_info);
 417                return 0;
 418        }
 419        path_plat->path = path;
 420        path_plat->path_config = config->path_config;
 421        path_plat->link_config = config->link_config;
 422        path_set_default(path);
 423
 424        kfree(path_info);
 425        return 1;
 426}
 427
 428static void path_deinit(struct mmphw_path_plat *path_plat)
 429{
 430        if (!path_plat)
 431                return;
 432
 433        if (path_plat->path)
 434                mmp_unregister_path(path_plat->path);
 435}
 436
 437static int mmphw_probe(struct platform_device *pdev)
 438{
 439        struct mmp_mach_plat_info *mi;
 440        struct resource *res;
 441        int ret, i, size, irq;
 442        struct mmphw_path_plat *path_plat;
 443        struct mmphw_ctrl *ctrl = NULL;
 444
 445        /* get resources from platform data */
 446        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 447        if (res == NULL) {
 448                dev_err(&pdev->dev, "%s: no IO memory defined\n", __func__);
 449                ret = -ENOENT;
 450                goto failed;
 451        }
 452
 453        irq = platform_get_irq(pdev, 0);
 454        if (irq < 0) {
 455                dev_err(&pdev->dev, "%s: no IRQ defined\n", __func__);
 456                ret = -ENOENT;
 457                goto failed;
 458        }
 459
 460        /* get configs from platform data */
 461        mi = pdev->dev.platform_data;
 462        if (mi == NULL || !mi->path_num || !mi->paths) {
 463                dev_err(&pdev->dev, "%s: no platform data defined\n", __func__);
 464                ret = -EINVAL;
 465                goto failed;
 466        }
 467
 468        /* allocate */
 469        size = sizeof(struct mmphw_ctrl) + sizeof(struct mmphw_path_plat) *
 470               mi->path_num;
 471        ctrl = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
 472        if (!ctrl) {
 473                ret = -ENOMEM;
 474                goto failed;
 475        }
 476
 477        ctrl->name = mi->name;
 478        ctrl->path_num = mi->path_num;
 479        ctrl->dev = &pdev->dev;
 480        ctrl->irq = irq;
 481        platform_set_drvdata(pdev, ctrl);
 482        mutex_init(&ctrl->access_ok);
 483
 484        /* map registers.*/
 485        if (!devm_request_mem_region(ctrl->dev, res->start,
 486                        resource_size(res), ctrl->name)) {
 487                dev_err(ctrl->dev,
 488                        "can't request region for resource %pR\n", res);
 489                ret = -EINVAL;
 490                goto failed;
 491        }
 492
 493        ctrl->reg_base = devm_ioremap_nocache(ctrl->dev,
 494                        res->start, resource_size(res));
 495        if (ctrl->reg_base == NULL) {
 496                dev_err(ctrl->dev, "%s: res %x - %x map failed\n", __func__,
 497                        res->start, res->end);
 498                ret = -ENOMEM;
 499                goto failed;
 500        }
 501
 502        /* request irq */
 503        ret = devm_request_irq(ctrl->dev, ctrl->irq, ctrl_handle_irq,
 504                IRQF_SHARED, "lcd_controller", ctrl);
 505        if (ret < 0) {
 506                dev_err(ctrl->dev, "%s unable to request IRQ %d\n",
 507                                __func__, ctrl->irq);
 508                ret = -ENXIO;
 509                goto failed;
 510        }
 511
 512        /* get clock */
 513        ctrl->clk = devm_clk_get(ctrl->dev, mi->clk_name);
 514        if (IS_ERR(ctrl->clk)) {
 515                dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name);
 516                ret = -ENOENT;
 517                goto failed_get_clk;
 518        }
 519        clk_prepare_enable(ctrl->clk);
 520
 521        /* init global regs */
 522        ctrl_set_default(ctrl);
 523
 524        /* init pathes from machine info and register them */
 525        for (i = 0; i < ctrl->path_num; i++) {
 526                /* get from config and machine info */
 527                path_plat = &ctrl->path_plats[i];
 528                path_plat->id = i;
 529                path_plat->ctrl = ctrl;
 530
 531                /* path init */
 532                if (!path_init(path_plat, &mi->paths[i])) {
 533                        ret = -EINVAL;
 534                        goto failed_path_init;
 535                }
 536        }
 537
 538#ifdef CONFIG_MMP_DISP_SPI
 539        ret = lcd_spi_register(ctrl);
 540        if (ret < 0)
 541                goto failed_path_init;
 542#endif
 543
 544        dev_info(ctrl->dev, "device init done\n");
 545
 546        return 0;
 547
 548failed_path_init:
 549        for (i = 0; i < ctrl->path_num; i++) {
 550                path_plat = &ctrl->path_plats[i];
 551                path_deinit(path_plat);
 552        }
 553
 554        if (ctrl->clk) {
 555                devm_clk_put(ctrl->dev, ctrl->clk);
 556                clk_disable_unprepare(ctrl->clk);
 557        }
 558failed_get_clk:
 559        devm_free_irq(ctrl->dev, ctrl->irq, ctrl);
 560failed:
 561        if (ctrl) {
 562                if (ctrl->reg_base)
 563                        devm_iounmap(ctrl->dev, ctrl->reg_base);
 564                devm_release_mem_region(ctrl->dev, res->start,
 565                                resource_size(res));
 566                devm_kfree(ctrl->dev, ctrl);
 567        }
 568
 569        platform_set_drvdata(pdev, NULL);
 570        dev_err(&pdev->dev, "device init failed\n");
 571
 572        return ret;
 573}
 574
/* Platform driver glue; binds to platform devices named "mmp-disp". */
static struct platform_driver mmphw_driver = {
	.driver		= {
		.name	= "mmp-disp",
		.owner	= THIS_MODULE,
	},
	.probe		= mmphw_probe,
};
 582
 583static int mmphw_init(void)
 584{
 585        return platform_driver_register(&mmphw_driver);
 586}
 587module_init(mmphw_init);
 588
MODULE_AUTHOR("Li Guoqing<ligq@marvell.com>");
MODULE_DESCRIPTION("Framebuffer driver for mmp");
MODULE_LICENSE("GPL");