/* linux/drivers/char/hw_random/cctrng.c */
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2019-2020 ARM Limited or its affiliates. */
   3
   4#include <linux/kernel.h>
   5#include <linux/module.h>
   6#include <linux/clk.h>
   7#include <linux/hw_random.h>
   8#include <linux/io.h>
   9#include <linux/platform_device.h>
  10#include <linux/pm_runtime.h>
  11#include <linux/interrupt.h>
  12#include <linux/irqreturn.h>
  13#include <linux/workqueue.h>
  14#include <linux/circ_buf.h>
  15#include <linux/completion.h>
  16#include <linux/of.h>
  17#include <linux/bitfield.h>
  18#include <linux/fips.h>
  19
  20#include "cctrng.h"
  21
/* Extract a named bit field from a register value; relies on the
 * CC_<reg>_<field>_BIT_SHIFT / _BIT_SIZE constants from cctrng.h.
 */
#define CC_REG_LOW(name)  (name ## _BIT_SHIFT)
#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
#define CC_GENMASK(name)  GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))

#define CC_REG_FLD_GET(reg_name, fld_name, reg_val)     \
	(FIELD_GET(CC_GENMASK(CC_ ## reg_name ## _ ## fld_name), reg_val))

/* max polls of NVM_IS_IDLE while waiting for HW reset completion on resume */
#define CC_HW_RESET_LOOP_COUNT 10
/* runtime-PM autosuspend delay, in milliseconds */
#define CC_TRNG_SUSPEND_TIMEOUT 3000

/* data circular buffer in words must be:
 *  - of a power-of-2 size (limitation of circ_buf.h macros)
 *  - at least 6, the size generated in the EHR according to HW implementation
 */
#define CCTRNG_DATA_BUF_WORDS 32

/* The timeout for the TRNG operation should be calculated with the formula:
 * Timeout = EHR_NUM * VN_COEFF * EHR_LENGTH * SAMPLE_CNT * SCALE_VALUE
 * while:
 *  - SAMPLE_CNT is input value from the characterisation process
 *  - all the rest are constants
 */
#define EHR_NUM 1
#define VN_COEFF 4
#define EHR_LENGTH CC_TRNG_EHR_IN_BITS
#define SCALE_VALUE 2
#define CCTRNG_TIMEOUT(smpl_cnt) \
	(EHR_NUM * VN_COEFF * EHR_LENGTH * smpl_cnt * SCALE_VALUE)
  50
/* Per-device driver state. */
struct cctrng_drvdata {
	struct platform_device *pdev;	/* owning platform device */
	void __iomem *cc_base;		/* mapped CryptoCell register block */
	struct clk *clk;		/* optional device clock */
	struct hwrng rng;		/* hwrng registration for this device */
	u32 active_rosc;		/* index of the ring oscillator in use */
	/* Sampling interval for each ring oscillator:
	 * count of ring oscillator cycles between consecutive bits sampling.
	 * Value of 0 indicates non-valid rosc
	 */
	u32 smpl_ratio[CC_TRNG_NUM_OF_ROSCS];

	u32 data_buf[CCTRNG_DATA_BUF_WORDS];	/* backing store for circ */
	struct circ_buf circ;		/* head/tail view over data_buf */
	struct work_struct compwork;	/* deferred completion (harvest) work */
	struct work_struct startwork;	/* deferred HW (re)start work */

	/* pending_hw - 1 when HW is pending, 0 when it is idle */
	atomic_t pending_hw;

	/* protects against multiple concurrent consumers of data_buf */
	spinlock_t read_lock;
};
  74
  75
  76/* functions for write/read CC registers */
  77static inline void cc_iowrite(struct cctrng_drvdata *drvdata, u32 reg, u32 val)
  78{
  79        iowrite32(val, (drvdata->cc_base + reg));
  80}
  81static inline u32 cc_ioread(struct cctrng_drvdata *drvdata, u32 reg)
  82{
  83        return ioread32(drvdata->cc_base + reg);
  84}
  85
  86
  87static int cc_trng_pm_get(struct device *dev)
  88{
  89        int rc = 0;
  90
  91        rc = pm_runtime_get_sync(dev);
  92
  93        /* pm_runtime_get_sync() can return 1 as a valid return code */
  94        return (rc == 1 ? 0 : rc);
  95}
  96
  97static void cc_trng_pm_put_suspend(struct device *dev)
  98{
  99        int rc = 0;
 100
 101        pm_runtime_mark_last_busy(dev);
 102        rc = pm_runtime_put_autosuspend(dev);
 103        if (rc)
 104                dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", rc);
 105}
 106
 107static int cc_trng_pm_init(struct cctrng_drvdata *drvdata)
 108{
 109        struct device *dev = &(drvdata->pdev->dev);
 110
 111        /* must be before the enabling to avoid redundant suspending */
 112        pm_runtime_set_autosuspend_delay(dev, CC_TRNG_SUSPEND_TIMEOUT);
 113        pm_runtime_use_autosuspend(dev);
 114        /* set us as active - note we won't do PM ops until cc_trng_pm_go()! */
 115        return pm_runtime_set_active(dev);
 116}
 117
 118static void cc_trng_pm_go(struct cctrng_drvdata *drvdata)
 119{
 120        struct device *dev = &(drvdata->pdev->dev);
 121
 122        /* enable the PM module*/
 123        pm_runtime_enable(dev);
 124}
 125
 126static void cc_trng_pm_fini(struct cctrng_drvdata *drvdata)
 127{
 128        struct device *dev = &(drvdata->pdev->dev);
 129
 130        pm_runtime_disable(dev);
 131}
 132
 133
 134static inline int cc_trng_parse_sampling_ratio(struct cctrng_drvdata *drvdata)
 135{
 136        struct device *dev = &(drvdata->pdev->dev);
 137        struct device_node *np = drvdata->pdev->dev.of_node;
 138        int rc;
 139        int i;
 140        /* ret will be set to 0 if at least one rosc has (sampling ratio > 0) */
 141        int ret = -EINVAL;
 142
 143        rc = of_property_read_u32_array(np, "arm,rosc-ratio",
 144                                        drvdata->smpl_ratio,
 145                                        CC_TRNG_NUM_OF_ROSCS);
 146        if (rc) {
 147                /* arm,rosc-ratio was not found in device tree */
 148                return rc;
 149        }
 150
 151        /* verify that at least one rosc has (sampling ratio > 0) */
 152        for (i = 0; i < CC_TRNG_NUM_OF_ROSCS; ++i) {
 153                dev_dbg(dev, "rosc %d sampling ratio %u",
 154                        i, drvdata->smpl_ratio[i]);
 155
 156                if (drvdata->smpl_ratio[i] > 0)
 157                        ret = 0;
 158        }
 159
 160        return ret;
 161}
 162
 163static int cc_trng_change_rosc(struct cctrng_drvdata *drvdata)
 164{
 165        struct device *dev = &(drvdata->pdev->dev);
 166
 167        dev_dbg(dev, "cctrng change rosc (was %d)\n", drvdata->active_rosc);
 168        drvdata->active_rosc += 1;
 169
 170        while (drvdata->active_rosc < CC_TRNG_NUM_OF_ROSCS) {
 171                if (drvdata->smpl_ratio[drvdata->active_rosc] > 0)
 172                        return 0;
 173
 174                drvdata->active_rosc += 1;
 175        }
 176        return -EINVAL;
 177}
 178
 179
 180static void cc_trng_enable_rnd_source(struct cctrng_drvdata *drvdata)
 181{
 182        u32 max_cycles;
 183
 184        /* Set watchdog threshold to maximal allowed time (in CPU cycles) */
 185        max_cycles = CCTRNG_TIMEOUT(drvdata->smpl_ratio[drvdata->active_rosc]);
 186        cc_iowrite(drvdata, CC_RNG_WATCHDOG_VAL_REG_OFFSET, max_cycles);
 187
 188        /* enable the RND source */
 189        cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0x1);
 190
 191        /* unmask RNG interrupts */
 192        cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, (u32)~CC_RNG_INT_MASK);
 193}
 194
 195
 196/* increase circular data buffer index (head/tail) */
 197static inline void circ_idx_inc(int *idx, int bytes)
 198{
 199        *idx += (bytes + 3) >> 2;
 200        *idx &= (CCTRNG_DATA_BUF_WORDS - 1);
 201}
 202
 203static inline size_t circ_buf_space(struct cctrng_drvdata *drvdata)
 204{
 205        return CIRC_SPACE(drvdata->circ.head,
 206                          drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
 207
 208}
 209
 210static int cctrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
 211{
 212        /* current implementation ignores "wait" */
 213
 214        struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)rng->priv;
 215        struct device *dev = &(drvdata->pdev->dev);
 216        u32 *buf = (u32 *)drvdata->circ.buf;
 217        size_t copied = 0;
 218        size_t cnt_w;
 219        size_t size;
 220        size_t left;
 221
 222        if (!spin_trylock(&drvdata->read_lock)) {
 223                /* concurrent consumers from data_buf cannot be served */
 224                dev_dbg_ratelimited(dev, "unable to hold lock\n");
 225                return 0;
 226        }
 227
 228        /* copy till end of data buffer (without wrap back) */
 229        cnt_w = CIRC_CNT_TO_END(drvdata->circ.head,
 230                                drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
 231        size = min((cnt_w<<2), max);
 232        memcpy(data, &(buf[drvdata->circ.tail]), size);
 233        copied = size;
 234        circ_idx_inc(&drvdata->circ.tail, size);
 235        /* copy rest of data in data buffer */
 236        left = max - copied;
 237        if (left > 0) {
 238                cnt_w = CIRC_CNT(drvdata->circ.head,
 239                                 drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
 240                size = min((cnt_w<<2), left);
 241                memcpy(data, &(buf[drvdata->circ.tail]), size);
 242                copied += size;
 243                circ_idx_inc(&drvdata->circ.tail, size);
 244        }
 245
 246        spin_unlock(&drvdata->read_lock);
 247
 248        if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
 249                if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
 250                        /* re-check space in buffer to avoid potential race */
 251                        if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
 252                                /* increment device's usage counter */
 253                                int rc = cc_trng_pm_get(dev);
 254
 255                                if (rc) {
 256                                        dev_err(dev,
 257                                                "cc_trng_pm_get returned %x\n",
 258                                                rc);
 259                                        return rc;
 260                                }
 261
 262                                /* schedule execution of deferred work handler
 263                                 * for filling of data buffer
 264                                 */
 265                                schedule_work(&drvdata->startwork);
 266                        } else {
 267                                atomic_set(&drvdata->pending_hw, 0);
 268                        }
 269                }
 270        }
 271
 272        return copied;
 273}
 274
/* Reset the TRNG HW and (re)start entropy collection on the active rosc.
 * The register write sequence is order-sensitive: clock on -> SW reset ->
 * verify reset via sample-count readback -> configure -> enable source.
 */
static void cc_trng_hw_trigger(struct cctrng_drvdata *drvdata)
{
	u32 tmp_smpl_cnt = 0;
	struct device *dev = &(drvdata->pdev->dev);

	dev_dbg(dev, "cctrng hw trigger.\n");

	/* enable the HW RND clock */
	cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);

	/* do software reset */
	cc_iowrite(drvdata, CC_RNG_SW_RESET_REG_OFFSET, 0x1);
	/* in order to verify that the reset has completed,
	 * the sample count need to be verified
	 */
	do {
		/* enable the HW RND clock   */
		cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);

		/* set sampling ratio (rng_clocks) between consecutive bits */
		cc_iowrite(drvdata, CC_SAMPLE_CNT1_REG_OFFSET,
			   drvdata->smpl_ratio[drvdata->active_rosc]);

		/* read the sampling ratio  */
		tmp_smpl_cnt = cc_ioread(drvdata, CC_SAMPLE_CNT1_REG_OFFSET);

	} while (tmp_smpl_cnt != drvdata->smpl_ratio[drvdata->active_rosc]);
	/* NOTE(review): the readback loop has no iteration bound; it assumes
	 * the HW reset always completes - confirm against the HW spec.
	 */

	/* disable the RND source for setting new parameters in HW */
	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);

	/* clear any RNG interrupts pending from before the reset */
	cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, 0xFFFFFFFF);

	/* select the active ring oscillator as entropy source */
	cc_iowrite(drvdata, CC_TRNG_CONFIG_REG_OFFSET, drvdata->active_rosc);

	/* Debug Control register: set to 0 - no bypasses */
	cc_iowrite(drvdata, CC_TRNG_DEBUG_CONTROL_REG_OFFSET, 0);

	/* program watchdog, start the source and unmask RNG interrupts */
	cc_trng_enable_rnd_source(drvdata);
}
 315
/*
 * Deferred work run after an RNG interrupt: harvest the EHR (Entropy
 * Holding Register) words into the circular buffer, then either re-arm the
 * HW for more data, retry with the next rosc on error, or go idle and drop
 * the runtime-PM reference taken when the HW was armed.
 */
static void cc_trng_compwork_handler(struct work_struct *w)
{
	u32 isr = 0;
	u32 ehr_valid = 0;
	struct cctrng_drvdata *drvdata =
			container_of(w, struct cctrng_drvdata, compwork);
	struct device *dev = &(drvdata->pdev->dev);
	int i;

	/* stop DMA and the RNG source */
	cc_iowrite(drvdata, CC_RNG_DMA_ENABLE_REG_OFFSET, 0);
	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);

	/* read RNG_ISR and check for errors */
	isr = cc_ioread(drvdata, CC_RNG_ISR_REG_OFFSET);
	ehr_valid = CC_REG_FLD_GET(RNG_ISR, EHR_VALID, isr);
	dev_dbg(dev, "Got RNG_ISR=0x%08X (EHR_VALID=%u)\n", isr, ehr_valid);

	if (fips_enabled && CC_REG_FLD_GET(RNG_ISR, CRNGT_ERR, isr)) {
		fips_fail_notify();
		/* FIPS error is fatal */
		panic("Got HW CRNGT error while fips is enabled!\n");
	}

	/* Clear all pending RNG interrupts */
	cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, isr);


	if (!ehr_valid) {
		/* in case of AUTOCORR/TIMEOUT error, try the next ROSC */
		if (CC_REG_FLD_GET(RNG_ISR, AUTOCORR_ERR, isr) ||
				CC_REG_FLD_GET(RNG_ISR, WATCHDOG, isr)) {
			dev_dbg(dev, "cctrng autocorr/timeout error.\n");
			goto next_rosc;
		}

		/* in case of VN error, ignore it */
	}

	/* read EHR data from registers */
	for (i = 0; i < CC_TRNG_EHR_IN_WORDS; i++) {
		/* calc word ptr in data_buf */
		u32 *buf = (u32 *)drvdata->circ.buf;

		buf[drvdata->circ.head] = cc_ioread(drvdata,
				CC_EHR_DATA_0_REG_OFFSET + (i*sizeof(u32)));

		/* EHR_DATA registers are cleared on read. In case 0 value was
		 * returned, restart the entropy collection.
		 */
		if (buf[drvdata->circ.head] == 0) {
			dev_dbg(dev, "Got 0 value in EHR. active_rosc %u\n",
				drvdata->active_rosc);
			goto next_rosc;
		}

		circ_idx_inc(&drvdata->circ.head, 1<<2);
	}

	/* HW is idle from here on unless re-armed below */
	atomic_set(&drvdata->pending_hw, 0);

	/* continue to fill data buffer if needed */
	if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
		if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
			/* Re-enable rnd source and keep our PM reference */
			cc_trng_enable_rnd_source(drvdata);
			return;
		}
	}

	/* buffer full or HW claimed elsewhere: drop the PM reference */
	cc_trng_pm_put_suspend(dev);

	dev_dbg(dev, "compwork handler done\n");
	return;

next_rosc:
	if ((circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) &&
			(cc_trng_change_rosc(drvdata) == 0)) {
		/* trigger trng hw with next rosc */
		cc_trng_hw_trigger(drvdata);
	} else {
		/* no rosc left (or no space): go idle, release PM reference */
		atomic_set(&drvdata->pending_hw, 0);
		cc_trng_pm_put_suspend(dev);
	}
}
 401
/*
 * Interrupt handler for the (possibly shared) CryptoCell IRQ line.
 * Acknowledges the host-level interrupt, masks further RNG interrupts and
 * defers the actual entropy harvesting to cc_trng_compwork_handler().
 */
static irqreturn_t cc_isr(int irq, void *dev_id)
{
	struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)dev_id;
	struct device *dev = &(drvdata->pdev->dev);
	u32 irr;

	/* if driver suspended return, probably shared interrupt */
	if (pm_runtime_suspended(dev))
		return IRQ_NONE;

	/* read the interrupt status */
	irr = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
	dev_dbg(dev, "Got IRR=0x%08X\n", irr);

	if (irr == 0) /* Probably shared interrupt line */
		return IRQ_NONE;

	/* clear interrupt - must be before processing events */
	cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, irr);

	/* RNG interrupt - most probable */
	if (irr & CC_HOST_RNG_IRQ_MASK) {
		/* Mask RNG interrupts - will be unmasked in deferred work */
		cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, 0xFFFFFFFF);

		/* We clear RNG interrupt here,
		 * to avoid it from firing as we'll unmask RNG interrupts.
		 */
		cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET,
			   CC_HOST_RNG_IRQ_MASK);

		irr &= ~CC_HOST_RNG_IRQ_MASK;

		/* schedule execution of deferred work handler */
		schedule_work(&drvdata->compwork);
	}

	if (irr) {
		dev_dbg_ratelimited(dev,
				"IRR includes unknown cause bits (0x%08X)\n",
				irr);
		/* Just warning */
	}

	return IRQ_HANDLED;
}
 448
 449static void cc_trng_startwork_handler(struct work_struct *w)
 450{
 451        struct cctrng_drvdata *drvdata =
 452                        container_of(w, struct cctrng_drvdata, startwork);
 453
 454        drvdata->active_rosc = 0;
 455        cc_trng_hw_trigger(drvdata);
 456}
 457
 458
 459static int cc_trng_clk_init(struct cctrng_drvdata *drvdata)
 460{
 461        struct clk *clk;
 462        struct device *dev = &(drvdata->pdev->dev);
 463        int rc = 0;
 464
 465        clk = devm_clk_get_optional(dev, NULL);
 466        if (IS_ERR(clk))
 467                return dev_err_probe(dev, PTR_ERR(clk),
 468                                     "Error getting clock\n");
 469
 470        drvdata->clk = clk;
 471
 472        rc = clk_prepare_enable(drvdata->clk);
 473        if (rc) {
 474                dev_err(dev, "Failed to enable clock\n");
 475                return rc;
 476        }
 477
 478        return 0;
 479}
 480
/* Disable and unprepare the device clock; reverses cc_trng_clk_init(). */
static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata)
{
	clk_disable_unprepare(drvdata->clk);
}
 485
 486
/*
 * cctrng_probe() - platform probe: map registers, parse DT sampling ratios,
 * set up clock/IRQ/runtime-PM, register the hwrng device and kick off the
 * first entropy collection. Ordering matters: the IRQ is requested before
 * interrupts are unmasked, and pending_hw is pre-set to 1 so a read issued
 * right after hwrng registration cannot race the initial HW trigger.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int cctrng_probe(struct platform_device *pdev)
{
	struct cctrng_drvdata *drvdata;
	struct device *dev = &pdev->dev;
	int rc = 0;
	u32 val;
	int irq;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!drvdata->rng.name)
		return -ENOMEM;

	drvdata->rng.read = cctrng_read;
	drvdata->rng.priv = (unsigned long)drvdata;
	drvdata->rng.quality = CC_TRNG_QUALITY;

	platform_set_drvdata(pdev, drvdata);
	drvdata->pdev = pdev;

	/* the circular buffer is backed by the fixed data_buf array */
	drvdata->circ.buf = (char *)drvdata->data_buf;

	drvdata->cc_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(drvdata->cc_base)) {
		dev_err(dev, "Failed to ioremap registers");
		return PTR_ERR(drvdata->cc_base);
	}

	/* Then IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* parse sampling rate from device tree */
	rc = cc_trng_parse_sampling_ratio(drvdata);
	if (rc) {
		dev_err(dev, "Failed to get legal sampling ratio for rosc\n");
		return rc;
	}

	rc = cc_trng_clk_init(drvdata);
	if (rc) {
		dev_err(dev, "cc_trng_clk_init failed\n");
		return rc;
	}

	INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler);
	INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler);
	spin_lock_init(&drvdata->read_lock);

	/* register the driver isr function */
	rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata);
	if (rc) {
		dev_err(dev, "Could not register to interrupt %d\n", irq);
		goto post_clk_err;
	}
	dev_dbg(dev, "Registered to IRQ: %d\n", irq);

	/* Clear all pending interrupts */
	val = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
	dev_dbg(dev, "IRR=0x%08X\n", val);
	cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, val);

	/* unmask HOST RNG interrupt */
	cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
		   cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
		   ~CC_HOST_RNG_IRQ_MASK);

	/* init PM */
	rc = cc_trng_pm_init(drvdata);
	if (rc) {
		dev_err(dev, "cc_trng_pm_init failed\n");
		goto post_clk_err;
	}

	/* increment device's usage counter */
	rc = cc_trng_pm_get(dev);
	if (rc) {
		dev_err(dev, "cc_trng_pm_get returned %x\n", rc);
		goto post_pm_err;
	}

	/* set pending_hw to verify that HW won't be triggered from read */
	atomic_set(&drvdata->pending_hw, 1);

	/* registration of the hwrng device */
	rc = devm_hwrng_register(dev, &drvdata->rng);
	if (rc) {
		dev_err(dev, "Could not register hwrng device.\n");
		goto post_pm_err;
	}

	/* trigger HW to start generate data */
	drvdata->active_rosc = 0;
	cc_trng_hw_trigger(drvdata);

	/* All set, we can allow auto-suspend */
	cc_trng_pm_go(drvdata);

	dev_info(dev, "ARM cctrng device initialized\n");

	return 0;

post_pm_err:
	cc_trng_pm_fini(drvdata);

post_clk_err:
	cc_trng_clk_fini(drvdata);

	return rc;
}
 601
 602static int cctrng_remove(struct platform_device *pdev)
 603{
 604        struct cctrng_drvdata *drvdata = platform_get_drvdata(pdev);
 605        struct device *dev = &pdev->dev;
 606
 607        dev_dbg(dev, "Releasing cctrng resources...\n");
 608
 609        cc_trng_pm_fini(drvdata);
 610
 611        cc_trng_clk_fini(drvdata);
 612
 613        dev_info(dev, "ARM cctrng device terminated\n");
 614
 615        return 0;
 616}
 617
/* Suspend callback (system and runtime PM, via UNIVERSAL_DEV_PM_OPS):
 * assert HW power-down, then gate the clock.
 * NOTE(review): POWER_DOWN_EN is written before the clock is disabled -
 * presumably required ordering; confirm against the HW spec.
 */
static int __maybe_unused cctrng_suspend(struct device *dev)
{
	struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);

	dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
	cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
			POWER_DOWN_ENABLE);

	clk_disable_unprepare(drvdata->clk);

	return 0;
}
 630
 631static bool cctrng_wait_for_reset_completion(struct cctrng_drvdata *drvdata)
 632{
 633        unsigned int val;
 634        unsigned int i;
 635
 636        for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
 637                /* in cc7x3 NVM_IS_IDLE indicates that CC reset is
 638                 *  completed and device is fully functional
 639                 */
 640                val = cc_ioread(drvdata, CC_NVM_IS_IDLE_REG_OFFSET);
 641                if (val & BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)) {
 642                        /* hw indicate reset completed */
 643                        return true;
 644                }
 645                /* allow scheduling other process on the processor */
 646                schedule();
 647        }
 648        /* reset not completed */
 649        return false;
 650}
 651
 652static int __maybe_unused cctrng_resume(struct device *dev)
 653{
 654        struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
 655        int rc;
 656
 657        dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
 658        /* Enables the device source clk */
 659        rc = clk_prepare_enable(drvdata->clk);
 660        if (rc) {
 661                dev_err(dev, "failed getting clock back on. We're toast.\n");
 662                return rc;
 663        }
 664
 665        /* wait for Cryptocell reset completion */
 666        if (!cctrng_wait_for_reset_completion(drvdata)) {
 667                dev_err(dev, "Cryptocell reset not completed");
 668                return -EBUSY;
 669        }
 670
 671        /* unmask HOST RNG interrupt */
 672        cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
 673                   cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
 674                   ~CC_HOST_RNG_IRQ_MASK);
 675
 676        cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
 677                   POWER_DOWN_DISABLE);
 678
 679        return 0;
 680}
 681
/* Same suspend/resume callbacks serve both system and runtime PM. */
static UNIVERSAL_DEV_PM_OPS(cctrng_pm, cctrng_suspend, cctrng_resume, NULL);

/* Device-tree match table: CryptoCell 713 and 703 TRNG blocks. */
static const struct of_device_id arm_cctrng_dt_match[] = {
	{ .compatible = "arm,cryptocell-713-trng", },
	{ .compatible = "arm,cryptocell-703-trng", },
	{},
};
MODULE_DEVICE_TABLE(of, arm_cctrng_dt_match);

static struct platform_driver cctrng_driver = {
	.driver = {
		.name = "cctrng",
		.of_match_table = arm_cctrng_dt_match,
		.pm = &cctrng_pm,
	},
	.probe = cctrng_probe,
	.remove = cctrng_remove,
};
 700
/* Module init: verify data-buffer sizing at compile time (circ_buf.h
 * requires a power-of-2 size; the EHR produces 6 words per harvest),
 * then register the platform driver.
 */
static int __init cctrng_mod_init(void)
{
	/* Compile time assertion checks */
	BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);
	BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);

	return platform_driver_register(&cctrng_driver);
}
module_init(cctrng_mod_init);
 710
/* Module exit: unregister the platform driver. */
static void __exit cctrng_mod_exit(void)
{
	platform_driver_unregister(&cctrng_driver);
}
module_exit(cctrng_mod_exit);
 716
 717/* Module description */
 718MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver");
 719MODULE_AUTHOR("ARM");
 720MODULE_LICENSE("GPL v2");
 721