uboot/drivers/clk/clk-uclass.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Google, Inc
 * Written by Simon Glass <sjg@chromium.org>
 * Copyright (c) 2016, NVIDIA CORPORATION.
 * Copyright (c) 2018, Theobroma Systems Design und Consulting GmbH
 */

#define LOG_CATEGORY UCLASS_CLK

#include <common.h>
#include <clk.h>
#include <clk-uclass.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <asm/global_data.h>
#include <dm/device_compat.h>
#include <dm/device-internal.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <linux/bug.h>
#include <linux/clk-provider.h>
#include <linux/err.h>

static inline const struct clk_ops *clk_dev_ops(struct udevice *dev)
{
        return (const struct clk_ops *)dev->driver->ops;
}

struct clk *dev_get_clk_ptr(struct udevice *dev)
{
        return (struct clk *)dev_get_uclass_priv(dev);
}

#if CONFIG_IS_ENABLED(OF_PLATDATA)
int clk_get_by_phandle(struct udevice *dev, const struct phandle_1_arg *cells,
                       struct clk *clk)
{
        int ret;

        ret = device_get_by_ofplat_idx(cells->idx, &clk->dev);
        if (ret)
                return ret;
        clk->id = cells->arg[0];

        return 0;
}
#endif

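/*
 * Usage sketch (illustrative, not part of this file): in an OF_PLATDATA
 * build the devicetree is compiled by dtoc into C structures, and a driver
 * passes one of the generated phandle cells straight in. The struct name
 * and the "clocks" dtplat field name below are assumptions based on the
 * usual dtoc naming; check the generated dt-structs for the real names.
 *
 *        struct dtd_vendor_uart *dtplat = dev_get_plat(dev);
 *        struct clk clk;
 *        int ret;
 *
 *        ret = clk_get_by_phandle(dev, &dtplat->clocks[0], &clk);
 *        if (ret)
 *                return ret;
 */
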
#if CONFIG_IS_ENABLED(OF_REAL)
static int clk_of_xlate_default(struct clk *clk,
                                struct ofnode_phandle_args *args)
{
        debug("%s(clk=%p)\n", __func__, clk);

        if (args->args_count > 1) {
                debug("Invalid args_count: %d\n", args->args_count);
                return -EINVAL;
        }

        if (args->args_count)
                clk->id = args->args[0];
        else
                clk->id = 0;

        clk->data = 0;

        return 0;
}

static int clk_get_by_index_tail(int ret, ofnode node,
                                 struct ofnode_phandle_args *args,
                                 const char *list_name, int index,
                                 struct clk *clk)
{
        struct udevice *dev_clk;
        const struct clk_ops *ops;

        assert(clk);
        clk->dev = NULL;
        if (ret)
                goto err;

        ret = uclass_get_device_by_ofnode(UCLASS_CLK, args->node, &dev_clk);
        if (ret) {
                debug("%s: uclass_get_device_by_ofnode failed: err=%d\n",
                      __func__, ret);
                return log_msg_ret("get", ret);
        }

        clk->dev = dev_clk;

        ops = clk_dev_ops(dev_clk);

        if (ops->of_xlate)
                ret = ops->of_xlate(clk, args);
        else
                ret = clk_of_xlate_default(clk, args);
        if (ret) {
                debug("of_xlate() failed: %d\n", ret);
                return log_msg_ret("xlate", ret);
        }

        return clk_request(dev_clk, clk);
err:
        debug("%s: Node '%s', property '%s', failed to request CLK index %d: %d\n",
              __func__, ofnode_get_name(node), list_name, index, ret);

        return log_msg_ret("prop", ret);
}

static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name,
                                   int index, struct clk *clk)
{
        int ret;
        struct ofnode_phandle_args args;

        debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk);

        assert(clk);
        clk->dev = NULL;

        ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0,
                                         index, &args);
        if (ret) {
                debug("%s: dev_read_phandle_with_args failed: err=%d\n",
                      __func__, ret);
                return log_ret(ret);
        }

        return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
                                     index, clk);
}

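/*
 * Usage sketch (illustrative, not part of this file): a consumer driver
 * fetching the first entry of its "clocks" property by position.
 *
 *        struct clk clk;
 *        int ret;
 *
 *        ret = clk_get_by_index(dev, 0, &clk);
 *        if (ret)
 *                return ret;
 */
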
int clk_get_by_index(struct udevice *dev, int index, struct clk *clk)
{
        struct ofnode_phandle_args args;
        int ret;

        ret = dev_read_phandle_with_args(dev, "clocks", "#clock-cells", 0,
                                         index, &args);

        return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
                                     index, clk);
}

int clk_get_by_index_nodev(ofnode node, int index, struct clk *clk)
{
        struct ofnode_phandle_args args;
        int ret;

        ret = ofnode_parse_phandle_with_args(node, "clocks", "#clock-cells", 0,
                                             index, &args);

        return clk_get_by_index_tail(ret, node, &args, "clocks",
                                     index, clk);
}

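/*
 * Usage sketch (illustrative): grabbing and enabling every clock listed in
 * the "clocks" property at once, using the bulk helpers defined later in
 * this file.
 *
 *        struct clk_bulk bulk;
 *        int ret;
 *
 *        ret = clk_get_bulk(dev, &bulk);
 *        if (ret)
 *                return ret;
 *        ret = clk_enable_bulk(&bulk);
 *        if (ret)
 *                return ret;
 */
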
int clk_get_bulk(struct udevice *dev, struct clk_bulk *bulk)
{
        int i, ret, err, count;

        bulk->count = 0;

        count = dev_count_phandle_with_args(dev, "clocks", "#clock-cells", 0);
        if (count < 1)
                return count;

        bulk->clks = devm_kcalloc(dev, count, sizeof(struct clk), GFP_KERNEL);
        if (!bulk->clks)
                return -ENOMEM;

        for (i = 0; i < count; i++) {
                ret = clk_get_by_index(dev, i, &bulk->clks[i]);
                if (ret < 0)
                        goto bulk_get_err;

                ++bulk->count;
        }

        return 0;

bulk_get_err:
        err = clk_release_all(bulk->clks, bulk->count);
        if (err)
                debug("%s: could not release all clocks for %p\n",
                      __func__, dev);

        return ret;
}

static struct clk *clk_set_default_get_by_id(struct clk *clk)
{
        struct clk *c = clk;

        if (CONFIG_IS_ENABLED(CLK_CCF)) {
                int ret = clk_get_by_id(clk->id, &c);

                if (ret) {
                        debug("%s(): could not get parent clock pointer, id %lu\n",
                              __func__, clk->id);
                        return ERR_PTR(ret);
                }
        }

        return c;
}

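/*
 * The two helpers below consume the standard assigned-clocks devicetree
 * properties. An illustrative consumer node (not taken from any real
 * devicetree) might look like:
 *
 *        uart0: serial@1000 {
 *                clocks = <&ccu CLK_UART0>;
 *                assigned-clocks = <&ccu CLK_UART0>;
 *                assigned-clock-parents = <&ccu CLK_PLL_PERIPH>;
 *                assigned-clock-rates = <24000000>;
 *        };
 */
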
static int clk_set_default_parents(struct udevice *dev,
                                   enum clk_defaults_stage stage)
{
        struct clk clk, parent_clk, *c, *p;
        int index;
        int num_parents;
        int ret;

        num_parents = dev_count_phandle_with_args(dev, "assigned-clock-parents",
                                                  "#clock-cells", 0);
        if (num_parents < 0) {
                debug("%s: could not read assigned-clock-parents for %p\n",
                      __func__, dev);
                return 0;
        }

        for (index = 0; index < num_parents; index++) {
                ret = clk_get_by_indexed_prop(dev, "assigned-clock-parents",
                                              index, &parent_clk);
                /* If -ENOENT, this is a no-op entry */
                if (ret == -ENOENT)
                        continue;

                if (ret) {
                        debug("%s: could not get parent clock %d for %s\n",
                              __func__, index, dev_read_name(dev));
                        return ret;
                }

                p = clk_set_default_get_by_id(&parent_clk);
                if (IS_ERR(p))
                        return PTR_ERR(p);

                ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
                                              index, &clk);
                /*
                 * If the clock provider is not ready yet, let it handle
                 * the re-programming later.
                 */
                if (ret == -EPROBE_DEFER) {
                        ret = 0;
                        continue;
                }

                if (ret) {
                        debug("%s: could not get assigned clock %d for %s\n",
                              __func__, index, dev_read_name(dev));
                        return ret;
                }

                /*
                 * This is the clk provider device trying to reparent itself.
                 * It cannot be done right now; it has to wait until after the
                 * device is probed.
                 */
                if (stage == CLK_DEFAULTS_PRE && clk.dev == dev)
                        continue;

                if (stage != CLK_DEFAULTS_PRE && clk.dev != dev)
                        /* do not set up the parent clocks twice */
                        continue;

                c = clk_set_default_get_by_id(&clk);
                if (IS_ERR(c))
                        return PTR_ERR(c);

                ret = clk_set_parent(c, p);
                /*
                 * Not all drivers may support clock-reparenting (as of now).
                 * Ignore errors due to this.
                 */
                if (ret == -ENOSYS)
                        continue;

                if (ret < 0) {
                        debug("%s: failed to reparent clock %d for %s\n",
                              __func__, index, dev_read_name(dev));
                        return ret;
                }
        }

        return 0;
}

static int clk_set_default_rates(struct udevice *dev,
                                 enum clk_defaults_stage stage)
{
        struct clk clk, *c;
        int index;
        int num_rates;
        int size;
        int ret = 0;
        u32 *rates = NULL;

        size = dev_read_size(dev, "assigned-clock-rates");
        if (size < 0)
                return 0;

        num_rates = size / sizeof(u32);
        rates = calloc(num_rates, sizeof(u32));
        if (!rates)
                return -ENOMEM;

        ret = dev_read_u32_array(dev, "assigned-clock-rates", rates, num_rates);
        if (ret)
                goto fail;

        for (index = 0; index < num_rates; index++) {
                /* If 0 is passed, this is a no-op */
                if (!rates[index])
                        continue;

                ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
                                              index, &clk);
                /*
                 * If the clock provider is not ready yet, let it handle
                 * the re-programming later.
                 */
                if (ret == -EPROBE_DEFER) {
                        ret = 0;
                        continue;
                }

                if (ret) {
                        dev_dbg(dev,
                                "could not get assigned clock %d (err = %d)\n",
                                index, ret);
                        continue;
                }

                /*
                 * This is the clk provider device trying to program itself.
                 * It cannot be done right now; it has to wait until after the
                 * device is probed.
                 */
                if (stage == CLK_DEFAULTS_PRE && clk.dev == dev)
                        continue;

                if (stage != CLK_DEFAULTS_PRE && clk.dev != dev)
                        /* do not set the clock rates twice */
                        continue;

                c = clk_set_default_get_by_id(&clk);
                if (IS_ERR(c))
                        return PTR_ERR(c);

                ret = clk_set_rate(c, rates[index]);

                if (ret < 0) {
                        dev_warn(dev,
                                 "failed to set rate on clock index %d (%ld) (error = %d)\n",
                                 index, clk.id, ret);
                        break;
                }
        }

fail:
        free(rates);
        return ret;
}

int clk_set_defaults(struct udevice *dev, enum clk_defaults_stage stage)
{
        int ret;

        if (!dev_has_ofnode(dev))
                return 0;

        /*
         * To avoid setting defaults twice, don't set them before relocation.
         * However, still set them for SPL. And still set them if explicitly
         * asked.
         */
        if (!(IS_ENABLED(CONFIG_SPL_BUILD) || (gd->flags & GD_FLG_RELOC)))
                if (stage != CLK_DEFAULTS_POST_FORCE)
                        return 0;

        debug("%s(%s)\n", __func__, dev_read_name(dev));

        ret = clk_set_default_parents(dev, stage);
        if (ret)
                return ret;

        ret = clk_set_default_rates(dev, stage);
        if (ret < 0)
                return ret;

        return 0;
}

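/*
 * Usage sketch (illustrative): the common consumer pattern of looking a
 * clock up by its "clock-names" entry, enabling it and reading the rate.
 * The name "bus" is only an example.
 *
 *        struct clk clk;
 *        ulong rate;
 *        int ret;
 *
 *        ret = clk_get_by_name(dev, "bus", &clk);
 *        if (ret)
 *                return ret;
 *        ret = clk_enable(&clk);
 *        if (ret)
 *                return ret;
 *        rate = clk_get_rate(&clk);
 */
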
int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk)
{
        int index;

        debug("%s(dev=%p, name=%s, clk=%p)\n", __func__, dev, name, clk);
        clk->dev = NULL;

        index = dev_read_stringlist_search(dev, "clock-names", name);
        if (index < 0) {
                debug("dev_read_stringlist_search() failed: %d\n", index);
                return index;
        }

        return clk_get_by_index(dev, index, clk);
}
#endif /* OF_REAL */

int clk_get_by_name_nodev(ofnode node, const char *name, struct clk *clk)
{
        int index;

        debug("%s(node=%s, name=%s, clk=%p)\n", __func__,
              ofnode_get_name(node), name, clk);
        clk->dev = NULL;

        index = ofnode_stringlist_search(node, "clock-names", name);
        if (index < 0) {
                debug("ofnode_stringlist_search() failed: %d\n", index);
                return index;
        }

        return clk_get_by_index_nodev(node, index, clk);
}

int clk_get_optional_nodev(ofnode node, const char *name, struct clk *clk)
{
        int ret;

        ret = clk_get_by_name_nodev(node, name, clk);
        if (ret == -ENODATA)
                return 0;

        return ret;
}

int clk_release_all(struct clk *clk, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                debug("%s(clk[%d]=%p)\n", __func__, i, &clk[i]);

                /* check if clock has been previously requested */
                if (!clk[i].dev)
                        continue;

                ret = clk_disable(&clk[i]);
                if (ret && ret != -ENOSYS)
                        return ret;

                ret = clk_free(&clk[i]);
                if (ret && ret != -ENOSYS)
                        return ret;
        }

        return 0;
}

int clk_request(struct udevice *dev, struct clk *clk)
{
        const struct clk_ops *ops;

        debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk);
        if (!clk)
                return 0;
        ops = clk_dev_ops(dev);

        clk->dev = dev;

        if (!ops->request)
                return 0;

        return ops->request(clk);
}

int clk_free(struct clk *clk)
{
        const struct clk_ops *ops;

        debug("%s(clk=%p)\n", __func__, clk);
        if (!clk_valid(clk))
                return 0;
        ops = clk_dev_ops(clk->dev);

        if (!ops->rfree)
                return 0;

        return ops->rfree(clk);
}

ulong clk_get_rate(struct clk *clk)
{
        const struct clk_ops *ops;
        int ret;

        debug("%s(clk=%p)\n", __func__, clk);
        if (!clk_valid(clk))
                return 0;
        ops = clk_dev_ops(clk->dev);

        if (!ops->get_rate)
                return -ENOSYS;

        ret = ops->get_rate(clk);
        if (ret)
                return log_ret(ret);

        return 0;
}

struct clk *clk_get_parent(struct clk *clk)
{
        struct udevice *pdev;
        struct clk *pclk;

        debug("%s(clk=%p)\n", __func__, clk);
        if (!clk_valid(clk))
                return NULL;

        pdev = dev_get_parent(clk->dev);
        if (!pdev)
                return ERR_PTR(-ENODEV);
        pclk = dev_get_clk_ptr(pdev);
        if (!pclk)
                return ERR_PTR(-ENODEV);

        return pclk;
}

long long clk_get_parent_rate(struct clk *clk)
{
        const struct clk_ops *ops;
        struct clk *pclk;

        debug("%s(clk=%p)\n", __func__, clk);
        if (!clk_valid(clk))
                return 0;

        pclk = clk_get_parent(clk);
        if (IS_ERR(pclk))
                return -ENODEV;

        ops = clk_dev_ops(pclk->dev);
        if (!ops->get_rate)
                return -ENOSYS;

        /* Read the rate if it is not cached yet, or if the no-cache flag is set */
        if (!pclk->rate || pclk->flags & CLK_GET_RATE_NOCACHE)
                pclk->rate = clk_get_rate(pclk);

        return pclk->rate;
}

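/*
 * Usage sketch (illustrative): asking the provider what rate it would
 * actually deliver before committing to it. The 48 MHz value is only an
 * example.
 *
 *        ulong rounded;
 *
 *        rounded = clk_round_rate(&clk, 48000000);
 *        if (!IS_ERR_VALUE(rounded))
 *                clk_set_rate(&clk, rounded);
 */
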
ulong clk_round_rate(struct clk *clk, ulong rate)
{
        const struct clk_ops *ops;

        debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
        if (!clk_valid(clk))
                return 0;

        ops = clk_dev_ops(clk->dev);
        if (!ops->round_rate)
                return -ENOSYS;

        return ops->round_rate(clk, rate);
}

static void clk_clean_rate_cache(struct clk *clk)
{
        struct udevice *child_dev;
        struct clk *clkp;

        if (!clk)
                return;

        clk->rate = 0;

        list_for_each_entry(child_dev, &clk->dev->child_head, sibling_node) {
                clkp = dev_get_clk_ptr(child_dev);
                clk_clean_rate_cache(clkp);
        }
}

ulong clk_set_rate(struct clk *clk, ulong rate)
{
        const struct clk_ops *ops;

        debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
        if (!clk_valid(clk))
                return 0;
        ops = clk_dev_ops(clk->dev);

        if (!ops->set_rate)
                return -ENOSYS;

        /* Clean up cached rates for us and all child clocks */
        clk_clean_rate_cache(clk);

        return ops->set_rate(clk, rate);
}

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        const struct clk_ops *ops;
        int ret;

        debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent);
        if (!clk_valid(clk))
                return 0;
        ops = clk_dev_ops(clk->dev);

        if (!ops->set_parent)
                return -ENOSYS;

        ret = ops->set_parent(clk, parent);
        if (ret)
                return ret;

        if (CONFIG_IS_ENABLED(CLK_CCF))
                ret = device_reparent(clk->dev, parent->dev);

        return ret;
}

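/*
 * With CLK_CCF enabled, clk_enable() and clk_disable() below keep a
 * per-clock enable_count and walk up to the parent clock, so a shared
 * clock is only gated once its last user has disabled it; without CCF
 * they simply forward to the provider's enable/disable ops.
 */
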
int clk_enable(struct clk *clk)
{
        const struct clk_ops *ops;
        struct clk *clkp = NULL;
        int ret;

        debug("%s(clk=%p)\n", __func__, clk);
        if (!clk_valid(clk))
                return 0;
        ops = clk_dev_ops(clk->dev);

        if (CONFIG_IS_ENABLED(CLK_CCF)) {
                /* Take id 0 as a non-valid clk, such as dummy */
                if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
                        if (clkp->enable_count) {
                                clkp->enable_count++;
                                return 0;
                        }
                        if (clkp->dev->parent &&
                            device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
                                ret = clk_enable(dev_get_clk_ptr(clkp->dev->parent));
                                if (ret) {
                                        printf("Enable %s failed\n",
                                               clkp->dev->parent->name);
                                        return ret;
                                }
                        }
                }

                if (ops->enable) {
                        ret = ops->enable(clk);
                        if (ret) {
                                printf("Enable %s failed\n", clk->dev->name);
                                return ret;
                        }
                }
                if (clkp)
                        clkp->enable_count++;
        } else {
                if (!ops->enable)
                        return -ENOSYS;
                return ops->enable(clk);
        }

        return 0;
}

int clk_enable_bulk(struct clk_bulk *bulk)
{
        int i, ret;

        for (i = 0; i < bulk->count; i++) {
                ret = clk_enable(&bulk->clks[i]);
                if (ret < 0 && ret != -ENOSYS)
                        return ret;
        }

        return 0;
}

int clk_disable(struct clk *clk)
{
        const struct clk_ops *ops;
        struct clk *clkp = NULL;
        int ret;

        debug("%s(clk=%p)\n", __func__, clk);
        if (!clk_valid(clk))
                return 0;
        ops = clk_dev_ops(clk->dev);

        if (CONFIG_IS_ENABLED(CLK_CCF)) {
                if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
                        if (clkp->flags & CLK_IS_CRITICAL)
                                return 0;

                        if (clkp->enable_count == 0) {
                                printf("clk %s already disabled\n",
                                       clkp->dev->name);
                                return 0;
                        }

                        if (--clkp->enable_count > 0)
                                return 0;
                }

                if (ops->disable) {
                        ret = ops->disable(clk);
                        if (ret)
                                return ret;
                }

                if (clkp && clkp->dev->parent &&
                    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
                        ret = clk_disable(dev_get_clk_ptr(clkp->dev->parent));
                        if (ret) {
                                printf("Disable %s failed\n",
                                       clkp->dev->parent->name);
                                return ret;
                        }
                }
        } else {
                if (!ops->disable)
                        return -ENOSYS;

                return ops->disable(clk);
        }

        return 0;
}

int clk_disable_bulk(struct clk_bulk *bulk)
{
        int i, ret;

        for (i = 0; i < bulk->count; i++) {
                ret = clk_disable(&bulk->clks[i]);
                if (ret < 0 && ret != -ENOSYS)
                        return ret;
        }

        return 0;
}

int clk_get_by_id(ulong id, struct clk **clkp)
{
        struct udevice *dev;
        struct uclass *uc;
        int ret;

        ret = uclass_get(UCLASS_CLK, &uc);
        if (ret)
                return ret;

        uclass_foreach_dev(dev, uc) {
                struct clk *clk = dev_get_clk_ptr(dev);

                if (clk && clk->id == id) {
                        *clkp = clk;
                        return 0;
                }
        }

        return -ENOENT;
}

bool clk_is_match(const struct clk *p, const struct clk *q)
{
        /* trivial case: identical struct clk's or both NULL */
        if (p == q)
                return true;

        /* trivial case #2: only one of the clk pointers is NULL */
        if (!p || !q)
                return false;

        /* same device, id and data */
        if (p->dev == q->dev && p->id == q->id && p->data == q->data)
                return true;

        return false;
}

static void devm_clk_release(struct udevice *dev, void *res)
{
        clk_free(res);
}

static int devm_clk_match(struct udevice *dev, void *res, void *data)
{
        return res == data;
}

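/*
 * Usage sketch (illustrative): the devres variants below tie the clock's
 * lifetime to the consumer device, so no explicit clk_free() is needed.
 * The clock name "core" is only an example.
 *
 *        struct clk *clk;
 *
 *        clk = devm_clk_get(dev, "core");
 *        if (IS_ERR(clk))
 *                return PTR_ERR(clk);
 *        clk_enable(clk);
 */
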
struct clk *devm_clk_get(struct udevice *dev, const char *id)
{
        int rc;
        struct clk *clk;

        clk = devres_alloc(devm_clk_release, sizeof(struct clk), __GFP_ZERO);
        if (unlikely(!clk))
                return ERR_PTR(-ENOMEM);

        rc = clk_get_by_name(dev, id, clk);
        if (rc)
                return ERR_PTR(rc);

        devres_add(dev, clk);
        return clk;
}

struct clk *devm_clk_get_optional(struct udevice *dev, const char *id)
{
        struct clk *clk = devm_clk_get(dev, id);

        if (PTR_ERR(clk) == -ENODATA)
                return NULL;

        return clk;
}

void devm_clk_put(struct udevice *dev, struct clk *clk)
{
        int rc;

        if (!clk)
                return;

        rc = devres_release(dev, devm_clk_release, devm_clk_match, clk);
        WARN_ON(rc);
}

int clk_uclass_post_probe(struct udevice *dev)
{
        /*
         * Call clk_set_defaults() again after a clock provider has been
         * probed. This takes care of cases where the DT uses
         * assigned-clocks to set up default parents and rates.
         */
        clk_set_defaults(dev, CLK_DEFAULTS_POST);

        return 0;
}

UCLASS_DRIVER(clk) = {
        .id             = UCLASS_CLK,
        .name           = "clk",
        .post_probe     = clk_uclass_post_probe,
};