uboot/drivers/smem/msm_smem.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Copyright (c) 2015, Sony Mobile Communications AB.
   4 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
   5 * Copyright (c) 2018, Ramon Fried <ramon.fried@gmail.com>
   6 */
   7
   8#include <common.h>
   9#include <errno.h>
  10#include <dm.h>
  11#include <asm/global_data.h>
  12#include <dm/device_compat.h>
  13#include <dm/devres.h>
  14#include <dm/of_access.h>
  15#include <dm/of_addr.h>
  16#include <asm/io.h>
  17#include <linux/bug.h>
  18#include <linux/err.h>
  19#include <linux/ioport.h>
  20#include <linux/io.h>
  21#include <linux/sizes.h>
  22#include <smem.h>
  23
  24DECLARE_GLOBAL_DATA_PTR;
  25
  26/*
  27 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
  29 * in the SoC.
  30 *
 * All systems contain a global heap, accessible by all processors in the SoC,
  32 * with a table of contents data structure (@smem_header) at the beginning of
  33 * the main shared memory block.
  34 *
  35 * The global header contains meta data for allocations as well as a fixed list
  36 * of 512 entries (@smem_global_entry) that can be initialized to reference
  37 * parts of the shared memory space.
  38 *
  39 *
  40 * In addition to this global heap, a set of "private" heaps can be set up at
  41 * boot time with access restrictions so that only certain processor pairs can
  42 * access the data.
  43 *
  44 * These partitions are referenced from an optional partition table
  45 * (@smem_ptable), that is found 4kB from the end of the main smem region. The
  46 * partition table entries (@smem_ptable_entry) lists the involved processors
  47 * (or hosts) and their location in the main shared memory region.
  48 *
  49 * Each partition starts with a header (@smem_partition_header) that identifies
  50 * the partition and holds properties for the two internal memory regions. The
  51 * two regions are cached and non-cached memory respectively. Each region
 * contains a linked list of allocation headers (@smem_private_entry) followed
  53 * their data.
  54 *
  55 * Items in the non-cached region are allocated from the start of the partition
  56 * while items in the cached region are allocated from the end. The free area
  57 * is hence the region between the cached and non-cached offsets. The header of
  58 * cached items comes after the data.
  59 *
  60 * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
  61 * for the global heap. A new global partition is created from the global heap
  62 * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
  63 * set by the bootloader.
  64 *
  65 */
  66
  67/*
  68 * The version member of the smem header contains an array of versions for the
  69 * various software components in the SoC. We verify that the boot loader
  70 * version is a valid version as a sanity check.
  71 */
  72#define SMEM_MASTER_SBL_VERSION_INDEX   7
  73#define SMEM_GLOBAL_HEAP_VERSION        11
  74#define SMEM_GLOBAL_PART_VERSION        12
  75
  76/*
  77 * The first 8 items are only to be allocated by the boot loader while
  78 * initializing the heap.
  79 */
  80#define SMEM_ITEM_LAST_FIXED    8
  81
  82/* Highest accepted item number, for both global and private heaps */
  83#define SMEM_ITEM_COUNT         512
  84
  85/* Processor/host identifier for the application processor */
  86#define SMEM_HOST_APPS          0
  87
  88/* Processor/host identifier for the global partition */
  89#define SMEM_GLOBAL_HOST        0xfffe
  90
  91/* Max number of processors/hosts in a system */
  92#define SMEM_HOST_COUNT         10
  93
/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command:    current command to be executed
 * @status:     status of the currently requested command
 * @params:     parameters to the command
 *
 * Unused by this driver; kept only because it occupies the start of
 * struct smem_header in the layout written by the boot loader.
 */
struct smem_proc_comm {
	__le32 command;
	__le32 status;
	__le32 params[2];
};
 105
/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated:  boolean to indicate if this entry is used
 * @offset:     offset to the allocated space
 * @size:       size of the allocated space, 8 byte aligned
 * @aux_base:   base address for the memory region used by this unit, or 0 for
 *              the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
	__le32 allocated;
	__le32 offset;
	__le32 size;
	__le32 aux_base; /* bits 1:0 reserved */
};
/* Strips the two reserved low bits off @aux_base before comparison */
#define AUX_BASE_MASK           0xfffffffc
 121
/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm:          proc_comm communication interface (legacy)
 * @version:            array of versions for the various subsystems
 * @initialized:        boolean to indicate that smem is initialized
 * @free_offset:        index of the first unallocated byte in smem
 * @available:          number of bytes available for allocation
 * @reserved:           reserved field, must be 0
 * @toc:                array of references to items
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	__le32 version[32];
	__le32 initialized;
	__le32 free_offset;
	__le32 available;
	__le32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};
 141
/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset:     offset, within the main shared memory region, of the partition
 * @size:       size of the partition
 * @flags:      flags for the partition (currently unused)
 * @host0:      first processor/host with access to this partition
 * @host1:      second processor/host with access to this partition
 * @cacheline:  alignment for "cached" entries
 * @reserved:   reserved entries for later use
 */
struct smem_ptable_entry {
	__le32 offset;
	__le32 size;
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 cacheline;
	__le32 reserved[7];
};
 161
/**
 * struct smem_ptable - partition table for the private partitions
 * @magic:      magic number, must be SMEM_PTABLE_MAGIC ("$TOC")
 * @version:    version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved:   for now reserved entries
 * @entry:      list of @smem_ptable_entry for the @num_entries partitions
 *
 * Located 4kB from the end of the main smem region (see
 * qcom_smem_get_ptable()).
 */
struct smem_ptable {
	u8 magic[4];
	__le32 version;
	__le32 num_entries;
	__le32 reserved[5];
	struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */
 179
/**
 * struct smem_partition_header - header of the partitions
 * @magic:      magic number, must be SMEM_PART_MAGIC ("$PRT")
 * @host0:      first processor/host with access to this partition
 * @host1:      second processor/host with access to this partition
 * @size:       size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *              this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *              partition
 * @reserved:   for now reserved entries
 */
struct smem_partition_header {
	u8 magic[4];
	__le16 host0;
	__le16 host1;
	__le32 size;
	__le32 offset_free_uncached;
	__le32 offset_free_cached;
	__le32 reserved[3];
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */
 203
/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary:     magic number, must be SMEM_PRIVATE_CANARY
 * @item:       identifying number of the smem item
 * @size:       size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved:   for now reserved entry
 *
 * For uncached items the header precedes the data; for cached items the
 * header is stored after the data (see cached_entry_to_item()).
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le16 item;
	__le32 size; /* includes padding bytes */
	__le16 padding_data;
	__le16 padding_hdr;
	__le32 reserved;
};
#define SMEM_PRIVATE_CANARY     0xa5a5
 222
/**
 * struct smem_info - smem region info located after the table of contents
 * @magic:      magic number, must be SMEM_INFO_MAGIC ("SIII")
 * @size:       size of the smem region
 * @base_addr:  base address of the smem region
 * @reserved:   for now reserved entry
 * @num_items:  highest accepted item number
 */
struct smem_info {
	u8 magic[4];
	__le32 size;
	__le32 base_addr;
	__le32 reserved;
	__le16 num_items;
};

static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */
 240
/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base:   identifier of aux_mem base (physical base address truncated
 *              to 32 bits, compared against @smem_global_entry.aux_base)
 * @virt_base:  virtual base address of memory with this aux_mem identifier
 * @size:       size of the memory region
 */
struct smem_region {
	u32 aux_base;
	void __iomem *virt_base;
	size_t size;
};
 252
 253/**
 254 * struct qcom_smem - device data for the smem device
 255 * @dev:        device pointer
 256 * @global_partition:   pointer to global partition when in use
 257 * @global_cacheline:   cacheline size for global partition
 258 * @partitions: list of pointers to partitions affecting the current
 259 *              processor/host
 260 * @cacheline:  list of cacheline sizes for each host
 261 * @item_count: max accepted item number
 262 * @num_regions: number of @regions
 263 * @regions:    list of the memory regions defining the shared memory
 264 */
 265struct qcom_smem {
 266        struct udevice *dev;
 267
 268        struct smem_partition_header *global_partition;
 269        size_t global_cacheline;
 270        struct smem_partition_header *partitions[SMEM_HOST_COUNT];
 271        size_t cacheline[SMEM_HOST_COUNT];
 272        u32 item_count;
 273
 274        unsigned int num_regions;
 275        struct smem_region regions[0];
 276};
 277
 278static struct smem_private_entry *
 279phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
 280{
 281        void *p = phdr;
 282
 283        return p + le32_to_cpu(phdr->offset_free_uncached);
 284}
 285
 286static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr,
 287                                        size_t cacheline)
 288{
 289        void *p = phdr;
 290
 291        return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*phdr), cacheline);
 292}
 293
 294static void *phdr_to_last_cached_entry(struct smem_partition_header *phdr)
 295{
 296        void *p = phdr;
 297
 298        return p + le32_to_cpu(phdr->offset_free_cached);
 299}
 300
 301static struct smem_private_entry *
 302phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
 303{
 304        void *p = phdr;
 305
 306        return p + sizeof(*phdr);
 307}
 308
 309static struct smem_private_entry *
 310uncached_entry_next(struct smem_private_entry *e)
 311{
 312        void *p = e;
 313
 314        return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
 315               le32_to_cpu(e->size);
 316}
 317
 318static struct smem_private_entry *
 319cached_entry_next(struct smem_private_entry *e, size_t cacheline)
 320{
 321        void *p = e;
 322
 323        return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
 324}
 325
 326static void *uncached_entry_to_item(struct smem_private_entry *e)
 327{
 328        void *p = e;
 329
 330        return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
 331}
 332
 333static void *cached_entry_to_item(struct smem_private_entry *e)
 334{
 335        void *p = e;
 336
 337        return p - le32_to_cpu(e->size);
 338}
 339
 340/* Pointer to the one and only smem handle */
 341static struct qcom_smem *__smem;
 342
/**
 * qcom_smem_alloc_private() - allocate an item inside a private partition
 * @smem:       smem device data (used for error reporting)
 * @phdr:       header of the partition to allocate from
 * @item:       smem item number the new allocation will be published under
 * @size:       requested payload size in bytes (rounded up to 8)
 *
 * Walks the uncached entry list to verify @item is not already present,
 * then appends a new entry at the uncached free pointer. Returns 0 on
 * success, -EEXIST if @item exists, -EINVAL on a corrupted list (bad
 * canary), or -ENOSPC when the allocation would collide with the cached
 * region growing down from the partition end.
 */
static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   unsigned int item,
				   size_t size)
{
	struct smem_private_entry *hdr, *end;
	size_t alloc_size;
	void *cached;

	hdr = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
	cached = phdr_to_last_cached_entry(phdr);

	/* Scan existing uncached entries; reject duplicates and corruption */
	while (hdr < end) {
		if (hdr->canary != SMEM_PRIVATE_CANARY) {
			dev_err(smem->dev,
				"Found invalid canary in hosts %d:%d partition\n",
				phdr->host0, phdr->host1);
			return -EINVAL;
		}

		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;

		hdr = uncached_entry_next(hdr);
	}

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if ((void *)hdr + alloc_size >= cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	/* hdr now points at the free area; fill in the new entry header */
	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	dmb();
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

	return 0;
}
 393
/**
 * qcom_smem_alloc_global() - allocate an item from the legacy global heap
 * @smem:       smem device data
 * @item:       index into the global table of contents
 * @size:       requested payload size in bytes (rounded up to 8)
 *
 * Reserves space from the global free area and publishes the TOC entry for
 * @item. Returns 0 on success, -EEXIST if the item is already allocated,
 * or -ENOMEM when the heap has insufficient space.
 */
static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned int item,
				  size_t size)
{
	struct smem_global_entry *entry;
	struct smem_header *header;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;

	/* Stage offset and size before flagging the entry as allocated */
	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	dmb();
	entry->allocated = cpu_to_le32(1);

	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);

	return 0;
}
 426
 427/**
 428 * qcom_smem_alloc() - allocate space for a smem item
 429 * @host:       remote processor id, or -1
 430 * @item:       smem item handle
 431 * @size:       number of bytes to be allocated
 432 *
 433 * Allocate space for a given smem item of size @size, given that the item is
 434 * not yet allocated.
 435 */
 436static int qcom_smem_alloc(unsigned int host, unsigned int item, size_t size)
 437{
 438        struct smem_partition_header *phdr;
 439        int ret;
 440
 441        if (!__smem)
 442                return -ENOMEM;
 443
 444        if (item < SMEM_ITEM_LAST_FIXED) {
 445                dev_err(__smem->dev,
 446                        "Rejecting allocation of static entry %d\n", item);
 447                return -EINVAL;
 448        }
 449
 450        if (WARN_ON(item >= __smem->item_count))
 451                return -EINVAL;
 452
 453        if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
 454                phdr = __smem->partitions[host];
 455                ret = qcom_smem_alloc_private(__smem, phdr, item, size);
 456        } else if (__smem->global_partition) {
 457                phdr = __smem->global_partition;
 458                ret = qcom_smem_alloc_private(__smem, phdr, item, size);
 459        } else {
 460                ret = qcom_smem_alloc_global(__smem, item, size);
 461        }
 462
 463        return ret;
 464}
 465
 466static void *qcom_smem_get_global(struct qcom_smem *smem,
 467                                  unsigned int item,
 468                                  size_t *size)
 469{
 470        struct smem_header *header;
 471        struct smem_region *area;
 472        struct smem_global_entry *entry;
 473        u32 aux_base;
 474        unsigned int i;
 475
 476        header = smem->regions[0].virt_base;
 477        entry = &header->toc[item];
 478        if (!entry->allocated)
 479                return ERR_PTR(-ENXIO);
 480
 481        aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
 482
 483        for (i = 0; i < smem->num_regions; i++) {
 484                area = &smem->regions[i];
 485
 486                if (area->aux_base == aux_base || !aux_base) {
 487                        if (size != NULL)
 488                                *size = le32_to_cpu(entry->size);
 489                        return area->virt_base + le32_to_cpu(entry->offset);
 490                }
 491        }
 492
 493        return ERR_PTR(-ENOENT);
 494}
 495
/**
 * qcom_smem_get_private() - look up an item in a private partition
 * @smem:       smem device data (used for error reporting only)
 * @phdr:       header of the partition to search
 * @cacheline:  cacheline alignment of this partition's cached entries
 * @item:       smem item number to find
 * @size:       if non-NULL, filled with the item payload size, excluding
 *              the trailing padding bytes
 *
 * Walks the uncached entry list forward from just after the partition
 * header, then the cached list backward from the end of the partition.
 * Returns a pointer to the item payload, ERR_PTR(-ENOENT) if @item is not
 * present, or ERR_PTR(-EINVAL) when a corrupted (bad canary) entry is
 * encountered.
 */
static void *qcom_smem_get_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   size_t cacheline,
				   unsigned int item,
				   size_t *size)
{
	struct smem_private_entry *e, *end;

	/* Uncached list: walk upward from the header to the free pointer */
	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);

	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return uncached_entry_to_item(e);
		}

		e = uncached_entry_next(e);
	}

	/* Item was not found in the uncached list, search the cached list */

	e = phdr_to_first_cached_entry(phdr, cacheline);
	end = phdr_to_last_cached_entry(phdr);

	/* Cached entries grow downward, so walk from high to low addresses */
	while (e > end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return cached_entry_to_item(e);
		}

		e = cached_entry_next(e, cacheline);
	}

	return ERR_PTR(-ENOENT);

invalid_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %d:%d partition\n",
			phdr->host0, phdr->host1);

	return ERR_PTR(-EINVAL);
}
 550
 551/**
 552 * qcom_smem_get() - resolve ptr of size of a smem item
 553 * @host:       the remote processor, or -1
 554 * @item:       smem item handle
 555 * @size:       pointer to be filled out with size of the item
 556 *
 557 * Looks up smem item and returns pointer to it. Size of smem
 558 * item is returned in @size.
 559 */
 560static void *qcom_smem_get(unsigned int host, unsigned int item, size_t *size)
 561{
 562        struct smem_partition_header *phdr;
 563        size_t cacheln;
 564        void *ptr = ERR_PTR(-ENOMEM);
 565
 566        if (!__smem)
 567                return ptr;
 568
 569        if (WARN_ON(item >= __smem->item_count))
 570                return ERR_PTR(-EINVAL);
 571
 572        if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
 573                phdr = __smem->partitions[host];
 574                cacheln = __smem->cacheline[host];
 575                ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
 576        } else if (__smem->global_partition) {
 577                phdr = __smem->global_partition;
 578                cacheln = __smem->global_cacheline;
 579                ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
 580        } else {
 581                ptr = qcom_smem_get_global(__smem, item, size);
 582        }
 583
 584        return ptr;
 585
 586}
 587
 588/**
 589 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 590 * @host:       the remote processor identifying a partition, or -1
 591 *
 592 * To be used by smem clients as a quick way to determine if any new
 593 * allocations has been made.
 594 */
 595static int qcom_smem_get_free_space(unsigned int host)
 596{
 597        struct smem_partition_header *phdr;
 598        struct smem_header *header;
 599        unsigned int ret;
 600
 601        if (!__smem)
 602                return -ENOMEM;
 603
 604        if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
 605                phdr = __smem->partitions[host];
 606                ret = le32_to_cpu(phdr->offset_free_cached) -
 607                      le32_to_cpu(phdr->offset_free_uncached);
 608        } else if (__smem->global_partition) {
 609                phdr = __smem->global_partition;
 610                ret = le32_to_cpu(phdr->offset_free_cached) -
 611                      le32_to_cpu(phdr->offset_free_uncached);
 612        } else {
 613                header = __smem->regions[0].virt_base;
 614                ret = le32_to_cpu(header->available);
 615        }
 616
 617        return ret;
 618}
 619
 620static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
 621{
 622        struct smem_header *header;
 623        __le32 *versions;
 624
 625        header = smem->regions[0].virt_base;
 626        versions = header->version;
 627
 628        return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
 629}
 630
 631static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
 632{
 633        struct smem_ptable *ptable;
 634        u32 version;
 635
 636        ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
 637        if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
 638                return ERR_PTR(-ENOENT);
 639
 640        version = le32_to_cpu(ptable->version);
 641        if (version != 1) {
 642                dev_err(smem->dev,
 643                        "Unsupported partition header version %d\n", version);
 644                return ERR_PTR(-EINVAL);
 645        }
 646        return ptable;
 647}
 648
 649static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
 650{
 651        struct smem_ptable *ptable;
 652        struct smem_info *info;
 653
 654        ptable = qcom_smem_get_ptable(smem);
 655        if (IS_ERR_OR_NULL(ptable))
 656                return SMEM_ITEM_COUNT;
 657
 658        info = (struct smem_info *)&ptable->entry[ptable->num_entries];
 659        if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
 660                return SMEM_ITEM_COUNT;
 661
 662        return le16_to_cpu(info->num_items);
 663}
 664
 665static int qcom_smem_set_global_partition(struct qcom_smem *smem)
 666{
 667        struct smem_partition_header *header;
 668        struct smem_ptable_entry *entry = NULL;
 669        struct smem_ptable *ptable;
 670        u32 host0, host1, size;
 671        int i;
 672
 673        ptable = qcom_smem_get_ptable(smem);
 674        if (IS_ERR(ptable))
 675                return PTR_ERR(ptable);
 676
 677        for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
 678                entry = &ptable->entry[i];
 679                host0 = le16_to_cpu(entry->host0);
 680                host1 = le16_to_cpu(entry->host1);
 681
 682                if (host0 == SMEM_GLOBAL_HOST && host0 == host1)
 683                        break;
 684        }
 685
 686        if (!entry) {
 687                dev_err(smem->dev, "Missing entry for global partition\n");
 688                return -EINVAL;
 689        }
 690
 691        if (!le32_to_cpu(entry->offset) || !le32_to_cpu(entry->size)) {
 692                dev_err(smem->dev, "Invalid entry for global partition\n");
 693                return -EINVAL;
 694        }
 695
 696        if (smem->global_partition) {
 697                dev_err(smem->dev, "Already found the global partition\n");
 698                return -EINVAL;
 699        }
 700
 701        header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
 702        host0 = le16_to_cpu(header->host0);
 703        host1 = le16_to_cpu(header->host1);
 704
 705        if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
 706                dev_err(smem->dev, "Global partition has invalid magic\n");
 707                return -EINVAL;
 708        }
 709
 710        if (host0 != SMEM_GLOBAL_HOST && host1 != SMEM_GLOBAL_HOST) {
 711                dev_err(smem->dev, "Global partition hosts are invalid\n");
 712                return -EINVAL;
 713        }
 714
 715        if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
 716                dev_err(smem->dev, "Global partition has invalid size\n");
 717                return -EINVAL;
 718        }
 719
 720        size = le32_to_cpu(header->offset_free_uncached);
 721        if (size > le32_to_cpu(header->size)) {
 722                dev_err(smem->dev,
 723                        "Global partition has invalid free pointer\n");
 724                return -EINVAL;
 725        }
 726
 727        smem->global_partition = header;
 728        smem->global_cacheline = le32_to_cpu(entry->cacheline);
 729
 730        return 0;
 731}
 732
/**
 * qcom_smem_enumerate_partitions() - find the partitions involving this host
 * @smem:       smem device data
 * @local_host: processor/host id of the local processor (SMEM_HOST_APPS)
 *
 * Walks the partition table and, for every valid entry that lists
 * @local_host as one of its two hosts, validates the partition header and
 * records it in @smem->partitions[] / @smem->cacheline[] indexed by the
 * remote host. Returns 0 on success or a negative error code on a
 * malformed table or partition header.
 */
static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
					  unsigned int local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	unsigned int remote_host;
	u32 host0, host1;
	int i;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);

		/* Skip partitions that do not involve the local host */
		if (host0 != local_host && host1 != local_host)
			continue;

		/* Skip empty/unused table slots */
		if (!le32_to_cpu(entry->offset))
			continue;

		if (!le32_to_cpu(entry->size))
			continue;

		/* The other listed host identifies the remote end */
		if (host0 == local_host)
			remote_host = host1;
		else
			remote_host = host0;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev,
				"Invalid remote host %d\n",
				remote_host);
			return -EINVAL;
		}

		if (smem->partitions[remote_host]) {
			dev_err(smem->dev,
				"Already found a partition for host %d\n",
				remote_host);
			return -EINVAL;
		}

		/* Cross-check the partition header against the table entry */
		header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
		host0 = le16_to_cpu(header->host0);
		host1 = le16_to_cpu(header->host1);

		if (memcmp(header->magic, SMEM_PART_MAGIC,
			    sizeof(header->magic))) {
			dev_err(smem->dev,
				"Partition %d has invalid magic\n", i);
			return -EINVAL;
		}

		if (host0 != local_host && host1 != local_host) {
			dev_err(smem->dev,
				"Partition %d hosts are invalid\n", i);
			return -EINVAL;
		}

		if (host0 != remote_host && host1 != remote_host) {
			dev_err(smem->dev,
				"Partition %d hosts are invalid\n", i);
			return -EINVAL;
		}

		if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
			dev_err(smem->dev,
				"Partition %d has invalid size\n", i);
			return -EINVAL;
		}

		/* The uncached free pointer must lie within the partition */
		if (le32_to_cpu(header->offset_free_uncached) > le32_to_cpu(header->size)) {
			dev_err(smem->dev,
				"Partition %d has invalid free pointer\n", i);
			return -EINVAL;
		}

		smem->partitions[remote_host] = header;
		smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
	}

	return 0;
}
 821
/**
 * qcom_smem_map_memory() - resolve and map one smem memory region
 * @smem:       smem device data whose regions[] slot is populated
 * @dev:        smem udevice, used for DT lookup and devres-managed mapping
 * @name:       name of the DT phandle property referencing the region node
 * @i:          index into @smem->regions to fill
 *
 * Follows the phandle @name in the device node, reads the target node's
 * "reg" property and ioremaps the region into @smem->regions[i]. Returns
 * 0 on success or a negative error code.
 */
static int qcom_smem_map_memory(struct qcom_smem *smem, struct udevice *dev,
				const char *name, int i)
{
	struct fdt_resource r;
	int ret;
	int node = dev_of_offset(dev);

	/* On success, ret holds the offset of the phandle's target node */
	ret = fdtdec_lookup_phandle(gd->fdt_blob, node, name);
	if (ret < 0) {
		dev_err(dev, "No %s specified\n", name);
		return -EINVAL;
	}

	ret = fdt_get_resource(gd->fdt_blob, ret, "reg", 0, &r);
	if (ret)
		return ret;

	/* aux_base keys lookups against smem_global_entry.aux_base */
	smem->regions[i].aux_base = (u32)r.start;
	smem->regions[i].size = fdt_resource_size(&r);
	smem->regions[i].virt_base = devm_ioremap(dev, r.start, fdt_resource_size(&r));
	if (!smem->regions[i].virt_base)
		return -ENOMEM;

	return 0;
}
 847
 848static int qcom_smem_probe(struct udevice *dev)
 849{
 850        struct smem_header *header;
 851        struct qcom_smem *smem;
 852        size_t array_size;
 853        int num_regions;
 854        u32 version;
 855        int ret;
 856        int node = dev_of_offset(dev);
 857
 858        num_regions = 1;
 859        if (fdtdec_lookup_phandle(gd->fdt_blob, node, "qcomrpm-msg-ram") >= 0)
 860                num_regions++;
 861
 862        array_size = num_regions * sizeof(struct smem_region);
 863        smem = devm_kzalloc(dev, sizeof(*smem) + array_size, GFP_KERNEL);
 864        if (!smem)
 865                return -ENOMEM;
 866
 867        smem->dev = dev;
 868        smem->num_regions = num_regions;
 869
 870        ret = qcom_smem_map_memory(smem, dev, "memory-region", 0);
 871        if (ret)
 872                return ret;
 873
 874        if (num_regions > 1) {
 875                ret = qcom_smem_map_memory(smem, dev,
 876                                        "qcom,rpm-msg-ram", 1);
 877                if (ret)
 878                        return ret;
 879        }
 880
 881        header = smem->regions[0].virt_base;
 882        if (le32_to_cpu(header->initialized) != 1 ||
 883            le32_to_cpu(header->reserved)) {
 884                dev_err(dev, "SMEM is not initialized by SBL\n");
 885                return -EINVAL;
 886        }
 887
 888        version = qcom_smem_get_sbl_version(smem);
 889        switch (version >> 16) {
 890        case SMEM_GLOBAL_PART_VERSION:
 891                ret = qcom_smem_set_global_partition(smem);
 892                if (ret < 0)
 893                        return ret;
 894                smem->item_count = qcom_smem_get_item_count(smem);
 895                break;
 896        case SMEM_GLOBAL_HEAP_VERSION:
 897                smem->item_count = SMEM_ITEM_COUNT;
 898                break;
 899        default:
 900                dev_err(dev, "Unsupported SMEM version 0x%x\n", version);
 901                return -EINVAL;
 902        }
 903
 904        ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
 905        if (ret < 0 && ret != -ENOENT)
 906                return ret;
 907
 908        __smem = smem;
 909
 910        return 0;
 911}
 912
/**
 * qcom_smem_remove() - drop the cached smem handle on device removal
 * @dev:        smem udevice (unused)
 *
 * After this, the static ops wrappers fail with -ENOMEM until re-probe.
 */
static int qcom_smem_remove(struct udevice *dev)
{
	__smem = NULL;

	return 0;
}
 919
/* Device tree match table for the Qualcomm shared memory node */
const struct udevice_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{ }
};
 924
/* UCLASS_SMEM operations, all backed by the static __smem handle */
static const struct smem_ops msm_smem_ops = {
	.alloc = qcom_smem_alloc,
	.get = qcom_smem_get,
	.get_free_space = qcom_smem_get_free_space,
};
 930
/* Driver registration for the Qualcomm shared memory (SMEM) interface */
U_BOOT_DRIVER(qcom_smem) = {
	.name	= "qcom_smem",
	.id	= UCLASS_SMEM,
	.of_match = qcom_smem_of_match,
	.ops = &msm_smem_ops,
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
};
 939