uboot/drivers/smem/msm_smem.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Copyright (c) 2015, Sony Mobile Communications AB.
   4 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
   5 * Copyright (c) 2018, Ramon Fried <ramon.fried@gmail.com>
   6 */
   7
   8#include <common.h>
   9#include <errno.h>
  10#include <dm.h>
  11#include <dm/device_compat.h>
  12#include <dm/devres.h>
  13#include <dm/of_access.h>
  14#include <dm/of_addr.h>
  15#include <asm/io.h>
  16#include <linux/bug.h>
  17#include <linux/err.h>
  18#include <linux/ioport.h>
  19#include <linux/io.h>
  20#include <smem.h>
  21
  22DECLARE_GLOBAL_DATA_PTR;
  23
  24/*
  25 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
  27 * in the SoC.
  28 *
 * All systems contain a global heap, accessible by all processors in the SoC,
  30 * with a table of contents data structure (@smem_header) at the beginning of
  31 * the main shared memory block.
  32 *
  33 * The global header contains meta data for allocations as well as a fixed list
  34 * of 512 entries (@smem_global_entry) that can be initialized to reference
  35 * parts of the shared memory space.
  36 *
  37 *
  38 * In addition to this global heap, a set of "private" heaps can be set up at
  39 * boot time with access restrictions so that only certain processor pairs can
  40 * access the data.
  41 *
  42 * These partitions are referenced from an optional partition table
  43 * (@smem_ptable), that is found 4kB from the end of the main smem region. The
  44 * partition table entries (@smem_ptable_entry) lists the involved processors
  45 * (or hosts) and their location in the main shared memory region.
  46 *
  47 * Each partition starts with a header (@smem_partition_header) that identifies
  48 * the partition and holds properties for the two internal memory regions. The
  49 * two regions are cached and non-cached memory respectively. Each region
 * contains a linked list of allocation headers (@smem_private_entry) followed
 * by their data.
  51 * their data.
  52 *
  53 * Items in the non-cached region are allocated from the start of the partition
  54 * while items in the cached region are allocated from the end. The free area
  55 * is hence the region between the cached and non-cached offsets. The header of
  56 * cached items comes after the data.
  57 *
  58 * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
  59 * for the global heap. A new global partition is created from the global heap
  60 * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
  61 * set by the bootloader.
  62 *
  63 */
  64
  65/*
  66 * The version member of the smem header contains an array of versions for the
  67 * various software components in the SoC. We verify that the boot loader
  68 * version is a valid version as a sanity check.
  69 */
  70#define SMEM_MASTER_SBL_VERSION_INDEX   7
  71#define SMEM_GLOBAL_HEAP_VERSION        11
  72#define SMEM_GLOBAL_PART_VERSION        12
  73
  74/*
  75 * The first 8 items are only to be allocated by the boot loader while
  76 * initializing the heap.
  77 */
  78#define SMEM_ITEM_LAST_FIXED    8
  79
  80/* Highest accepted item number, for both global and private heaps */
  81#define SMEM_ITEM_COUNT         512
  82
  83/* Processor/host identifier for the application processor */
  84#define SMEM_HOST_APPS          0
  85
  86/* Processor/host identifier for the global partition */
  87#define SMEM_GLOBAL_HOST        0xfffe
  88
  89/* Max number of processors/hosts in a system */
  90#define SMEM_HOST_COUNT         10
  91
/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command:    current command to be executed
 * @status:     status of the currently requested command
 * @params:     parameters to the command
 *
 * Not used by this driver; retained only because it occupies the first bytes
 * of struct smem_header and the layout is fixed by the secondary bootloader.
 */
struct smem_proc_comm {
	__le32 command;
	__le32 status;
	__le32 params[2];
};
 103
/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated:  boolean to indicate if this entry is used
 * @offset:     offset to the allocated space
 * @size:       size of the allocated space, 8 byte aligned
 * @aux_base:   base address for the memory region used by this unit, or 0 for
 *              the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
	__le32 allocated;
	__le32 offset;
	__le32 size;
	__le32 aux_base; /* bits 1:0 reserved */
};
/* Strips the two reserved low bits before comparing @aux_base to a region */
#define AUX_BASE_MASK		0xfffffffc
 119
/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm:		proc_comm communication interface (legacy)
 * @version:		array of versions for the various subsystems
 * @initialized:	boolean to indicate that smem is initialized
 * @free_offset:	index of the first unallocated byte in smem
 * @available:		number of bytes available for allocation
 * @reserved:		reserved field, must be 0
 * @toc:		array of references to items
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	__le32 version[32];
	__le32 initialized;
	__le32 free_offset;
	__le32 available;
	__le32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};
 139
/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset:     offset, within the main shared memory region, of the partition
 * @size:       size of the partition
 * @flags:      flags for the partition (currently unused)
 * @host0:      first processor/host with access to this partition
 * @host1:      second processor/host with access to this partition
 * @cacheline:  alignment, in bytes, for "cached" entries of this partition
 * @reserved:   reserved entries for later use
 */
struct smem_ptable_entry {
	__le32 offset;
	__le32 size;
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 cacheline;
	__le32 reserved[7];
};
 159
/**
 * struct smem_ptable - partition table for the private partitions
 * @magic:      magic number, must be SMEM_PTABLE_MAGIC
 * @version:    version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved:   for now reserved entries
 * @entry:      list of @smem_ptable_entry for the @num_entries partitions
 *
 * The table is located SZ_4K before the end of the main smem region
 * (see qcom_smem_get_ptable()).
 */
struct smem_ptable {
	u8 magic[4];
	__le32 version;
	__le32 num_entries;
	__le32 reserved[5];
	struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */
 177
/**
 * struct smem_partition_header - header of the partitions
 * @magic:      magic number, must be SMEM_PART_MAGIC
 * @host0:      first processor/host with access to this partition
 * @host1:      second processor/host with access to this partition
 * @size:       size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *              this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *              partition
 * @reserved:   for now reserved entries
 */
struct smem_partition_header {
	u8 magic[4];
	__le16 host0;
	__le16 host1;
	__le32 size;
	__le32 offset_free_uncached;
	__le32 offset_free_cached;
	__le32 reserved[3];
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */
 201
/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary:     magic number, must be SMEM_PRIVATE_CANARY
 * @item:       identifying number of the smem item
 * @size:       size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved:   for now reserved entry
 *
 * For uncached items this header precedes the data; for cached items it is
 * stored after the data (see cached_entry_to_item()).
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le16 item;
	__le32 size; /* includes padding bytes */
	__le16 padding_data;
	__le16 padding_hdr;
	__le32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5
 220
/**
 * struct smem_info - smem region info located after the table of contents
 * @magic:      magic number, must be SMEM_INFO_MAGIC
 * @size:       size of the smem region
 * @base_addr:  base address of the smem region
 * @reserved:   for now reserved entry
 * @num_items:  highest accepted item number
 *
 * Located directly after the last @smem_ptable entry (see
 * qcom_smem_get_item_count()).
 */
struct smem_info {
	u8 magic[4];
	__le32 size;
	__le32 base_addr;
	__le32 reserved;
	__le16 num_items;
};

static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */
 238
/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base:   identifier of aux_mem base (physical address from the DT
 *              "reg" property, truncated to 32 bits)
 * @virt_base:  virtual base address of memory with this aux_mem identifier
 * @size:       size of the memory region
 */
struct smem_region {
	u32 aux_base;
	void __iomem *virt_base;
	size_t size;
};
 250
 251/**
 252 * struct qcom_smem - device data for the smem device
 253 * @dev:        device pointer
 254 * @global_partition:   pointer to global partition when in use
 255 * @global_cacheline:   cacheline size for global partition
 256 * @partitions: list of pointers to partitions affecting the current
 257 *              processor/host
 258 * @cacheline:  list of cacheline sizes for each host
 259 * @item_count: max accepted item number
 260 * @num_regions: number of @regions
 261 * @regions:    list of the memory regions defining the shared memory
 262 */
 263struct qcom_smem {
 264        struct udevice *dev;
 265
 266        struct smem_partition_header *global_partition;
 267        size_t global_cacheline;
 268        struct smem_partition_header *partitions[SMEM_HOST_COUNT];
 269        size_t cacheline[SMEM_HOST_COUNT];
 270        u32 item_count;
 271
 272        unsigned int num_regions;
 273        struct smem_region regions[0];
 274};
 275
 276static struct smem_private_entry *
 277phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
 278{
 279        void *p = phdr;
 280
 281        return p + le32_to_cpu(phdr->offset_free_uncached);
 282}
 283
 284static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr,
 285                                        size_t cacheline)
 286{
 287        void *p = phdr;
 288
 289        return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*phdr), cacheline);
 290}
 291
 292static void *phdr_to_last_cached_entry(struct smem_partition_header *phdr)
 293{
 294        void *p = phdr;
 295
 296        return p + le32_to_cpu(phdr->offset_free_cached);
 297}
 298
 299static struct smem_private_entry *
 300phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
 301{
 302        void *p = phdr;
 303
 304        return p + sizeof(*phdr);
 305}
 306
 307static struct smem_private_entry *
 308uncached_entry_next(struct smem_private_entry *e)
 309{
 310        void *p = e;
 311
 312        return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
 313               le32_to_cpu(e->size);
 314}
 315
 316static struct smem_private_entry *
 317cached_entry_next(struct smem_private_entry *e, size_t cacheline)
 318{
 319        void *p = e;
 320
 321        return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
 322}
 323
 324static void *uncached_entry_to_item(struct smem_private_entry *e)
 325{
 326        void *p = e;
 327
 328        return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
 329}
 330
 331static void *cached_entry_to_item(struct smem_private_entry *e)
 332{
 333        void *p = e;
 334
 335        return p - le32_to_cpu(e->size);
 336}
 337
 338/* Pointer to the one and only smem handle */
 339static struct qcom_smem *__smem;
 340
 341static int qcom_smem_alloc_private(struct qcom_smem *smem,
 342                                   struct smem_partition_header *phdr,
 343                                   unsigned int item,
 344                                   size_t size)
 345{
 346        struct smem_private_entry *hdr, *end;
 347        size_t alloc_size;
 348        void *cached;
 349
 350        hdr = phdr_to_first_uncached_entry(phdr);
 351        end = phdr_to_last_uncached_entry(phdr);
 352        cached = phdr_to_last_cached_entry(phdr);
 353
 354        while (hdr < end) {
 355                if (hdr->canary != SMEM_PRIVATE_CANARY) {
 356                        dev_err(smem->dev,
 357                                "Found invalid canary in hosts %d:%d partition\n",
 358                                phdr->host0, phdr->host1);
 359                        return -EINVAL;
 360                }
 361
 362                if (le16_to_cpu(hdr->item) == item)
 363                        return -EEXIST;
 364
 365                hdr = uncached_entry_next(hdr);
 366        }
 367
 368        /* Check that we don't grow into the cached region */
 369        alloc_size = sizeof(*hdr) + ALIGN(size, 8);
 370        if ((void *)hdr + alloc_size >= cached) {
 371                dev_err(smem->dev, "Out of memory\n");
 372                return -ENOSPC;
 373        }
 374
 375        hdr->canary = SMEM_PRIVATE_CANARY;
 376        hdr->item = cpu_to_le16(item);
 377        hdr->size = cpu_to_le32(ALIGN(size, 8));
 378        hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
 379        hdr->padding_hdr = 0;
 380
 381        /*
 382         * Ensure the header is written before we advance the free offset, so
 383         * that remote processors that does not take the remote spinlock still
 384         * gets a consistent view of the linked list.
 385         */
 386        dmb();
 387        le32_add_cpu(&phdr->offset_free_uncached, alloc_size);
 388
 389        return 0;
 390}
 391
/**
 * qcom_smem_alloc_global() - allocate an item in the v11 global heap
 * @smem:	smem device data
 * @item:	item number to allocate (index into the header's toc)
 * @size:	number of bytes requested; rounded up to an 8-byte multiple
 *
 * Return: 0 on success, -EEXIST if the toc entry is already in use,
 * -ENOMEM if the heap has insufficient free space.
 */
static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned int item,
				  size_t size)
{
	struct smem_global_entry *entry;
	struct smem_header *header;

	/* The global heap header sits at the start of the first region */
	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;

	/* Both fields already little-endian; copy free_offset verbatim */
	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	dmb();
	entry->allocated = cpu_to_le32(1);

	/* Claim the space from the heap's free area */
	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);

	return 0;
}
 424
 425/**
 426 * qcom_smem_alloc() - allocate space for a smem item
 427 * @host:       remote processor id, or -1
 428 * @item:       smem item handle
 429 * @size:       number of bytes to be allocated
 430 *
 431 * Allocate space for a given smem item of size @size, given that the item is
 432 * not yet allocated.
 433 */
 434static int qcom_smem_alloc(unsigned int host, unsigned int item, size_t size)
 435{
 436        struct smem_partition_header *phdr;
 437        int ret;
 438
 439        if (!__smem)
 440                return -EPROBE_DEFER;
 441
 442        if (item < SMEM_ITEM_LAST_FIXED) {
 443                dev_err(__smem->dev,
 444                        "Rejecting allocation of static entry %d\n", item);
 445                return -EINVAL;
 446        }
 447
 448        if (WARN_ON(item >= __smem->item_count))
 449                return -EINVAL;
 450
 451        if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
 452                phdr = __smem->partitions[host];
 453                ret = qcom_smem_alloc_private(__smem, phdr, item, size);
 454        } else if (__smem->global_partition) {
 455                phdr = __smem->global_partition;
 456                ret = qcom_smem_alloc_private(__smem, phdr, item, size);
 457        } else {
 458                ret = qcom_smem_alloc_global(__smem, item, size);
 459        }
 460
 461        return ret;
 462}
 463
 464static void *qcom_smem_get_global(struct qcom_smem *smem,
 465                                  unsigned int item,
 466                                  size_t *size)
 467{
 468        struct smem_header *header;
 469        struct smem_region *area;
 470        struct smem_global_entry *entry;
 471        u32 aux_base;
 472        unsigned int i;
 473
 474        header = smem->regions[0].virt_base;
 475        entry = &header->toc[item];
 476        if (!entry->allocated)
 477                return ERR_PTR(-ENXIO);
 478
 479        aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
 480
 481        for (i = 0; i < smem->num_regions; i++) {
 482                area = &smem->regions[i];
 483
 484                if (area->aux_base == aux_base || !aux_base) {
 485                        if (size != NULL)
 486                                *size = le32_to_cpu(entry->size);
 487                        return area->virt_base + le32_to_cpu(entry->offset);
 488                }
 489        }
 490
 491        return ERR_PTR(-ENOENT);
 492}
 493
/**
 * qcom_smem_get_private() - look up an item in a private partition
 * @smem:	smem device data
 * @phdr:	header of the partition to search
 * @cacheline:	cacheline alignment of this partition's cached entries
 * @item:	item number to look up
 * @size:	if non-NULL, filled with the item size minus its data padding
 *
 * Searches the uncached entry list (growing up from the partition header)
 * first, then the cached list (growing down from the end of the partition).
 *
 * Return: pointer to the item data, ERR_PTR(-ENOENT) if not found, or
 * ERR_PTR(-EINVAL) when a corrupted entry header is encountered.
 */
static void *qcom_smem_get_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   size_t cacheline,
				   unsigned int item,
				   size_t *size)
{
	struct smem_private_entry *e, *end;

	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);

	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return uncached_entry_to_item(e);
		}

		e = uncached_entry_next(e);
	}

	/* Item was not found in the uncached list, search the cached list */

	e = phdr_to_first_cached_entry(phdr, cacheline);
	end = phdr_to_last_cached_entry(phdr);

	/* Cached entries grow downward, so walk until the free watermark */
	while (e > end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return cached_entry_to_item(e);
		}

		e = cached_entry_next(e, cacheline);
	}

	return ERR_PTR(-ENOENT);

invalid_canary:
	/* NOTE(review): host0/host1 are __le16; values shown raw here */
	dev_err(smem->dev, "Found invalid canary in hosts %d:%d partition\n",
			phdr->host0, phdr->host1);

	return ERR_PTR(-EINVAL);
}
 548
 549/**
 550 * qcom_smem_get() - resolve ptr of size of a smem item
 551 * @host:       the remote processor, or -1
 552 * @item:       smem item handle
 553 * @size:       pointer to be filled out with size of the item
 554 *
 555 * Looks up smem item and returns pointer to it. Size of smem
 556 * item is returned in @size.
 557 */
 558static void *qcom_smem_get(unsigned int host, unsigned int item, size_t *size)
 559{
 560        struct smem_partition_header *phdr;
 561        size_t cacheln;
 562        void *ptr = ERR_PTR(-EPROBE_DEFER);
 563
 564        if (!__smem)
 565                return ptr;
 566
 567        if (WARN_ON(item >= __smem->item_count))
 568                return ERR_PTR(-EINVAL);
 569
 570        if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
 571                phdr = __smem->partitions[host];
 572                cacheln = __smem->cacheline[host];
 573                ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
 574        } else if (__smem->global_partition) {
 575                phdr = __smem->global_partition;
 576                cacheln = __smem->global_cacheline;
 577                ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
 578        } else {
 579                ptr = qcom_smem_get_global(__smem, item, size);
 580        }
 581
 582        return ptr;
 583
 584}
 585
 586/**
 587 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 588 * @host:       the remote processor identifying a partition, or -1
 589 *
 590 * To be used by smem clients as a quick way to determine if any new
 591 * allocations has been made.
 592 */
 593static int qcom_smem_get_free_space(unsigned int host)
 594{
 595        struct smem_partition_header *phdr;
 596        struct smem_header *header;
 597        unsigned int ret;
 598
 599        if (!__smem)
 600                return -EPROBE_DEFER;
 601
 602        if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
 603                phdr = __smem->partitions[host];
 604                ret = le32_to_cpu(phdr->offset_free_cached) -
 605                      le32_to_cpu(phdr->offset_free_uncached);
 606        } else if (__smem->global_partition) {
 607                phdr = __smem->global_partition;
 608                ret = le32_to_cpu(phdr->offset_free_cached) -
 609                      le32_to_cpu(phdr->offset_free_uncached);
 610        } else {
 611                header = __smem->regions[0].virt_base;
 612                ret = le32_to_cpu(header->available);
 613        }
 614
 615        return ret;
 616}
 617
 618static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
 619{
 620        struct smem_header *header;
 621        __le32 *versions;
 622
 623        header = smem->regions[0].virt_base;
 624        versions = header->version;
 625
 626        return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
 627}
 628
 629static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
 630{
 631        struct smem_ptable *ptable;
 632        u32 version;
 633
 634        ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
 635        if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
 636                return ERR_PTR(-ENOENT);
 637
 638        version = le32_to_cpu(ptable->version);
 639        if (version != 1) {
 640                dev_err(smem->dev,
 641                        "Unsupported partition header version %d\n", version);
 642                return ERR_PTR(-EINVAL);
 643        }
 644        return ptable;
 645}
 646
 647static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
 648{
 649        struct smem_ptable *ptable;
 650        struct smem_info *info;
 651
 652        ptable = qcom_smem_get_ptable(smem);
 653        if (IS_ERR_OR_NULL(ptable))
 654                return SMEM_ITEM_COUNT;
 655
 656        info = (struct smem_info *)&ptable->entry[ptable->num_entries];
 657        if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
 658                return SMEM_ITEM_COUNT;
 659
 660        return le16_to_cpu(info->num_items);
 661}
 662
 663static int qcom_smem_set_global_partition(struct qcom_smem *smem)
 664{
 665        struct smem_partition_header *header;
 666        struct smem_ptable_entry *entry = NULL;
 667        struct smem_ptable *ptable;
 668        u32 host0, host1, size;
 669        int i;
 670
 671        ptable = qcom_smem_get_ptable(smem);
 672        if (IS_ERR(ptable))
 673                return PTR_ERR(ptable);
 674
 675        for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
 676                entry = &ptable->entry[i];
 677                host0 = le16_to_cpu(entry->host0);
 678                host1 = le16_to_cpu(entry->host1);
 679
 680                if (host0 == SMEM_GLOBAL_HOST && host0 == host1)
 681                        break;
 682        }
 683
 684        if (!entry) {
 685                dev_err(smem->dev, "Missing entry for global partition\n");
 686                return -EINVAL;
 687        }
 688
 689        if (!le32_to_cpu(entry->offset) || !le32_to_cpu(entry->size)) {
 690                dev_err(smem->dev, "Invalid entry for global partition\n");
 691                return -EINVAL;
 692        }
 693
 694        if (smem->global_partition) {
 695                dev_err(smem->dev, "Already found the global partition\n");
 696                return -EINVAL;
 697        }
 698
 699        header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
 700        host0 = le16_to_cpu(header->host0);
 701        host1 = le16_to_cpu(header->host1);
 702
 703        if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
 704                dev_err(smem->dev, "Global partition has invalid magic\n");
 705                return -EINVAL;
 706        }
 707
 708        if (host0 != SMEM_GLOBAL_HOST && host1 != SMEM_GLOBAL_HOST) {
 709                dev_err(smem->dev, "Global partition hosts are invalid\n");
 710                return -EINVAL;
 711        }
 712
 713        if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
 714                dev_err(smem->dev, "Global partition has invalid size\n");
 715                return -EINVAL;
 716        }
 717
 718        size = le32_to_cpu(header->offset_free_uncached);
 719        if (size > le32_to_cpu(header->size)) {
 720                dev_err(smem->dev,
 721                        "Global partition has invalid free pointer\n");
 722                return -EINVAL;
 723        }
 724
 725        smem->global_partition = header;
 726        smem->global_cacheline = le32_to_cpu(entry->cacheline);
 727
 728        return 0;
 729}
 730
/**
 * qcom_smem_enumerate_partitions() - record all partitions shared with us
 * @smem:	smem device data
 * @local_host:	our processor/host id (SMEM_HOST_APPS)
 *
 * Walks the partition table and, for every entry that involves @local_host,
 * validates the referenced partition header and stores it (indexed by the
 * remote host) in @smem->partitions together with its cacheline.
 *
 * Return: 0 on success (including when no entries match), negative errno on
 * a malformed table or partition header.
 */
static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
					  unsigned int local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	unsigned int remote_host;
	u32 host0, host1;
	int i;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);

		/* Skip partitions that do not involve the local host */
		if (host0 != local_host && host1 != local_host)
			continue;

		/* Skip unused/empty table slots */
		if (!le32_to_cpu(entry->offset))
			continue;

		if (!le32_to_cpu(entry->size))
			continue;

		/* The peer is whichever host field is not us */
		if (host0 == local_host)
			remote_host = host1;
		else
			remote_host = host0;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev,
				"Invalid remote host %d\n",
				remote_host);
			return -EINVAL;
		}

		/* Only one partition per remote host is allowed */
		if (smem->partitions[remote_host]) {
			dev_err(smem->dev,
				"Already found a partition for host %d\n",
				remote_host);
			return -EINVAL;
		}

		header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
		host0 = le16_to_cpu(header->host0);
		host1 = le16_to_cpu(header->host1);

		if (memcmp(header->magic, SMEM_PART_MAGIC,
			    sizeof(header->magic))) {
			dev_err(smem->dev,
				"Partition %d has invalid magic\n", i);
			return -EINVAL;
		}

		/* The header's host pair must match the table entry's */
		if (host0 != local_host && host1 != local_host) {
			dev_err(smem->dev,
				"Partition %d hosts are invalid\n", i);
			return -EINVAL;
		}

		if (host0 != remote_host && host1 != remote_host) {
			dev_err(smem->dev,
				"Partition %d hosts are invalid\n", i);
			return -EINVAL;
		}

		if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
			dev_err(smem->dev,
				"Partition %d has invalid size\n", i);
			return -EINVAL;
		}

		/* The uncached free watermark must stay inside the partition */
		if (le32_to_cpu(header->offset_free_uncached) > le32_to_cpu(header->size)) {
			dev_err(smem->dev,
				"Partition %d has invalid free pointer\n", i);
			return -EINVAL;
		}

		smem->partitions[remote_host] = header;
		smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
	}

	return 0;
}
 819
 820static int qcom_smem_map_memory(struct qcom_smem *smem, struct udevice *dev,
 821                                const char *name, int i)
 822{
 823        struct fdt_resource r;
 824        int ret;
 825        int node = dev_of_offset(dev);
 826
 827        ret = fdtdec_lookup_phandle(gd->fdt_blob, node, name);
 828        if (ret < 0) {
 829                dev_err(dev, "No %s specified\n", name);
 830                return -EINVAL;
 831        }
 832
 833        ret = fdt_get_resource(gd->fdt_blob, ret, "reg", 0, &r);
 834        if (ret)
 835                return ret;
 836
 837        smem->regions[i].aux_base = (u32)r.start;
 838        smem->regions[i].size = fdt_resource_size(&r);
 839        smem->regions[i].virt_base = devm_ioremap(dev, r.start, fdt_resource_size(&r));
 840        if (!smem->regions[i].virt_base)
 841                return -ENOMEM;
 842
 843        return 0;
 844}
 845
 846static int qcom_smem_probe(struct udevice *dev)
 847{
 848        struct smem_header *header;
 849        struct qcom_smem *smem;
 850        size_t array_size;
 851        int num_regions;
 852        u32 version;
 853        int ret;
 854        int node = dev_of_offset(dev);
 855
 856        num_regions = 1;
 857        if (fdtdec_lookup_phandle(gd->fdt_blob, node, "qcomrpm-msg-ram") >= 0)
 858                num_regions++;
 859
 860        array_size = num_regions * sizeof(struct smem_region);
 861        smem = devm_kzalloc(dev, sizeof(*smem) + array_size, GFP_KERNEL);
 862        if (!smem)
 863                return -ENOMEM;
 864
 865        smem->dev = dev;
 866        smem->num_regions = num_regions;
 867
 868        ret = qcom_smem_map_memory(smem, dev, "memory-region", 0);
 869        if (ret)
 870                return ret;
 871
 872        if (num_regions > 1) {
 873                ret = qcom_smem_map_memory(smem, dev,
 874                                        "qcom,rpm-msg-ram", 1);
 875                if (ret)
 876                        return ret;
 877        }
 878
 879        header = smem->regions[0].virt_base;
 880        if (le32_to_cpu(header->initialized) != 1 ||
 881            le32_to_cpu(header->reserved)) {
 882                dev_err(dev, "SMEM is not initialized by SBL\n");
 883                return -EINVAL;
 884        }
 885
 886        version = qcom_smem_get_sbl_version(smem);
 887        switch (version >> 16) {
 888        case SMEM_GLOBAL_PART_VERSION:
 889                ret = qcom_smem_set_global_partition(smem);
 890                if (ret < 0)
 891                        return ret;
 892                smem->item_count = qcom_smem_get_item_count(smem);
 893                break;
 894        case SMEM_GLOBAL_HEAP_VERSION:
 895                smem->item_count = SMEM_ITEM_COUNT;
 896                break;
 897        default:
 898                dev_err(dev, "Unsupported SMEM version 0x%x\n", version);
 899                return -EINVAL;
 900        }
 901
 902        ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
 903        if (ret < 0 && ret != -ENOENT)
 904                return ret;
 905
 906        __smem = smem;
 907
 908        return 0;
 909}
 910
/* Drop the global handle so clients get -EPROBE_DEFER after removal */
static int qcom_smem_remove(struct udevice *dev)
{
	__smem = NULL;

	return 0;
}
 917
/* Device tree match table for the Qualcomm shared-memory node */
const struct udevice_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{ }
};

/* UCLASS_SMEM operations, all backed by the static __smem handle */
static const struct smem_ops msm_smem_ops = {
	.alloc = qcom_smem_alloc,
	.get = qcom_smem_get,
	.get_free_space = qcom_smem_get_free_space,
};

U_BOOT_DRIVER(qcom_smem) = {
	.name	= "qcom_smem",
	.id	= UCLASS_SMEM,
	.of_match = qcom_smem_of_match,
	.ops = &msm_smem_ops,
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
};
 937