// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>

/*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 * in the SoC.
 *
 * All systems contain a global heap, accessible by all processors in the SoC,
 * with a table of contents data structure (@smem_header) at the beginning of
 * the main shared memory block.
 *
 * The global header contains metadata for allocations as well as a fixed list
 * of 512 entries (@smem_global_entry) that can be initialized to reference
 * parts of the shared memory space.
 *
 * In addition to this global heap a set of "private" heaps can be set up at
 * boot time with access restrictions so that only certain processor pairs can
 * access the data.
 *
 * These partitions are referenced from an optional partition table
 * (@smem_ptable), that is found 4kB from the end of the main smem region. The
 * partition table entries (@smem_ptable_entry) list the involved processors
 * (or hosts) and their location in the main shared memory region.
 *
 * Each partition starts with a header (@smem_partition_header) that identifies
 * the partition and holds properties for the two internal memory regions. The
 * two regions are cached and non-cached memory respectively. Each region
 * contains a linked list of allocation headers (@smem_private_entry) followed
 * by their data.
 *
 * Items in the non-cached region are allocated from the start of the partition
 * while items in the cached region are allocated from the end. The free area
 * is hence the region between the cached and non-cached offsets. The header of
 * cached items comes after the data.
 *
 * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
 * for the global heap. A new global partition is created from the global heap
 * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
 * set by the bootloader.
 *
 * To synchronize allocations in the shared memory heaps a remote spinlock must
 * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
 * platforms.
 */
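
/*
 * An illustrative layout sketch of a private partition, as implied by the
 * description above (not to scale):
 *
 *	+---------------------------+ <- partition start
 *	|   smem_partition_header   |
 *	+---------------------------+
 *	| hdr | data | hdr | data   |  uncached items, growing upwards
 *	+---------------------------+ <- offset_free_uncached
 *	|         free space        |
 *	+---------------------------+ <- offset_free_cached
 *	| data | hdr | data | hdr   |  cached items, growing downwards
 *	+---------------------------+ <- size
 */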

/*
 * The version member of the smem header contains an array of versions for the
 * various software components in the SoC. We verify that the boot loader
 * version is a valid version as a sanity check.
 */
#define SMEM_MASTER_SBL_VERSION_INDEX   7
#define SMEM_GLOBAL_HEAP_VERSION        11
#define SMEM_GLOBAL_PART_VERSION        12

/*
 * The first 8 items are only to be allocated by the boot loader while
 * initializing the heap.
 */
#define SMEM_ITEM_LAST_FIXED    8

/* Highest accepted item number, for both global and private heaps */
#define SMEM_ITEM_COUNT         512

/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS          0

/* Processor/host identifier for the global partition */
#define SMEM_GLOBAL_HOST        0xfffe

/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT         14

/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command:   current command to be executed
 * @status:    status of the currently requested command
 * @params:    parameters to the command
 */
struct smem_proc_comm {
        __le32 command;
        __le32 status;
        __le32 params[2];
};

/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated:  boolean to indicate if this entry is used
 * @offset:     offset to the allocated space
 * @size:       size of the allocated space, 8 byte aligned
 * @aux_base:   base address for the memory region used by this unit, or 0 for
 *              the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
        __le32 allocated;
        __le32 offset;
        __le32 size;
        __le32 aux_base; /* bits 1:0 reserved */
};
#define AUX_BASE_MASK           0xfffffffc

/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm:          proc_comm communication interface (legacy)
 * @version:            array of versions for the various subsystems
 * @initialized:        boolean to indicate that smem is initialized
 * @free_offset:        index of the first unallocated byte in smem
 * @available:          number of bytes available for allocation
 * @reserved:           reserved field, must be 0
 * @toc:                array of references to items
 */
struct smem_header {
        struct smem_proc_comm proc_comm[4];
        __le32 version[32];
        __le32 initialized;
        __le32 free_offset;
        __le32 available;
        __le32 reserved;
        struct smem_global_entry toc[SMEM_ITEM_COUNT];
};

/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset:     offset, within the main shared memory region, of the partition
 * @size:       size of the partition
 * @flags:      flags for the partition (currently unused)
 * @host0:      first processor/host with access to this partition
 * @host1:      second processor/host with access to this partition
 * @cacheline:  alignment for "cached" entries
 * @reserved:   reserved entries for later use
 */
struct smem_ptable_entry {
        __le32 offset;
        __le32 size;
        __le32 flags;
        __le16 host0;
        __le16 host1;
        __le32 cacheline;
        __le32 reserved[7];
};

/**
 * struct smem_ptable - partition table for the private partitions
 * @magic:      magic number, must be SMEM_PTABLE_MAGIC
 * @version:    version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved:   for now reserved entries
 * @entry:      list of @smem_ptable_entry for the @num_entries partitions
 */
struct smem_ptable {
        u8 magic[4];
        __le32 version;
        __le32 num_entries;
        __le32 reserved[5];
        struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */

/**
 * struct smem_partition_header - header of the partitions
 * @magic:      magic number, must be SMEM_PART_MAGIC
 * @host0:      first processor/host with access to this partition
 * @host1:      second processor/host with access to this partition
 * @size:       size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *              this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *              partition
 * @reserved:   for now reserved entries
 */
struct smem_partition_header {
        u8 magic[4];
        __le16 host0;
        __le16 host1;
        __le32 size;
        __le32 offset_free_uncached;
        __le32 offset_free_cached;
        __le32 reserved[3];
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */

/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary:     magic number, must be SMEM_PRIVATE_CANARY
 * @item:       identifying number of the smem item
 * @size:       size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved:   for now reserved entry
 */
struct smem_private_entry {
        u16 canary; /* bytes are the same so no swapping needed */
        __le16 item;
        __le32 size; /* includes padding bytes */
        __le16 padding_data;
        __le16 padding_hdr;
        __le32 reserved;
};
#define SMEM_PRIVATE_CANARY     0xa5a5

/**
 * struct smem_info - smem region info located after the table of contents
 * @magic:      magic number, must be SMEM_INFO_MAGIC
 * @size:       size of the smem region
 * @base_addr:  base address of the smem region
 * @reserved:   for now reserved entry
 * @num_items:  highest accepted item number
 */
struct smem_info {
        u8 magic[4];
        __le32 size;
        __le32 base_addr;
        __le32 reserved;
        __le16 num_items;
};

static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* "SIII" */

/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base:   identifier of aux_mem base
 * @virt_base:  virtual base address of memory with this aux_mem identifier
 * @size:       size of the memory region
 */
struct smem_region {
        u32 aux_base;
        void __iomem *virt_base;
        size_t size;
};

/**
 * struct qcom_smem - device data for the smem device
 * @dev:        device pointer
 * @hwlock:     reference to a hwspinlock
 * @global_partition:   pointer to global partition when in use
 * @global_cacheline:   cacheline size for global partition
 * @partitions: list of pointers to partitions affecting the current
 *              processor/host
 * @cacheline:  list of cacheline sizes for each host
 * @item_count: max accepted item number
 * @socinfo:    platform device pointer
 * @num_regions: number of @regions
 * @regions:    list of the memory regions defining the shared memory
 */
struct qcom_smem {
        struct device *dev;

        struct hwspinlock *hwlock;

        struct smem_partition_header *global_partition;
        size_t global_cacheline;
        struct smem_partition_header *partitions[SMEM_HOST_COUNT];
        size_t cacheline[SMEM_HOST_COUNT];
        u32 item_count;
        struct platform_device *socinfo;

        unsigned num_regions;
        struct smem_region regions[];
};

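/*
 * Pointer-arithmetic helpers for walking the two item lists of a
 * partition. Uncached entries grow upwards from just after the partition
 * header, cached entries grow downwards from the end of the partition,
 * and a cached entry's header sits after its data, hence the
 * subtraction-based arithmetic below.
 */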
static void *
phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
{
        void *p = phdr;

        return p + le32_to_cpu(phdr->offset_free_uncached);
}

static struct smem_private_entry *
phdr_to_first_cached_entry(struct smem_partition_header *phdr,
                                        size_t cacheline)
{
        void *p = phdr;
        struct smem_private_entry *e;

        return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
}

static void *
phdr_to_last_cached_entry(struct smem_partition_header *phdr)
{
        void *p = phdr;

        return p + le32_to_cpu(phdr->offset_free_cached);
}

static struct smem_private_entry *
phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
{
        void *p = phdr;

        return p + sizeof(*phdr);
}

static struct smem_private_entry *
uncached_entry_next(struct smem_private_entry *e)
{
        void *p = e;

        return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
               le32_to_cpu(e->size);
}

static struct smem_private_entry *
cached_entry_next(struct smem_private_entry *e, size_t cacheline)
{
        void *p = e;

        return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
}

static void *uncached_entry_to_item(struct smem_private_entry *e)
{
        void *p = e;

        return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}

static void *cached_entry_to_item(struct smem_private_entry *e)
{
        void *p = e;

        return p - le32_to_cpu(e->size);
}

/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;

/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT      1000

static int qcom_smem_alloc_private(struct qcom_smem *smem,
                                   struct smem_partition_header *phdr,
                                   unsigned item,
                                   size_t size)
{
        struct smem_private_entry *hdr, *end;
        size_t alloc_size;
        void *cached;

        hdr = phdr_to_first_uncached_entry(phdr);
        end = phdr_to_last_uncached_entry(phdr);
        cached = phdr_to_last_cached_entry(phdr);

        while (hdr < end) {
                if (hdr->canary != SMEM_PRIVATE_CANARY)
                        goto bad_canary;
                if (le16_to_cpu(hdr->item) == item)
                        return -EEXIST;

                hdr = uncached_entry_next(hdr);
        }

        /* Check that we don't grow into the cached region */
        alloc_size = sizeof(*hdr) + ALIGN(size, 8);
        if ((void *)hdr + alloc_size > cached) {
                dev_err(smem->dev, "Out of memory\n");
                return -ENOSPC;
        }

        hdr->canary = SMEM_PRIVATE_CANARY;
        hdr->item = cpu_to_le16(item);
        hdr->size = cpu_to_le32(ALIGN(size, 8));
        hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
        hdr->padding_hdr = 0;

        /*
         * Ensure the header is written before we advance the free offset, so
         * that remote processors that do not take the remote spinlock still
         * get a consistent view of the linked list.
         */
        wmb();
        le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

        return 0;
bad_canary:
        dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
                le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

        return -EINVAL;
}

static int qcom_smem_alloc_global(struct qcom_smem *smem,
                                  unsigned item,
                                  size_t size)
{
        struct smem_global_entry *entry;
        struct smem_header *header;

        header = smem->regions[0].virt_base;
        entry = &header->toc[item];
        if (entry->allocated)
                return -EEXIST;

        size = ALIGN(size, 8);
        if (WARN_ON(size > le32_to_cpu(header->available)))
                return -ENOMEM;

        entry->offset = header->free_offset;
        entry->size = cpu_to_le32(size);

        /*
         * Ensure the header is consistent before we mark the item allocated,
         * so that remote processors will get a consistent view of the item
         * even though they do not take the spinlock on read.
         */
        wmb();
        entry->allocated = cpu_to_le32(1);

        le32_add_cpu(&header->free_offset, size);
        le32_add_cpu(&header->available, -size);

        return 0;
}

/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host:       remote processor id, or -1
 * @item:       smem item handle
 * @size:       number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
        struct smem_partition_header *phdr;
        unsigned long flags;
        int ret;

        if (!__smem)
                return -EPROBE_DEFER;

        if (item < SMEM_ITEM_LAST_FIXED) {
                dev_err(__smem->dev,
                        "Rejecting allocation of static entry %u\n", item);
                return -EINVAL;
        }

        if (WARN_ON(item >= __smem->item_count))
                return -EINVAL;

        ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
                                          HWSPINLOCK_TIMEOUT,
                                          &flags);
        if (ret)
                return ret;

        if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
                phdr = __smem->partitions[host];
                ret = qcom_smem_alloc_private(__smem, phdr, item, size);
        } else if (__smem->global_partition) {
                phdr = __smem->global_partition;
                ret = qcom_smem_alloc_private(__smem, phdr, item, size);
        } else {
                ret = qcom_smem_alloc_global(__smem, item, size);
        }

        hwspin_unlock_irqrestore(__smem->hwlock, &flags);

        return ret;
}
EXPORT_SYMBOL(qcom_smem_alloc);
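
/*
 * Illustrative call sequence for a client driver (sketch only; the item
 * number and message struct are hypothetical). -EEXIST is not an error
 * from the caller's point of view, it simply means the item has already
 * been allocated:
 *
 *	ret = qcom_smem_alloc(remote_host, MY_SMEM_ITEM, sizeof(*msg));
 *	if (ret < 0 && ret != -EEXIST)
 *		return ret;
 */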

static void *qcom_smem_get_global(struct qcom_smem *smem,
                                  unsigned item,
                                  size_t *size)
{
        struct smem_header *header;
        struct smem_region *region;
        struct smem_global_entry *entry;
        u32 aux_base;
        unsigned i;

        header = smem->regions[0].virt_base;
        entry = &header->toc[item];
        if (!entry->allocated)
                return ERR_PTR(-ENXIO);

        aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

        for (i = 0; i < smem->num_regions; i++) {
                region = &smem->regions[i];

                if (region->aux_base == aux_base || !aux_base) {
                        if (size != NULL)
                                *size = le32_to_cpu(entry->size);
                        return region->virt_base + le32_to_cpu(entry->offset);
                }
        }

        return ERR_PTR(-ENOENT);
}

static void *qcom_smem_get_private(struct qcom_smem *smem,
                                   struct smem_partition_header *phdr,
                                   size_t cacheline,
                                   unsigned item,
                                   size_t *size)
{
        struct smem_private_entry *e, *end;

        e = phdr_to_first_uncached_entry(phdr);
        end = phdr_to_last_uncached_entry(phdr);

        while (e < end) {
                if (e->canary != SMEM_PRIVATE_CANARY)
                        goto invalid_canary;

                if (le16_to_cpu(e->item) == item) {
                        if (size != NULL)
                                *size = le32_to_cpu(e->size) -
                                        le16_to_cpu(e->padding_data);

                        return uncached_entry_to_item(e);
                }

                e = uncached_entry_next(e);
        }

        /* Item was not found in the uncached list, search the cached list */

        e = phdr_to_first_cached_entry(phdr, cacheline);
        end = phdr_to_last_cached_entry(phdr);

        while (e > end) {
                if (e->canary != SMEM_PRIVATE_CANARY)
                        goto invalid_canary;

                if (le16_to_cpu(e->item) == item) {
                        if (size != NULL)
                                *size = le32_to_cpu(e->size) -
                                        le16_to_cpu(e->padding_data);

                        return cached_entry_to_item(e);
                }

                e = cached_entry_next(e, cacheline);
        }

        return ERR_PTR(-ENOENT);

invalid_canary:
        dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
                        le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

        return ERR_PTR(-EINVAL);
}

/**
 * qcom_smem_get() - resolve pointer and size of a smem item
 * @host:       the remote processor, or -1
 * @item:       smem item handle
 * @size:       pointer to be filled out with size of the item
 *
 * Looks up smem item and returns pointer to it. Size of smem
 * item is returned in @size.
 */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
        struct smem_partition_header *phdr;
        unsigned long flags;
        size_t cacheln;
        int ret;
        void *ptr = ERR_PTR(-EPROBE_DEFER);

        if (!__smem)
                return ptr;

        if (WARN_ON(item >= __smem->item_count))
                return ERR_PTR(-EINVAL);

        ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
                                          HWSPINLOCK_TIMEOUT,
                                          &flags);
        if (ret)
                return ERR_PTR(ret);

        if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
                phdr = __smem->partitions[host];
                cacheln = __smem->cacheline[host];
                ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
        } else if (__smem->global_partition) {
                phdr = __smem->global_partition;
                cacheln = __smem->global_cacheline;
                ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
        } else {
                ptr = qcom_smem_get_global(__smem, item, size);
        }

        hwspin_unlock_irqrestore(__smem->hwlock, &flags);

        return ptr;
}
EXPORT_SYMBOL(qcom_smem_get);
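
/*
 * Illustrative lookup (sketch only; MY_SMEM_ITEM is hypothetical). The
 * returned pointer references the shared memory region directly and
 * @size is filled in with the item's usable size:
 *
 *	msg = qcom_smem_get(remote_host, MY_SMEM_ITEM, &size);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 */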

/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host:       the remote processor identifying a partition, or -1
 *
 * To be used by smem clients as a quick way to determine if any new
 * allocations have been made.
 */
int qcom_smem_get_free_space(unsigned host)
{
        struct smem_partition_header *phdr;
        struct smem_header *header;
        unsigned ret;

        if (!__smem)
                return -EPROBE_DEFER;

        if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
                phdr = __smem->partitions[host];
                ret = le32_to_cpu(phdr->offset_free_cached) -
                      le32_to_cpu(phdr->offset_free_uncached);
        } else if (__smem->global_partition) {
                phdr = __smem->global_partition;
                ret = le32_to_cpu(phdr->offset_free_cached) -
                      le32_to_cpu(phdr->offset_free_uncached);
        } else {
                header = __smem->regions[0].virt_base;
                ret = le32_to_cpu(header->available);
        }

        return ret;
}
EXPORT_SYMBOL(qcom_smem_get_free_space);
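
/*
 * Sketch of the intended use (scan_for_new_items() is a hypothetical
 * client helper): since allocations only ever shrink the free area, a
 * change in this value indicates that new items may have appeared.
 *
 *	free = qcom_smem_get_free_space(remote_host);
 *	if (free != last_free)
 *		scan_for_new_items();
 */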

/**
 * qcom_smem_virt_to_phys() - return the physical address associated
 * with an smem item pointer (previously returned by qcom_smem_get())
 * @p:  the virtual address to convert
 *
 * Returns 0 if the pointer provided is not within any smem region.
 */
phys_addr_t qcom_smem_virt_to_phys(void *p)
{
        unsigned i;

        for (i = 0; i < __smem->num_regions; i++) {
                struct smem_region *region = &__smem->regions[i];

                if (p < region->virt_base)
                        continue;
                if (p < region->virt_base + region->size) {
                        u64 offset = p - region->virt_base;

                        return (phys_addr_t)region->aux_base + offset;
                }
        }

        return 0;
}
EXPORT_SYMBOL(qcom_smem_virt_to_phys);
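
/*
 * Illustrative use (sketch only): a caller that must hand an smem item
 * to hardware or firmware by physical address can translate the pointer
 * returned by qcom_smem_get() and treat 0 as failure:
 *
 *	addr = qcom_smem_virt_to_phys(ptr);
 *	if (!addr)
 *		return -EINVAL;
 */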

static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
        struct smem_header *header;
        __le32 *versions;

        header = smem->regions[0].virt_base;
        versions = header->version;

        return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}

static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
{
        struct smem_ptable *ptable;
        u32 version;

        ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
        if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
                return ERR_PTR(-ENOENT);

        version = le32_to_cpu(ptable->version);
        if (version != 1) {
                dev_err(smem->dev,
                        "Unsupported partition header version %d\n", version);
                return ERR_PTR(-EINVAL);
        }
        return ptable;
}

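/*
 * The optional smem_info block is located immediately after the partition
 * table entries; fall back to the fixed SMEM_ITEM_COUNT when either the
 * table or the info block is absent.
 */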
static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
{
        struct smem_ptable *ptable;
        struct smem_info *info;

        ptable = qcom_smem_get_ptable(smem);
        if (IS_ERR_OR_NULL(ptable))
                return SMEM_ITEM_COUNT;

        info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)];
        if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
                return SMEM_ITEM_COUNT;

        return le16_to_cpu(info->num_items);
}

/*
 * Validate the partition header for a partition whose partition
 * table entry is supplied.  Returns a pointer to its header if
 * valid, or a null pointer otherwise.
 */
static struct smem_partition_header *
qcom_smem_partition_header(struct qcom_smem *smem,
                struct smem_ptable_entry *entry, u16 host0, u16 host1)
{
        struct smem_partition_header *header;
        u32 size;

        header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);

        if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
                dev_err(smem->dev, "bad partition magic %4ph\n", header->magic);
                return NULL;
        }

        if (host0 != le16_to_cpu(header->host0)) {
                dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
                                host0, le16_to_cpu(header->host0));
                return NULL;
        }
        if (host1 != le16_to_cpu(header->host1)) {
                dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
                                host1, le16_to_cpu(header->host1));
                return NULL;
        }

        size = le32_to_cpu(header->size);
        if (size != le32_to_cpu(entry->size)) {
                dev_err(smem->dev, "bad partition size (%u != %u)\n",
                        size, le32_to_cpu(entry->size));
                return NULL;
        }

        if (le32_to_cpu(header->offset_free_uncached) > size) {
                dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
                        le32_to_cpu(header->offset_free_uncached), size);
                return NULL;
        }

        return header;
}

static int qcom_smem_set_global_partition(struct qcom_smem *smem)
{
        struct smem_partition_header *header;
        struct smem_ptable_entry *entry;
        struct smem_ptable *ptable;
        bool found = false;
        int i;

        if (smem->global_partition) {
                dev_err(smem->dev, "Already found the global partition\n");
                return -EINVAL;
        }

        ptable = qcom_smem_get_ptable(smem);
        if (IS_ERR(ptable))
                return PTR_ERR(ptable);

        for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
                entry = &ptable->entry[i];
                if (!le32_to_cpu(entry->offset))
                        continue;
                if (!le32_to_cpu(entry->size))
                        continue;

                if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
                        continue;

                if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                dev_err(smem->dev, "Missing entry for global partition\n");
                return -EINVAL;
        }

        header = qcom_smem_partition_header(smem, entry,
                                SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST);
        if (!header)
                return -EINVAL;

        smem->global_partition = header;
        smem->global_cacheline = le32_to_cpu(entry->cacheline);

        return 0;
}

static int
qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
{
        struct smem_partition_header *header;
        struct smem_ptable_entry *entry;
        struct smem_ptable *ptable;
        unsigned int remote_host;
        u16 host0, host1;
        int i;

        ptable = qcom_smem_get_ptable(smem);
        if (IS_ERR(ptable))
                return PTR_ERR(ptable);

        for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
                entry = &ptable->entry[i];
                if (!le32_to_cpu(entry->offset))
                        continue;
                if (!le32_to_cpu(entry->size))
                        continue;

                host0 = le16_to_cpu(entry->host0);
                host1 = le16_to_cpu(entry->host1);
                if (host0 == local_host)
                        remote_host = host1;
                else if (host1 == local_host)
                        remote_host = host0;
                else
                        continue;

                if (remote_host >= SMEM_HOST_COUNT) {
                        dev_err(smem->dev, "bad host %u\n", remote_host);
                        return -EINVAL;
                }

                if (smem->partitions[remote_host]) {
                        dev_err(smem->dev, "duplicate host %u\n", remote_host);
                        return -EINVAL;
                }

                header = qcom_smem_partition_header(smem, entry, host0, host1);
                if (!header)
                        return -EINVAL;

                smem->partitions[remote_host] = header;
                smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
        }

        return 0;
}

static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
                                const char *name, int i)
{
        struct device_node *np;
        struct resource r;
        resource_size_t size;
        int ret;

        np = of_parse_phandle(dev->of_node, name, 0);
        if (!np) {
                dev_err(dev, "No %s specified\n", name);
                return -EINVAL;
        }

        ret = of_address_to_resource(np, 0, &r);
        of_node_put(np);
        if (ret)
                return ret;
        size = resource_size(&r);

        smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, size);
        if (!smem->regions[i].virt_base)
                return -ENOMEM;
        smem->regions[i].aux_base = (u32)r.start;
        smem->regions[i].size = size;

        return 0;
}
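
/*
 * Illustrative devicetree fragment (sketch only; node names, labels and
 * the hwlock index are examples, see the qcom,smem binding for details):
 *
 *	smem {
 *		compatible = "qcom,smem";
 *		memory-region = <&smem_region>;
 *		qcom,rpm-msg-ram = <&rpm_msg_ram>;
 *		hwlocks = <&tcsr_mutex 3>;
 *	};
 */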

static int qcom_smem_probe(struct platform_device *pdev)
{
        struct smem_header *header;
        struct qcom_smem *smem;
        size_t array_size;
        int num_regions;
        int hwlock_id;
        u32 version;
        int ret;

        num_regions = 1;
        if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
                num_regions++;

        array_size = num_regions * sizeof(struct smem_region);
        smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
        if (!smem)
                return -ENOMEM;

        smem->dev = &pdev->dev;
        smem->num_regions = num_regions;

        ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
        if (ret)
                return ret;

        if (num_regions > 1) {
                ret = qcom_smem_map_memory(smem, &pdev->dev,
                                           "qcom,rpm-msg-ram", 1);
                if (ret)
                        return ret;
        }

        header = smem->regions[0].virt_base;
        if (le32_to_cpu(header->initialized) != 1 ||
            le32_to_cpu(header->reserved)) {
                dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
                return -EINVAL;
        }

        version = qcom_smem_get_sbl_version(smem);
        switch (version >> 16) {
        case SMEM_GLOBAL_PART_VERSION:
                ret = qcom_smem_set_global_partition(smem);
                if (ret < 0)
                        return ret;
                smem->item_count = qcom_smem_get_item_count(smem);
                break;
        case SMEM_GLOBAL_HEAP_VERSION:
                smem->item_count = SMEM_ITEM_COUNT;
                break;
        default:
                dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
                return -EINVAL;
        }

        BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
        ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
        if (ret < 0 && ret != -ENOENT)
                return ret;

        hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
        if (hwlock_id < 0) {
                if (hwlock_id != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "failed to retrieve hwlock\n");
                return hwlock_id;
        }

        smem->hwlock = hwspin_lock_request_specific(hwlock_id);
        if (!smem->hwlock)
                return -ENXIO;

        __smem = smem;

        smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
                                                      PLATFORM_DEVID_NONE, NULL,
                                                      0);
        if (IS_ERR(smem->socinfo))
                dev_dbg(&pdev->dev, "failed to register socinfo device\n");

        return 0;
}

static int qcom_smem_remove(struct platform_device *pdev)
{
        platform_device_unregister(__smem->socinfo);

        hwspin_lock_free(__smem->hwlock);
        __smem = NULL;

        return 0;
}

static const struct of_device_id qcom_smem_of_match[] = {
        { .compatible = "qcom,smem" },
        {}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);

static struct platform_driver qcom_smem_driver = {
        .probe = qcom_smem_probe,
        .remove = qcom_smem_remove,
        .driver = {
                .name = "qcom-smem",
                .of_match_table = qcom_smem_of_match,
                .suppress_bind_attrs = true,
        },
};

static int __init qcom_smem_init(void)
{
        return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);

static void __exit qcom_smem_exit(void)
{
        platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
MODULE_LICENSE("GPL v2");