linux/arch/sparc/kernel/adi_64.c
// SPDX-License-Identifier: GPL-2.0-only
/* adi_64.c: support for ADI (Application Data Integrity) feature on
 * sparc m7 and newer processors. This feature is also known as
 * SSM (Silicon Secured Memory).
 *
 * Copyright (C) 2016 Oracle and/or its affiliates. All rights reserved.
 * Author: Khalid Aziz (khalid.aziz@oracle.com)
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <asm/mdesc.h>
#include <asm/adi_64.h>
#include <asm/mmu_64.h>
#include <asm/pgtable_64.h>

/* Each page of storage for ADI tags can accommodate tags for 128
 * pages. When ADI-enabled pages are being swapped out, it is prudent
 * to allocate at least enough tag storage space to accommodate
 * SWAPFILE_CLUSTER pages. Allocate enough tag storage to store tags
 * for four times SWAPFILE_CLUSTER pages to reduce the need for
 * further allocations for the same vma.
 */
#define TAG_STORAGE_PAGES       8

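/* Global ADI state; capabilities are filled in by mdesc_adi_init() below */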
struct adi_config adi_state;
EXPORT_SYMBOL(adi_state);

/* mdesc_adi_init() : Parse the machine description provided by the
 *      hypervisor to detect ADI capabilities
 *
 * The hypervisor reports the ADI capabilities of the platform in the
 * "hwcap-list" property of the "cpu" node. If the platform supports ADI,
 * "hwcap-list" contains the keyword "adp" and the "platform" node contains
 * the "adp-blksz", "adp-nbits" and "ue-on-adp" properties describing the
 * ADI capabilities.
 */
void __init mdesc_adi_init(void)
{
        struct mdesc_handle *hp = mdesc_grab();
        const char *prop;
        u64 pn, *val;
        int len;

        if (!hp)
                goto adi_not_found;

        pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
        if (pn == MDESC_NODE_NULL)
                goto adi_not_found;

        prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
        if (!prop)
                goto adi_not_found;

        /*
         * Look for "adp" keyword in hwcap-list which would indicate
         * ADI support
         */
        adi_state.enabled = false;
        while (len) {
                int plen;

                if (!strcmp(prop, "adp")) {
                        adi_state.enabled = true;
                        break;
                }

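                /* Advance to the next NUL-terminated keyword in the list */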
                plen = strlen(prop) + 1;
                prop += plen;
                len -= plen;
        }

        if (!adi_state.enabled)
                goto adi_not_found;

        /* Find the ADI properties in the "platform" node. If any of the
         * ADI properties is missing, ADI support is incomplete; do not
         * enable ADI in the kernel.
         */
        pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
        if (pn == MDESC_NODE_NULL)
                goto adi_not_found;

        val = (u64 *) mdesc_get_property(hp, pn, "adp-blksz", &len);
        if (!val)
                goto adi_not_found;
        adi_state.caps.blksz = *val;

        val = (u64 *) mdesc_get_property(hp, pn, "adp-nbits", &len);
        if (!val)
                goto adi_not_found;
        adi_state.caps.nbits = *val;

        val = (u64 *) mdesc_get_property(hp, pn, "ue-on-adp", &len);
        if (!val)
                goto adi_not_found;
        adi_state.caps.ue_on_adi = *val;

        /* Some of the code to support swapping ADI tags is written with
         * the assumption that two ADI tags can fit inside one byte. If
         * this assumption is broken by a future architecture change,
         * that code will have to be revisited. If that were to happen,
         * disable ADI support so we do not get unpredictable results
         * with programs trying to use ADI while their pages get
         * swapped out.
         */
        if (adi_state.caps.nbits > 4) {
                pr_warn("WARNING: ADI tag size >4 on this platform. Disabling ADI support\n");
                adi_state.enabled = false;
        }

        mdesc_release(hp);
        return;

adi_not_found:
        adi_state.enabled = false;
        adi_state.caps.blksz = 0;
        adi_state.caps.nbits = 0;
        if (hp)
                mdesc_release(hp);
}

tag_storage_desc_t *find_tag_store(struct mm_struct *mm,
                                   struct vm_area_struct *vma,
                                   unsigned long addr)
{
        tag_storage_desc_t *tag_desc = NULL;
        unsigned long i, max_desc, flags;

        /* Check if this vma already has a tag storage descriptor
         * allocated for it.
         */
        max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
        if (mm->context.tag_store) {
                tag_desc = mm->context.tag_store;
                spin_lock_irqsave(&mm->context.tag_lock, flags);
                for (i = 0; i < max_desc; i++) {
                        if ((addr >= tag_desc->start) &&
                            ((addr + PAGE_SIZE - 1) <= tag_desc->end))
                                break;
                        tag_desc++;
                }
                spin_unlock_irqrestore(&mm->context.tag_lock, flags);

                /* If no matching entries were found, this must be a
                 * freshly allocated page
                 */
                if (i >= max_desc)
                        tag_desc = NULL;
        }

        return tag_desc;
}

tag_storage_desc_t *alloc_tag_store(struct mm_struct *mm,
                                    struct vm_area_struct *vma,
                                    unsigned long addr)
{
        unsigned char *tags;
        unsigned long i, size, max_desc, flags;
        tag_storage_desc_t *tag_desc, *open_desc;
        unsigned long end_addr, hole_start, hole_end;

        max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
        open_desc = NULL;
        hole_start = 0;
        hole_end = ULONG_MAX;
        end_addr = addr + PAGE_SIZE - 1;

        /* Check if this vma already has a tag storage descriptor
         * allocated for it.
         */
        spin_lock_irqsave(&mm->context.tag_lock, flags);
        if (mm->context.tag_store) {
                tag_desc = mm->context.tag_store;

                /* Look for a matching entry for this address. While doing
                 * that, also look for the first open slot and find the
                 * hole in the already allocated range where this request
                 * will fit.
                 */
                for (i = 0; i < max_desc; i++) {
                        if (tag_desc->tag_users == 0) {
                                if (open_desc == NULL)
                                        open_desc = tag_desc;
                        } else {
                                if ((addr >= tag_desc->start) &&
                                    (tag_desc->end >= (addr + PAGE_SIZE - 1))) {
                                        tag_desc->tag_users++;
                                        goto out;
                                }
                        }
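                        /* Narrow [hole_start, hole_end] to the gap between
                         * existing allocations that surrounds this address
                         */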
                        if ((tag_desc->start > end_addr) &&
                            (tag_desc->start < hole_end))
                                hole_end = tag_desc->start;
                        if ((tag_desc->end < addr) &&
                            (tag_desc->end > hole_start))
                                hole_start = tag_desc->end;
                        tag_desc++;
                }

        } else {
                size = sizeof(tag_storage_desc_t)*max_desc;
                mm->context.tag_store = kzalloc(size, GFP_NOWAIT|__GFP_NOWARN);
                if (mm->context.tag_store == NULL) {
                        tag_desc = NULL;
                        goto out;
                }
                tag_desc = mm->context.tag_store;
                for (i = 0; i < max_desc; i++, tag_desc++)
                        tag_desc->tag_users = 0;
                open_desc = mm->context.tag_store;
                i = 0;
        }

        /* Check if we ran out of tag storage descriptors */
        if (open_desc == NULL) {
                tag_desc = NULL;
                goto out;
        }

        /* Mark this tag descriptor slot in use and then initialize it */
        tag_desc = open_desc;
        tag_desc->tag_users = 1;

        /* Tag storage has not been allocated for this vma and space
         * is available in the tag storage descriptor table. Since this
         * page is being swapped out, there is a high probability that
         * subsequent pages in the VMA will be swapped out as well.
         * Allocate pages to store tags for as many pages in this vma
         * as possible, but not more than TAG_STORAGE_PAGES. Each byte
         * in tag space holds two ADI tags since each ADI tag is 4 bits.
         * Each ADI tag covers adi_blksize() worth of addresses. Check
         * if the hole is big enough to accommodate the full address
         * range covered by TAG_STORAGE_PAGES tag pages.
         */
        size = TAG_STORAGE_PAGES * PAGE_SIZE;
        end_addr = addr + (size*2*adi_blksize()) - 1;
        /* Check for overflow. If overflow occurs, allocate only one page */
        if (end_addr < addr) {
                size = PAGE_SIZE;
                end_addr = addr + (size*2*adi_blksize()) - 1;
                /* If overflow happens with the minimum tag storage
                 * allocation as well, adjust ending address for this
                 * tag storage.
                 */
                if (end_addr < addr)
                        end_addr = ULONG_MAX;
        }
        if (hole_end < end_addr) {
                /* Available hole is too small on the upper end of
                 * address. Can we expand the range towards the lower
                 * address and maximize use of this slot?
                 */
                unsigned long tmp_addr;

                end_addr = hole_end - 1;
                tmp_addr = end_addr - (size*2*adi_blksize()) + 1;
                /* Check for underflow. If underflow occurs, allocate
                 * only one page for storing ADI tags
                 */
                if (tmp_addr > addr) {
                        size = PAGE_SIZE;
                        tmp_addr = end_addr - (size*2*adi_blksize()) - 1;
                        /* If underflow happens with the minimum tag storage
                         * allocation as well, adjust starting address for
                         * this tag storage.
                         */
                        if (tmp_addr > addr)
                                tmp_addr = 0;
                }
                if (tmp_addr < hole_start) {
                        /* Available hole is restricted on lower address
                         * end as well
                         */
                        tmp_addr = hole_start + 1;
                }
                addr = tmp_addr;
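                /* Convert the covered address range into tag bytes (two
                 * ADI blocks per byte) and round up to whole pages
                 */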
                size = (end_addr + 1 - addr)/(2*adi_blksize());
                size = (size + (PAGE_SIZE-adi_blksize()))/PAGE_SIZE;
                size = size * PAGE_SIZE;
        }
        tags = kzalloc(size, GFP_NOWAIT|__GFP_NOWARN);
        if (tags == NULL) {
                tag_desc->tag_users = 0;
                tag_desc = NULL;
                goto out;
        }
        tag_desc->start = addr;
        tag_desc->tags = tags;
        tag_desc->end = end_addr;

out:
        spin_unlock_irqrestore(&mm->context.tag_lock, flags);
        return tag_desc;
}

void del_tag_store(tag_storage_desc_t *tag_desc, struct mm_struct *mm)
{
        unsigned long flags;
        unsigned char *tags = NULL;

        spin_lock_irqsave(&mm->context.tag_lock, flags);
        tag_desc->tag_users--;
        if (tag_desc->tag_users == 0) {
                tag_desc->start = tag_desc->end = 0;
                /* Do not free up the tag storage space allocated
                 * by the first descriptor. This is persistent
                 * emergency tag storage space for the task.
                 */
                if (tag_desc != mm->context.tag_store) {
                        tags = tag_desc->tags;
                        tag_desc->tags = NULL;
                }
        }
        spin_unlock_irqrestore(&mm->context.tag_lock, flags);
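        /* tags is non-NULL only if this was the last user of a
         * non-emergency descriptor; free it outside the lock
         */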
        kfree(tags);
}

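/* Compute the address of the tag byte that holds the ADI versions for
 * addr within tag_desc. Each tag byte covers two adi_blksize() blocks.
 */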
#define tag_start(addr, tag_desc)               \
        ((tag_desc)->tags + ((addr - (tag_desc)->start)/(2*adi_blksize())))

/* Retrieve any saved ADI tags for the page being swapped back in and
 * restore these tags to the newly allocated physical page.
 */
void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, pte_t pte)
{
        unsigned char *tag;
        tag_storage_desc_t *tag_desc;
        unsigned long paddr, tmp, version1, version2;

        /* Check if the swapped-out page has an ADI version
         * saved. If yes, restore the version tag to the newly
         * allocated page.
         */
        tag_desc = find_tag_store(mm, vma, addr);
        if (tag_desc == NULL)
                return;

        tag = tag_start(addr, tag_desc);
        paddr = pte_val(pte) & _PAGE_PADDR_4V;
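        /* Each saved byte packs two 4-bit versions: the high nibble for
         * one ADI block and the low nibble for the next one
         */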
        for (tmp = paddr; tmp < (paddr+PAGE_SIZE); tmp += adi_blksize()) {
                version1 = (*tag) >> 4;
                version2 = (*tag) & 0x0f;
                *tag++ = 0;
                asm volatile("stxa %0, [%1] %2\n\t"
                        :
                        : "r" (version1), "r" (tmp),
                          "i" (ASI_MCD_REAL));
                tmp += adi_blksize();
                asm volatile("stxa %0, [%1] %2\n\t"
                        :
                        : "r" (version2), "r" (tmp),
                          "i" (ASI_MCD_REAL));
        }
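        /* Ensure the MCD version stores above have completed */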
        asm volatile("membar #Sync\n\t");

        /* Check and mark this tag space for release later if
         * the swapped-in page was the last user of the tag space
         */
        del_tag_store(tag_desc, mm);
}

/* A page is about to be swapped out. Save any ADI tags associated with
 * this physical page so they can be restored later when the page is swapped
 * back in.
 */
int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
                  unsigned long addr, pte_t oldpte)
{
        unsigned char *tag;
        tag_storage_desc_t *tag_desc;
        unsigned long version1, version2, paddr, tmp;

        tag_desc = alloc_tag_store(mm, vma, addr);
        if (tag_desc == NULL)
                return -1;

        tag = tag_start(addr, tag_desc);
        paddr = pte_val(oldpte) & _PAGE_PADDR_4V;
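        /* Read the version tags for two consecutive ADI blocks per
         * iteration and pack them into one tag storage byte
         */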
        for (tmp = paddr; tmp < (paddr+PAGE_SIZE); tmp += adi_blksize()) {
                asm volatile("ldxa [%1] %2, %0\n\t"
                                : "=r" (version1)
                                : "r" (tmp), "i" (ASI_MCD_REAL));
                tmp += adi_blksize();
                asm volatile("ldxa [%1] %2, %0\n\t"
                                : "=r" (version2)
                                : "r" (tmp), "i" (ASI_MCD_REAL));
                *tag = (version1 << 4) | version2;
                tag++;
        }

        return 0;
}