/* linux/drivers/gpu/drm/drm_cache.c */
   1/**************************************************************************
   2 *
   3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27/*
  28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
  29 */
  30
  31#include <linux/export.h>
  32#include <linux/highmem.h>
  33
  34#include <drm/drm_cache.h>
  35
  36#if defined(CONFIG_X86)
  37#include <asm/smp.h>
  38
  39/*
  40 * clflushopt is an unordered instruction which needs fencing with mfence or
  41 * sfence to avoid ordering issues.  For drm_clflush_page this fencing happens
  42 * in the caller.
  43 */
  44static void
  45drm_clflush_page(struct page *page)
  46{
  47        uint8_t *page_virtual;
  48        unsigned int i;
  49        const int size = boot_cpu_data.x86_clflush_size;
  50
  51        if (unlikely(page == NULL))
  52                return;
  53
  54        page_virtual = kmap_atomic(page);
  55        for (i = 0; i < PAGE_SIZE; i += size)
  56                clflushopt(page_virtual + i);
  57        kunmap_atomic(page_virtual);
  58}
  59
  60static void drm_cache_flush_clflush(struct page *pages[],
  61                                    unsigned long num_pages)
  62{
  63        unsigned long i;
  64
  65        mb();
  66        for (i = 0; i < num_pages; i++)
  67                drm_clflush_page(*pages++);
  68        mb();
  69}
  70#endif
  71
/**
 * drm_clflush_pages - Flush dcache lines of a set of pages.
 * @pages: List of pages to be flushed.
 * @num_pages: Number of pages in the array.
 *
 * Flush every data cache line entry that points to an address belonging
 * to a page in the array.
 */
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		drm_cache_flush_clflush(pages, num_pages);
		return;
	}

	/*
	 * No cacheline-flush instruction available: fall back to writing
	 * back and invalidating the entire cache on every CPU.
	 */
	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");

#elif defined(__powerpc__)
	unsigned long i;
	for (i = 0; i < num_pages; i++) {
		struct page *page = pages[i];
		void *page_virtual;

		if (unlikely(page == NULL))
			continue;

		/* Map the page so the full PAGE_SIZE range can be flushed. */
		page_virtual = kmap_atomic(page);
		flush_dcache_range((unsigned long)page_virtual,
				   (unsigned long)page_virtual + PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
#else
	/* Architectures without an implementation get a loud one-shot warning. */
	pr_err("Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
 113
/**
 * drm_clflush_sg - Flush dcache lines pointing to a scatter-gather.
 * @st: struct sg_table.
 *
 * Flush every data cache line entry that points to an address in the
 * sg.
 */
void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		struct sg_page_iter sg_iter;

		/* clflushopt is unordered; fence the whole page walk. */
		mb();
		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
			drm_clflush_page(sg_page_iter_page(&sg_iter));
		mb();

		return;
	}

	/* No cacheline-flush instruction: flush every CPU's entire cache. */
	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");
#else
	pr_err("Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_sg);
 144
/**
 * drm_clflush_virt_range - Flush dcache lines of a region
 * @addr: Initial kernel memory address.
 * @length: Region size.
 *
 * Flush every data cache line entry that points to an address in the
 * region requested.
 */
void
drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		const int size = boot_cpu_data.x86_clflush_size;
		void *end = addr + length;
		/* Round down to the start of the first touched cacheline. */
		addr = (void *)(((unsigned long)addr) & -size);
		mb();
		for (; addr < end; addr += size)
			clflushopt(addr);
		clflushopt(end - 1); /* force serialisation */
		mb();
		return;
	}

	/* No cacheline-flush instruction: flush every CPU's entire cache. */
	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");
#else
	pr_err("Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);
 177