linux/drivers/gpu/drm/drm_cache.c
/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include <drm/drmP.h>

#if defined(CONFIG_X86)

/*
 * clflushopt is an unordered instruction which needs fencing with mfence or
 * sfence to avoid ordering issues.  For drm_clflush_page this fencing happens
 * in the caller.
 */
static void
drm_clflush_page(struct page *page)
{
        uint8_t *page_virtual;
        unsigned int i;
        const int size = boot_cpu_data.x86_clflush_size;

        if (unlikely(page == NULL))
                return;

        page_virtual = kmap_atomic(page);
        for (i = 0; i < PAGE_SIZE; i += size)
                clflushopt(page_virtual + i);
        kunmap_atomic(page_virtual);
}

static void drm_cache_flush_clflush(struct page *pages[],
                                    unsigned long num_pages)
{
        unsigned long i;

        /* The mb() pair provides the fencing that clflushopt requires. */
        mb();
        for (i = 0; i < num_pages; i++)
                drm_clflush_page(*pages++);
        mb();
}

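/*
 * CPUs without clflush cannot flush an individual cache line, so the
 * fallback is a full write-back-and-invalidate (wbinvd) run on every
 * CPU via an IPI from on_each_cpu().
 */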
static void
drm_clflush_ipi_handler(void *null)
{
        wbinvd();
}
#endif

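/**
 * drm_clflush_pages - Flush dcache lines of a set of pages.
 * @pages: List of pages to be flushed.
 * @num_pages: Number of pages in the array.
 *
 * Flush every data cache line entry that points to an address belonging
 * to a page in the array.
 */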
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
        if (cpu_has_clflush) {
                drm_cache_flush_clflush(pages, num_pages);
                return;
        }

        if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
                printk(KERN_ERR "Timed out waiting for cache flush.\n");

#elif defined(__powerpc__)
        unsigned long i;
        for (i = 0; i < num_pages; i++) {
                struct page *page = pages[i];
                void *page_virtual;

                if (unlikely(page == NULL))
                        continue;

                page_virtual = kmap_atomic(page);
                flush_dcache_range((unsigned long)page_virtual,
                                   (unsigned long)page_virtual + PAGE_SIZE);
                kunmap_atomic(page_virtual);
        }
#else
        printk(KERN_ERR "Architecture has no drm_cache.c support\n");
        WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);

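/**
 * drm_clflush_sg - Flush dcache lines pointing to a scatter-gather table.
 * @st: struct sg_table.
 *
 * Flush every data cache line entry that points to an address in the
 * sg.
 */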
void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
        if (cpu_has_clflush) {
                struct sg_page_iter sg_iter;

                mb();
                for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
                        drm_clflush_page(sg_page_iter_page(&sg_iter));
                mb();

                return;
        }

        if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
                printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
        printk(KERN_ERR "Architecture has no drm_cache.c support\n");
        WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_sg);

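/**
 * drm_clflush_virt_range - Flush dcache lines of a region
 * @addr: Virtual kernel address of start of region
 * @length: Length of region
 *
 * Flush every data cache line entry that points to an address in the
 * region requested.
 */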
void
drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
        if (cpu_has_clflush) {
                void *end = addr + length;
                mb();
                for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
                        clflushopt(addr);
                /*
                 * If addr is not cacheline aligned, the loop above may skip
                 * the cacheline holding the final bytes; flush it explicitly.
                 */
                clflushopt(end - 1);
                mb();
                return;
        }

        if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
                printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
        printk(KERN_ERR "Architecture has no drm_cache.c support\n");
        WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);