linux/drivers/staging/media/atomisp/include/hmm/hmm_bo.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef __HMM_BO_H__
#define __HMM_BO_H__

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include "mmu/isp_mmu.h"
#include "hmm/hmm_common.h"
#include "ia_css_types.h"

#define check_bodev_null_return(bdev, exp)      \
                check_null_return(bdev, exp, \
                        "NULL hmm_bo_device.\n")

#define check_bodev_null_return_void(bdev)      \
                check_null_return_void(bdev, \
                        "NULL hmm_bo_device.\n")

#define check_bo_status_yes_goto(bo, _status, label) \
        var_not_equal_goto((bo->status & (_status)), (_status), \
                        label, \
                        "HMM buffer status not contain %s.\n", \
                        #_status)

#define check_bo_status_no_goto(bo, _status, label) \
        var_equal_goto((bo->status & (_status)), (_status), \
                        label, \
                        "HMM buffer status contains %s.\n", \
                        #_status)

#define rbtree_node_to_hmm_bo(root_node)        \
        container_of((root_node), struct hmm_buffer_object, node)

#define list_to_hmm_bo(list_ptr)        \
        list_entry((list_ptr), struct hmm_buffer_object, list)

#define kref_to_hmm_bo(kref_ptr)        \
        list_entry((kref_ptr), struct hmm_buffer_object, kref)

#define check_bo_null_return(bo, exp)   \
        check_null_return(bo, exp, "NULL hmm buffer object.\n")

#define check_bo_null_return_void(bo)   \
        check_null_return_void(bo, "NULL hmm buffer object.\n")

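/*
 * Illustrative sketch (not a declaration in this header) of how the
 * NULL-check helpers above are typically used by the implementation;
 * hmm_bo_example() is a made-up name:
 *
 *      int hmm_bo_example(struct hmm_buffer_object *bo)
 *      {
 *              check_bo_null_return(bo, -EINVAL);
 *              ...
 *              return 0;
 *      }
 */
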
#define HMM_MAX_ORDER           3
#define HMM_MIN_ORDER           0

#define ISP_VM_START    0x0
#define ISP_VM_SIZE     (0x7FFFFFFF)    /* 2G address space */
#define ISP_PTR_NULL    NULL

#define HMM_BO_DEVICE_INITED    0x1

enum hmm_bo_type {
        HMM_BO_PRIVATE,
        HMM_BO_SHARE,
        HMM_BO_USER,
        HMM_BO_LAST,
};

enum hmm_page_type {
        HMM_PAGE_TYPE_RESERVED,
        HMM_PAGE_TYPE_DYNAMIC,
        HMM_PAGE_TYPE_GENERAL,
};

#define HMM_BO_MASK             0x1
#define HMM_BO_FREE             0x0
#define HMM_BO_ALLOCED          0x1
#define HMM_BO_PAGE_ALLOCED     0x2
#define HMM_BO_BINDED           0x4
#define HMM_BO_MMAPED           0x8
#define HMM_BO_VMAPED           0x10
#define HMM_BO_VMAPED_CACHED    0x20
#define HMM_BO_ACTIVE           0x1000
#define HMM_BO_MEM_TYPE_USER    0x1
#define HMM_BO_MEM_TYPE_PFN     0x2

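/*
 * bo->status is a bitmask built from the HMM_BO_* flags above.  A rough,
 * hedged sketch of the intended progression (the authoritative state
 * transitions live in the hmm_bo implementation, not in this header):
 *
 *      hmm_bo_alloc()              ->  HMM_BO_ALLOCED      (vm range reserved)
 *      hmm_bo_alloc_pages()        ->  | HMM_BO_PAGE_ALLOCED
 *      hmm_bo_bind()               ->  | HMM_BO_BINDED
 *      hmm_bo_vmap()/hmm_bo_mmap() ->  | HMM_BO_VMAPED / HMM_BO_MMAPED
 *
 *      if (bo->status & HMM_BO_BINDED)
 *              ...     pages are mapped into the ISP virtual space
 */
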
struct hmm_bo_device {
        struct isp_mmu          mmu;

        /* start/pgnr/size record the ISP virtual memory range of this bo device */
        unsigned int start;
        unsigned int pgnr;
        unsigned int size;

        /* list_lock protects the entire_bo_list */
        spinlock_t      list_lock;
        int flag;

        /* linked list of all buffer objects */
        struct list_head entire_bo_list;
        /* rbtree maintaining all allocated vm ranges */
        struct rb_root allocated_rbtree;
        /* rbtree maintaining all free vm ranges */
        struct rb_root free_rbtree;
        struct mutex rbtree_mutex;
        struct kmem_cache *bo_cache;
};

struct hmm_page_object {
        struct page             *page;
        enum hmm_page_type      type;
};

struct hmm_buffer_object {
        struct hmm_bo_device    *bdev;
        struct list_head        list;
        struct kref     kref;

        struct page **pages;

        /* mutex protecting this BO */
        struct mutex            mutex;
        enum hmm_bo_type        type;
        struct hmm_page_object  *page_obj;      /* physical pages */
        int             from_highmem;
        int             mmap_count;
        int             status;
        int             mem_type;
        void            *vmap_addr; /* kernel virtual address from vmap */

        struct rb_node  node;
        unsigned int    start;
        unsigned int    end;
        unsigned int    pgnr;
        /*
         * When inserting a bo that has the same pgnr as an existing
         * bo node in the free_rbtree, the "prev & next" pointers are
         * used to chain the bos into a linked list instead of
         * inserting the new bo into the free_rbtree directly.  This
         * guarantees that each node in the free_rbtree has a unique
         * pgnr.  "prev & next" default to NULL.
         */
        struct hmm_buffer_object        *prev;
        struct hmm_buffer_object        *next;
};

struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
                                       unsigned int pgnr);

void hmm_bo_release(struct hmm_buffer_object *bo);

int hmm_bo_device_init(struct hmm_bo_device *bdev,
                       struct isp_mmu_client *mmu_driver,
                       unsigned int vaddr_start, unsigned int size);

/*
 * clean up all hmm_bo_device related resources.
 */
void hmm_bo_device_exit(struct hmm_bo_device *bdev);

/*
 * check whether the bo device has been initialized.
 */
int hmm_bo_device_inited(struct hmm_bo_device *bdev);
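
/*
 * Illustrative lifecycle sketch (assumptions: "bdev" is embedded in a
 * driver-private structure and "my_mmu_client" is the MMU client the
 * platform provides; both names are placeholders, not part of this API):
 *
 *      ret = hmm_bo_device_init(&bdev, &my_mmu_client,
 *                               ISP_VM_START, ISP_VM_SIZE);
 *      if (ret)
 *              return ret;
 *
 *      bo = hmm_bo_alloc(&bdev, pgnr);
 *      if (!bo)
 *              return -ENOMEM;
 *      ...
 *      hmm_bo_unref(bo);
 *      hmm_bo_device_exit(&bdev);
 */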

/*
 * increase the buffer object reference count.
 */
void hmm_bo_ref(struct hmm_buffer_object *bo);

/*
 * decrease the buffer object reference count. if the reference count
 * reaches 0, the release function of the buffer object will be called.
 *
 * this call is also used to release a hmm_buffer_object or the
 * upper-level object it is embedded in. call this function when the
 * buffer object is no longer used.
 *
 * Note:
 *
 * the user does not need to care about releasing internal resources of
 * the buffer object in the release callback; that is handled
 * internally.
 *
 * this call will only release the internal resources of the buffer
 * object but will not free the buffer object itself, as the buffer
 * object can be either pre-allocated statically or allocated
 * dynamically. the user therefore needs to handle the release of the
 * buffer object itself. the example below shows the normal use of the
 * buffer object:
 *
 *      struct hmm_buffer_object *bo = hmm_bo_alloc(bdev, pgnr);
 *      ......
 *      hmm_bo_unref(bo);
 */
void hmm_bo_unref(struct hmm_buffer_object *bo);

int hmm_bo_allocated(struct hmm_buffer_object *bo);

/*
 * allocate/free physical pages for the bo. it will try to allocate
 * memory from highmem if from_highmem is set, and type indicates
 * whether the pages are allocated through the video driver (for a
 * shared buffer) or by the ISP driver itself.
 */
int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
                       enum hmm_bo_type type, int from_highmem,
                       const void __user *userptr, bool cached);
void hmm_bo_free_pages(struct hmm_buffer_object *bo);
int hmm_bo_page_allocated(struct hmm_buffer_object *bo);

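/*
 * Illustrative sketch of allocating and freeing backing pages for a bo
 * (hedged example; error handling shortened, "bo" obtained earlier via
 * hmm_bo_alloc()):
 *
 *      ret = hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0, NULL, false);
 *      if (ret)
 *              goto err_unref;
 *      ...
 *      hmm_bo_free_pages(bo);
 */
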
/*
 * get physical page info of the bo.
 */
int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
                         struct hmm_page_object **page_obj, int *pgnr);

/*
 * bind/unbind the physical pages to a virtual address space.
 */
int hmm_bo_bind(struct hmm_buffer_object *bo);
void hmm_bo_unbind(struct hmm_buffer_object *bo);
int hmm_bo_binded(struct hmm_buffer_object *bo);

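/*
 * Illustrative sketch of binding allocated pages into the ISP virtual
 * address space (hedged example; assumes pages were allocated with
 * hmm_bo_alloc_pages() as above):
 *
 *      ret = hmm_bo_bind(bo);
 *      if (ret)
 *              goto err_free_pages;
 *      ...
 *      if (hmm_bo_binded(bo))
 *              hmm_bo_unbind(bo);
 */
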
/*
 * vmap the buffer object's pages into a contiguous kernel virtual
 * address. if the buffer has already been vmapped, return the existing
 * virtual address directly.
 */
void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached);

/*
 * flush the cache for the vmapped buffer object's pages.
 * if the buffer has not been vmapped, return directly.
 */
void hmm_bo_flush_vmap(struct hmm_buffer_object *bo);

/*
 * vunmap the buffer object's kernel virtual address.
 */
void hmm_bo_vunmap(struct hmm_buffer_object *bo);

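/*
 * Illustrative sketch of CPU access through a cached vmap (hedged
 * example; "len" is a placeholder for the number of bytes to touch):
 *
 *      void *vaddr = hmm_bo_vmap(bo, true);
 *
 *      if (vaddr) {
 *              memset(vaddr, 0, len);
 *              hmm_bo_flush_vmap(bo);
 *              hmm_bo_vunmap(bo);
 *      }
 */
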
/*
 * mmap the bo's physical pages to a specific vma.
 *
 * the vma's address space size must be the same as the bo's size,
 * otherwise it will return -EINVAL.
 *
 * vma->vm_flags will be set to (VM_RESERVED | VM_IO).
 */
int hmm_bo_mmap(struct vm_area_struct *vma,
                struct hmm_buffer_object *bo);
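
/*
 * Illustrative sketch of serving a driver's mmap request with a bo
 * (hedged example; "my_vdev_mmap" and the way "bo" is looked up are
 * placeholders, not part of this API):
 *
 *      static int my_vdev_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              // "bo" comes from a driver-specific lookup
 *              struct hmm_buffer_object *bo = ...;
 *
 *              return hmm_bo_mmap(vma, bo);
 *      }
 */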

extern struct hmm_pool  dynamic_pool;
extern struct hmm_pool  reserved_pool;

/*
 * find the buffer object by its ISP virtual address vaddr.
 * return NULL if no such buffer object is found.
 */
struct hmm_buffer_object *hmm_bo_device_search_start(
    struct hmm_bo_device *bdev, ia_css_ptr vaddr);

/*
 * find the buffer object by an ISP virtual address.
 * the address does not need to be the start address of a bo;
 * it can be any address within the range of a bo.
 * return NULL if no such buffer object is found.
 */
struct hmm_buffer_object *hmm_bo_device_search_in_range(
    struct hmm_bo_device *bdev, ia_css_ptr vaddr);

/*
 * find the buffer object with the kernel virtual address vaddr.
 * return NULL if no such buffer object is found.
 */
struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
    struct hmm_bo_device *bdev, const void *vaddr);
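
/*
 * Illustrative sketch of translating an ISP virtual address back to its
 * buffer object (hedged example; "isp_addr" is a placeholder ia_css_ptr
 * handed out earlier by this allocator):
 *
 *      struct hmm_buffer_object *bo;
 *
 *      bo = hmm_bo_device_search_in_range(bdev, isp_addr);
 *      if (!bo)
 *              return -EINVAL;
 */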

#endif