linux/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Support for Medfield PNW Camera Imaging ISP subsystem.
   4 *
   5 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
   6 *
   7 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
   8 *
   9 * This program is free software; you can redistribute it and/or
  10 * modify it under the terms of the GNU General Public License version
  11 * 2 as published by the Free Software Foundation.
  12 *
  13 * This program is distributed in the hope that it will be useful,
  14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 * GNU General Public License for more details.
  17 *
  18 *
  19 */
  20/*
  21 * This file contains functions for buffer object structure management
  22 */
  23#include <linux/kernel.h>
  24#include <linux/types.h>
  25#include <linux/gfp.h>          /* for GFP_ATOMIC */
  26#include <linux/mm.h>
  27#include <linux/mm_types.h>
  28#include <linux/hugetlb.h>
  29#include <linux/highmem.h>
  30#include <linux/slab.h>         /* for kmalloc */
  31#include <linux/module.h>
  32#include <linux/moduleparam.h>
  33#include <linux/string.h>
  34#include <linux/list.h>
  35#include <linux/errno.h>
  36#include <linux/io.h>
  37#include <asm/current.h>
  38#include <linux/sched/signal.h>
  39#include <linux/file.h>
  40
  41#include <asm/set_memory.h>
  42
  43#include "atomisp_internal.h"
  44#include "hmm/hmm_common.h"
  45#include "hmm/hmm_pool.h"
  46#include "hmm/hmm_bo.h"
  47
  48static unsigned int order_to_nr(unsigned int order)
  49{
  50        return 1U << order;
  51}
  52
  53static unsigned int nr_to_order_bottom(unsigned int nr)
  54{
  55        return fls(nr) - 1;
  56}
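/*
 * Worked example for the two helpers above: order_to_nr(2) == 4 pages, and
 * nr_to_order_bottom(6) == fls(6) - 1 == 2, i.e. the largest order whose
 * block (4 pages) still fits into 6 remaining pages; any leftover pages are
 * picked up by later iterations of the allocation loop in
 * alloc_private_pages().
 */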
  57
  58static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
  59                     unsigned int pgnr)
  60{
  61        check_bodev_null_return(bdev, -EINVAL);
  62        var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
  63                         "hmm_bo_device not inited yet.\n");
  64        /* prevent zero size buffer object */
  65        if (pgnr == 0) {
  66                dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
  67                return -EINVAL;
  68        }
  69
  70        memset(bo, 0, sizeof(*bo));
  71        mutex_init(&bo->mutex);
  72
  73        /* init the bo->list HEAD as an element of entire_bo_list */
  74        INIT_LIST_HEAD(&bo->list);
  75
  76        bo->bdev = bdev;
  77        bo->vmap_addr = NULL;
  78        bo->status = HMM_BO_FREE;
  79        bo->start = bdev->start;
  80        bo->pgnr = pgnr;
  81        bo->end = bo->start + pgnr_to_size(pgnr);
  82        bo->prev = NULL;
  83        bo->next = NULL;
  84
  85        return 0;
  86}
  87
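/*
 * Layout of the free rbtree (as used by the search/insert helpers below):
 * nodes are keyed by 'pgnr', the buffer size in pages. Equal-sized free bos
 * are not inserted as separate tree nodes; they are chained behind the
 * in-tree bo through the 'prev'/'next' pointers, so only the head of each
 * size bucket is a real rb_node.
 */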
  88static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
  89    struct rb_node *node, unsigned int pgnr)
  90{
  91        struct hmm_buffer_object *this, *ret_bo, *temp_bo;
  92
  93        this = rb_entry(node, struct hmm_buffer_object, node);
  94        if (this->pgnr == pgnr ||
  95            (this->pgnr > pgnr && !this->node.rb_left)) {
  96                goto remove_bo_and_return;
  97        } else {
  98                if (this->pgnr < pgnr) {
  99                        if (!this->node.rb_right)
 100                                return NULL;
 101                        ret_bo = __bo_search_and_remove_from_free_rbtree(
 102                                     this->node.rb_right, pgnr);
 103                } else {
 104                        ret_bo = __bo_search_and_remove_from_free_rbtree(
 105                                     this->node.rb_left, pgnr);
 106                }
 107                if (!ret_bo) {
 108                        if (this->pgnr > pgnr)
 109                                goto remove_bo_and_return;
 110                        else
 111                                return NULL;
 112                }
 113                return ret_bo;
 114        }
 115
 116remove_bo_and_return:
 117        /* NOTE: All nodes on free rbtree have a 'prev' that points to NULL.
 118         * 1. check if 'this->next' is NULL:
 119         *      yes: erase 'this' node and rebalance rbtree, return 'this'.
 120         */
 121        if (!this->next) {
 122                rb_erase(&this->node, &this->bdev->free_rbtree);
 123                return this;
 124        }
 125        /* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
 126         * 2. unlink 'this->next' from the linked list hanging off 'this'
 127         *      (fixing up the related 'next/prev' pointers) and return it;
 128         *      the rbtree itself stays unchanged.
 129         */
 130        temp_bo = this->next;
 131        this->next = temp_bo->next;
 132        if (temp_bo->next)
 133                temp_bo->next->prev = this;
 134        temp_bo->next = NULL;
 135        temp_bo->prev = NULL;
 136        return temp_bo;
 137}
 138
 139static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
 140        ia_css_ptr start)
 141{
 142        struct rb_node *n = root->rb_node;
 143        struct hmm_buffer_object *bo;
 144
 145        do {
 146                bo = rb_entry(n, struct hmm_buffer_object, node);
 147
 148                if (bo->start > start) {
 149                        if (!n->rb_left)
 150                                return NULL;
 151                        n = n->rb_left;
 152                } else if (bo->start < start) {
 153                        if (!n->rb_right)
 154                                return NULL;
 155                        n = n->rb_right;
 156                } else {
 157                        return bo;
 158                }
 159        } while (n);
 160
 161        return NULL;
 162}
 163
 164static struct hmm_buffer_object *__bo_search_by_addr_in_range(
 165    struct rb_root *root, unsigned int start)
 166{
 167        struct rb_node *n = root->rb_node;
 168        struct hmm_buffer_object *bo;
 169
 170        do {
 171                bo = rb_entry(n, struct hmm_buffer_object, node);
 172
 173                if (bo->start > start) {
 174                        if (!n->rb_left)
 175                                return NULL;
 176                        n = n->rb_left;
 177                } else {
 178                        if (bo->end > start)
 179                                return bo;
 180                        if (!n->rb_right)
 181                                return NULL;
 182                        n = n->rb_right;
 183                }
 184        } while (n);
 185
 186        return NULL;
 187}
 188
 189static void __bo_insert_to_free_rbtree(struct rb_root *root,
 190                                       struct hmm_buffer_object *bo)
 191{
 192        struct rb_node **new = &root->rb_node;
 193        struct rb_node *parent = NULL;
 194        struct hmm_buffer_object *this;
 195        unsigned int pgnr = bo->pgnr;
 196
 197        while (*new) {
 198                parent = *new;
 199                this = container_of(*new, struct hmm_buffer_object, node);
 200
 201                if (pgnr < this->pgnr) {
 202                        new = &((*new)->rb_left);
 203                } else if (pgnr > this->pgnr) {
 204                        new = &((*new)->rb_right);
 205                } else {
 206                        bo->prev = this;
 207                        bo->next = this->next;
 208                        if (this->next)
 209                                this->next->prev = bo;
 210                        this->next = bo;
 211                        bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
 212                        return;
 213                }
 214        }
 215
 216        bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
 217
 218        rb_link_node(&bo->node, parent, new);
 219        rb_insert_color(&bo->node, root);
 220}
 221
 222static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
 223                                        struct hmm_buffer_object *bo)
 224{
 225        struct rb_node **new = &root->rb_node;
 226        struct rb_node *parent = NULL;
 227        struct hmm_buffer_object *this;
 228        unsigned int start = bo->start;
 229
 230        while (*new) {
 231                parent = *new;
 232                this = container_of(*new, struct hmm_buffer_object, node);
 233
 234                if (start < this->start)
 235                        new = &((*new)->rb_left);
 236                else
 237                        new = &((*new)->rb_right);
 238        }
 239
 240        kref_init(&bo->kref);
 241        bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;
 242
 243        rb_link_node(&bo->node, parent, new);
 244        rb_insert_color(&bo->node, root);
 245}
 246
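/*
 * Split a free bo for an allocation request: the first 'pgnr' pages are
 * carved into a newly allocated bo (returned to the caller), while 'bo'
 * itself shrinks to the remaining tail. The new bo is linked into
 * entire_bo_list just before 'bo', keeping the list ordered by ISP virtual
 * address.
 */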
 247static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
 248        struct hmm_buffer_object *bo,
 249        unsigned int pgnr)
 250{
 251        struct hmm_buffer_object *new_bo;
 252        unsigned long flags;
 253        int ret;
 254
 255        new_bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
 256        if (!new_bo) {
 257                dev_err(atomisp_dev, "%s: kmem_cache_alloc failed!\n", __func__);
 258                return NULL;
 259        }
 260        ret = __bo_init(bdev, new_bo, pgnr);
 261        if (ret) {
 262                dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
 263                kmem_cache_free(bdev->bo_cache, new_bo);
 264                return NULL;
 265        }
 266
 267        new_bo->start = bo->start;
 268        new_bo->end = new_bo->start + pgnr_to_size(pgnr);
 269        bo->start = new_bo->end;
 270        bo->pgnr = bo->pgnr - pgnr;
 271
 272        spin_lock_irqsave(&bdev->list_lock, flags);
 273        list_add_tail(&new_bo->list, &bo->list);
 274        spin_unlock_irqrestore(&bdev->list_lock, flags);
 275
 276        return new_bo;
 277}
 278
 279static void __bo_take_off_handling(struct hmm_buffer_object *bo)
 280{
 281        struct hmm_bo_device *bdev = bo->bdev;
 282        /* There are 4 situations when we take off a known bo from free rbtree:
 283         * 1. if bo->prev == NULL && bo->next == NULL, bo is a rbtree node
 284         *      and does not have a linked list after it; to take off this bo,
 285         *      we just need to erase bo directly and rebalance the free rbtree
 286         */
 287        if (!bo->prev && !bo->next) {
 288                rb_erase(&bo->node, &bdev->free_rbtree);
 289                /* 2. when bo->next != NULL && bo->prev == NULL, bo is a rbtree node
 290                 *      and has a linked list after it; to take off this bo we need
 291                 *      to erase bo first, then insert bo->next into the free rbtree
 292                 *      and rebalance it
 293                 */
 294        } else if (!bo->prev && bo->next) {
 295                bo->next->prev = NULL;
 296                rb_erase(&bo->node, &bdev->free_rbtree);
 297                __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
 298                bo->next = NULL;
 299                /* 3. when bo->prev != NULL && bo->next == NULL, bo is not a rbtree
 300                 *      node; bo is the last element of the linked list after the
 301                 *      rbtree node, so to take off this bo we just need to clear the
 302                 *      "prev/next" pointers, the free rbtree stays unchanged
 303                 */
 304        } else if (bo->prev && !bo->next) {
 305                bo->prev->next = NULL;
 306                bo->prev = NULL;
 307                /* 4. when bo->prev != NULL && bo->next != NULL, bo is not a rbtree
 308                 *      node; bo is in the middle of the linked list after the rbtree
 309                 *      node, so to take it off we link its neighbours to each other
 310                 *      and clear bo's own pointers, the free rbtree stays unchanged
 311                 */
 312        } else if (bo->prev && bo->next) {
 313                bo->next->prev = bo->prev;
 314                bo->prev->next = bo->next;
 315                bo->next = NULL;
 316                bo->prev = NULL;
 317        }
 318}
 319
 320static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
 321        struct hmm_buffer_object *next_bo)
 322{
 323        struct hmm_bo_device *bdev;
 324        unsigned long flags;
 325
 326        bdev = bo->bdev;
 327        next_bo->start = bo->start;
 328        next_bo->pgnr = next_bo->pgnr + bo->pgnr;
 329
 330        spin_lock_irqsave(&bdev->list_lock, flags);
 331        list_del(&bo->list);
 332        spin_unlock_irqrestore(&bdev->list_lock, flags);
 333
 334        kmem_cache_free(bo->bdev->bo_cache, bo);
 335
 336        return next_bo;
 337}
 338
 339/*
 340 * hmm_bo_device functions.
 341 */
 342int hmm_bo_device_init(struct hmm_bo_device *bdev,
 343                       struct isp_mmu_client *mmu_driver,
 344                       unsigned int vaddr_start,
 345                       unsigned int size)
 346{
 347        struct hmm_buffer_object *bo;
 348        unsigned long flags;
 349        int ret;
 350
 351        check_bodev_null_return(bdev, -EINVAL);
 352
 353        ret = isp_mmu_init(&bdev->mmu, mmu_driver);
 354        if (ret) {
 355                dev_err(atomisp_dev, "isp_mmu_init failed.\n");
 356                return ret;
 357        }
 358
 359        bdev->start = vaddr_start;
 360        bdev->pgnr = size_to_pgnr_ceil(size);
 361        bdev->size = pgnr_to_size(bdev->pgnr);
 362
 363        spin_lock_init(&bdev->list_lock);
 364        mutex_init(&bdev->rbtree_mutex);
 365
 366        bdev->flag = HMM_BO_DEVICE_INITED;
 367
 368        INIT_LIST_HEAD(&bdev->entire_bo_list);
 369        bdev->allocated_rbtree = RB_ROOT;
 370        bdev->free_rbtree = RB_ROOT;
 371
 372        bdev->bo_cache = kmem_cache_create("bo_cache",
 373                                           sizeof(struct hmm_buffer_object), 0, 0, NULL);
 374        if (!bdev->bo_cache) {
 375                dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
 376                isp_mmu_exit(&bdev->mmu);
 377                return -ENOMEM;
 378        }
 379
 380        bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
 381        if (!bo) {
 382                dev_err(atomisp_dev, "%s: kmem_cache_alloc failed!\n", __func__);
 383                isp_mmu_exit(&bdev->mmu);
 384                return -ENOMEM;
 385        }
 386
 387        ret = __bo_init(bdev, bo, bdev->pgnr);
 388        if (ret) {
 389                dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
 390                kmem_cache_free(bdev->bo_cache, bo);
 391                isp_mmu_exit(&bdev->mmu);
 392                return -EINVAL;
 393        }
 394
 395        spin_lock_irqsave(&bdev->list_lock, flags);
 396        list_add_tail(&bo->list, &bdev->entire_bo_list);
 397        spin_unlock_irqrestore(&bdev->list_lock, flags);
 398
 399        __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
 400
 401        return 0;
 402}
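/*
 * After hmm_bo_device_init() the device owns a bo_cache slab and a single
 * free buffer object spanning the whole ISP virtual range
 * [bdev->start, bdev->start + bdev->size), sitting alone in both
 * entire_bo_list and the free rbtree. Every later allocation is split off
 * from (and eventually merged back into) that range.
 */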
 403
 404struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
 405                                       unsigned int pgnr)
 406{
 407        struct hmm_buffer_object *bo, *new_bo;
 408        struct rb_root *root = &bdev->free_rbtree;
 409
 410        check_bodev_null_return(bdev, NULL);
 411        var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
 412                         "hmm_bo_device not inited yet.\n");
 413
 414        if (pgnr == 0) {
 415                dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
 416                return NULL;
 417        }
 418
 419        mutex_lock(&bdev->rbtree_mutex);
 420        bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
 421        if (!bo) {
 422                mutex_unlock(&bdev->rbtree_mutex);
 423                dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed",
 424                        __func__);
 425                return NULL;
 426        }
 427
 428        if (bo->pgnr > pgnr) {
 429                new_bo = __bo_break_up(bdev, bo, pgnr);
 430                if (!new_bo) {
 431                        mutex_unlock(&bdev->rbtree_mutex);
 432                        dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
 433                                __func__);
 434                        return NULL;
 435                }
 436
 437                __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
 438                __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
 439
 440                mutex_unlock(&bdev->rbtree_mutex);
 441                return new_bo;
 442        }
 443
 444        __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);
 445
 446        mutex_unlock(&bdev->rbtree_mutex);
 447        return bo;
 448}
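/*
 * Typical life cycle of a private buffer object, using only functions from
 * this file (an illustrative sketch with error handling omitted; the real
 * callers live elsewhere in the hmm layer):
 *
 *        struct hmm_buffer_object *bo;
 *
 *        bo = hmm_bo_alloc(bdev, pgnr);
 *        hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0, NULL, false);
 *        hmm_bo_bind(bo);
 *        ... the ISP can now use the virtual range starting at bo->start ...
 *        hmm_bo_unbind(bo);
 *        hmm_bo_free_pages(bo);
 *        hmm_bo_unref(bo);   final unref; hmm_bo_release() then returns
 *                            the address range to the free rbtree
 */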
 449
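/*
 * Give a bo's ISP address range back to the allocator: physically adjacent
 * free neighbours in entire_bo_list are merged first, then the (possibly
 * grown) bo is reinserted into the free rbtree.
 */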
 450void hmm_bo_release(struct hmm_buffer_object *bo)
 451{
 452        struct hmm_bo_device *bdev = bo->bdev;
 453        struct hmm_buffer_object *next_bo, *prev_bo;
 454
 455        mutex_lock(&bdev->rbtree_mutex);
 456
 457        /*
 458         * FIXME:
 459         *
 460         * how to destroy the bo when it is still MMAPED?
 461         *
 462         * ideally, this should not happen as hmm_bo_release
 463         * will only be called when kref reaches 0, and in the mmap
 464         * operation hmm_bo_ref will eventually be called.
 465         * so, if this happens, something has gone wrong.
 466         */
 467        if (bo->status & HMM_BO_MMAPED) {
 468                mutex_unlock(&bdev->rbtree_mutex);
 469                dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
 470                return;
 471        }
 472
 473        if (bo->status & HMM_BO_BINDED) {
 474                dev_warn(atomisp_dev, "the bo is still bound, unbind it first...\n");
 475                hmm_bo_unbind(bo);
 476        }
 477
 478        if (bo->status & HMM_BO_PAGE_ALLOCED) {
 479                dev_warn(atomisp_dev, "the pages are not freed, free pages first\n");
 480                hmm_bo_free_pages(bo);
 481        }
 482        if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
 483                dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
 484                hmm_bo_vunmap(bo);
 485        }
 486
 487        rb_erase(&bo->node, &bdev->allocated_rbtree);
 488
 489        prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
 490        next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);
 491
 492        if (bo->list.prev != &bdev->entire_bo_list &&
 493            prev_bo->end == bo->start &&
 494            (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
 495                __bo_take_off_handling(prev_bo);
 496                bo = __bo_merge(prev_bo, bo);
 497        }
 498
 499        if (bo->list.next != &bdev->entire_bo_list &&
 500            next_bo->start == bo->end &&
 501            (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
 502                __bo_take_off_handling(next_bo);
 503                bo = __bo_merge(bo, next_bo);
 504        }
 505
 506        __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
 507
 508        mutex_unlock(&bdev->rbtree_mutex);
 509        return;
 510}
 511
 512void hmm_bo_device_exit(struct hmm_bo_device *bdev)
 513{
 514        struct hmm_buffer_object *bo;
 515        unsigned long flags;
 516
 517        dev_dbg(atomisp_dev, "%s: entering!\n", __func__);
 518
 519        check_bodev_null_return_void(bdev);
 520
 521        /*
 522         * release all allocated bos even if they are in use;
 523         * they will all be merged back into one big free bo
 524         */
 525        while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
 526                hmm_bo_release(
 527                    rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));
 528
 529        dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
 530                __func__);
 531
 532        /* free all bos to release all ISP virtual memory */
 533        while (!list_empty(&bdev->entire_bo_list)) {
 534                bo = list_to_hmm_bo(bdev->entire_bo_list.next);
 535
 536                spin_lock_irqsave(&bdev->list_lock, flags);
 537                list_del(&bo->list);
 538                spin_unlock_irqrestore(&bdev->list_lock, flags);
 539
 540                kmem_cache_free(bdev->bo_cache, bo);
 541        }
 542
 543        dev_dbg(atomisp_dev, "%s: finished freeing all bos!\n", __func__);
 544
 545        kmem_cache_destroy(bdev->bo_cache);
 546
 547        isp_mmu_exit(&bdev->mmu);
 548}
 549
 550int hmm_bo_device_inited(struct hmm_bo_device *bdev)
 551{
 552        check_bodev_null_return(bdev, -EINVAL);
 553
 554        return bdev->flag == HMM_BO_DEVICE_INITED;
 555}
 556
 557int hmm_bo_allocated(struct hmm_buffer_object *bo)
 558{
 559        check_bo_null_return(bo, 0);
 560
 561        return bo->status & HMM_BO_ALLOCED;
 562}
 563
 564struct hmm_buffer_object *hmm_bo_device_search_start(
 565    struct hmm_bo_device *bdev, ia_css_ptr vaddr)
 566{
 567        struct hmm_buffer_object *bo;
 568
 569        check_bodev_null_return(bdev, NULL);
 570
 571        mutex_lock(&bdev->rbtree_mutex);
 572        bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
 573        if (!bo) {
 574                mutex_unlock(&bdev->rbtree_mutex);
 575                dev_err(atomisp_dev, "%s can not find bo with addr: 0x%x\n",
 576                        __func__, vaddr);
 577                return NULL;
 578        }
 579        mutex_unlock(&bdev->rbtree_mutex);
 580
 581        return bo;
 582}
 583
 584struct hmm_buffer_object *hmm_bo_device_search_in_range(
 585    struct hmm_bo_device *bdev, unsigned int vaddr)
 586{
 587        struct hmm_buffer_object *bo;
 588
 589        check_bodev_null_return(bdev, NULL);
 590
 591        mutex_lock(&bdev->rbtree_mutex);
 592        bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
 593        if (!bo) {
 594                mutex_unlock(&bdev->rbtree_mutex);
 595                dev_err(atomisp_dev, "%s cannot find bo containing addr: 0x%x\n",
 596                        __func__, vaddr);
 597                return NULL;
 598        }
 599        mutex_unlock(&bdev->rbtree_mutex);
 600
 601        return bo;
 602}
 603
 604struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
 605    struct hmm_bo_device *bdev, const void *vaddr)
 606{
 607        struct list_head *pos;
 608        struct hmm_buffer_object *bo;
 609        unsigned long flags;
 610
 611        check_bodev_null_return(bdev, NULL);
 612
 613        spin_lock_irqsave(&bdev->list_lock, flags);
 614        list_for_each(pos, &bdev->entire_bo_list) {
 615                bo = list_to_hmm_bo(pos);
 616                /* skip bos which have no vm_node allocated */
 617                if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
 618                        continue;
 619                if (bo->vmap_addr == vaddr)
 620                        goto found;
 621        }
 622        spin_unlock_irqrestore(&bdev->list_lock, flags);
 623        return NULL;
 624found:
 625        spin_unlock_irqrestore(&bdev->list_lock, flags);
 626        return bo;
 627}
 628
 629static void free_private_bo_pages(struct hmm_buffer_object *bo,
 630                                  struct hmm_pool *dypool,
 631                                  struct hmm_pool *repool,
 632                                  int free_pgnr)
 633{
 634        int i, ret;
 635
 636        for (i = 0; i < free_pgnr; i++) {
 637                switch (bo->page_obj[i].type) {
 638                case HMM_PAGE_TYPE_RESERVED:
 639                        if (repool->pops
 640                            && repool->pops->pool_free_pages) {
 641                                repool->pops->pool_free_pages(repool->pool_info,
 642                                                              &bo->page_obj[i]);
 643                                hmm_mem_stat.res_cnt--;
 644                        }
 645                        break;
 646                /*
 647                 * HMM_PAGE_TYPE_GENERAL indicates that the pages come from
 648                 * system memory, so when freeing them they should be put
 649                 * back into the dynamic pool.
 650                 */
 651                case HMM_PAGE_TYPE_DYNAMIC:
 652                case HMM_PAGE_TYPE_GENERAL:
 653                        if (dypool->pops
 654                            && dypool->pops->pool_inited
 655                            && dypool->pops->pool_inited(dypool->pool_info)) {
 656                                if (dypool->pops->pool_free_pages)
 657                                        dypool->pops->pool_free_pages(
 658                                            dypool->pool_info,
 659                                            &bo->page_obj[i]);
 660                                break;
 661                        }
 662
 663                        fallthrough;
 664
 665                /*
 666                 * if dynamic memory pool doesn't exist, need to free
 667                 * pages to system directly.
 668                 */
 669                default:
 670                        ret = set_pages_wb(bo->page_obj[i].page, 1);
 671                        if (ret)
 672                                dev_err(atomisp_dev,
 673                                        "set page to WB err ...ret = %d\n",
 674                                        ret);
 675                        /*
 676                         * W/A: set_pages_wb seldom returns -EFAULT, indicating
 677                         * that the page address is not in the valid range
 678                         * (0xffff880000000000~0xffffc7ffffffffff); __free_pages()
 679                         * would then panic. It is unclear why the page address is
 680                         * invalid; it may be memory corruption caused by low memory.
 681                         */
 682                        if (!ret) {
 683                                __free_pages(bo->page_obj[i].page, 0);
 684                                hmm_mem_stat.sys_size--;
 685                        }
 686                        break;
 687                }
 688        }
 689
 690        return;
 691}
 692
 693/* Allocate pages which will be used only by the ISP */
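/*
 * Allocation order used below: dynamic pool first, then the reserved pool,
 * then alloc_pages() for whatever is still missing. Note that 'lack_mem' is
 * initialised to true and never cleared, so in practice every alloc_pages()
 * iteration requests an HMM_MIN_ORDER block; the higher-order path is
 * effectively dormant.
 */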
 694static int alloc_private_pages(struct hmm_buffer_object *bo,
 695                               int from_highmem,
 696                               bool cached,
 697                               struct hmm_pool *dypool,
 698                               struct hmm_pool *repool)
 699{
 700        int ret;
 701        unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
 702        struct page *pages;
 703        gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
 704        int i, j;
 705        int failure_number = 0;
 706        bool reduce_order = false;
 707        bool lack_mem = true;
 708
 709        if (from_highmem)
 710                gfp |= __GFP_HIGHMEM;
 711
 712        pgnr = bo->pgnr;
 713
 714        bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
 715                                     GFP_KERNEL);
 716        if (unlikely(!bo->page_obj))
 717                return -ENOMEM;
 718
 719        i = 0;
 720        alloc_pgnr = 0;
 721
 722        /*
 723         * get physical pages from dynamic pages pool.
 724         */
 725        if (dypool->pops && dypool->pops->pool_alloc_pages) {
 726                alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
 727                             bo->page_obj, pgnr,
 728                             cached);
 729                hmm_mem_stat.dyc_size -= alloc_pgnr;
 730
 731                if (alloc_pgnr == pgnr)
 732                        return 0;
 733        }
 734
 735        pgnr -= alloc_pgnr;
 736        i += alloc_pgnr;
 737
 738        /*
 739         * get physical pages from reserved pages pool for atomisp.
 740         */
 741        if (repool->pops && repool->pops->pool_alloc_pages) {
 742                alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
 743                             &bo->page_obj[i], pgnr,
 744                             cached);
 745                hmm_mem_stat.res_cnt += alloc_pgnr;
 746                if (alloc_pgnr == pgnr)
 747                        return 0;
 748        }
 749
 750        pgnr -= alloc_pgnr;
 751        i += alloc_pgnr;
 752
 753        while (pgnr) {
 754                order = nr_to_order_bottom(pgnr);
 755                /*
 756                 * if we are short of memory, we will set the order to
 757                 * HMM_MIN_ORDER every time.
 758                 */
 759                if (lack_mem)
 760                        order = HMM_MIN_ORDER;
 761                else if (order > HMM_MAX_ORDER)
 762                        order = HMM_MAX_ORDER;
 763retry:
 764                /*
 765                 * When order > HMM_MIN_ORDER, for performance reasons we don't
 766                 * want alloc_pages() to sleep. In case it fails and falls back
 767                 * to HMM_MIN_ORDER, or in case the requested order is originally
 768                 * the minimum value, we can allow alloc_pages() to sleep for
 769                 * the sake of robustness.
 770                 *
 771                 * REVISIT: why is __GFP_FS necessary?
 772                 */
 773                if (order == HMM_MIN_ORDER) {
 774                        gfp &= ~GFP_NOWAIT;
 775                        gfp |= __GFP_RECLAIM | __GFP_FS;
 776                }
 777
 778                pages = alloc_pages(gfp, order);
 779                if (unlikely(!pages)) {
 780                        /*
 781                         * in the low-memory case, if page allocation fails,
 782                         * we retry to see if an order-0 allocation could
 783                         * succeed. if order 0 fails too, that means there is
 784                         * no memory left.
 785                         */
 786                        if (order == HMM_MIN_ORDER) {
 787                                dev_err(atomisp_dev,
 788                                        "%s: cannot allocate pages\n",
 789                                        __func__);
 790                                goto cleanup;
 791                        }
 792                        order = HMM_MIN_ORDER;
 793                        failure_number++;
 794                        reduce_order = true;
 795                        /*
 796                         * if we fail twice in a row, we consider ourselves
 797                         * to be short of memory now.
 798                         */
 799                        if (failure_number == 2) {
 800                                lack_mem = true;
 801                                failure_number = 0;
 802                        }
 803                        goto retry;
 804                } else {
 805                        blk_pgnr = order_to_nr(order);
 806
 807                        if (!cached) {
 808                                /*
 809                                 * set memory to uncacheable -- UC_MINUS
 810                                 */
 811                                ret = set_pages_uc(pages, blk_pgnr);
 812                                if (ret) {
 813                                        dev_err(atomisp_dev,
 814                                                "set page uncacheable failed.\n");
 815
 816                                        __free_pages(pages, order);
 817
 818                                        goto cleanup;
 819                                }
 820                        }
 821
 822                        for (j = 0; j < blk_pgnr; j++) {
 823                                bo->page_obj[i].page = pages + j;
 824                                bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
 825                        }
 826
 827                        pgnr -= blk_pgnr;
 828                        hmm_mem_stat.sys_size += blk_pgnr;
 829
 830                        /*
 831                         * if order is not reduced this time, clear
 832                         * failure_number.
 833                         */
 834                        if (reduce_order)
 835                                reduce_order = false;
 836                        else
 837                                failure_number = 0;
 838                }
 839        }
 840
 841        return 0;
 842cleanup:
 843        alloc_pgnr = i;
 844        free_private_bo_pages(bo, dypool, repool, alloc_pgnr);
 845
 846        kfree(bo->page_obj);
 847
 848        return -ENOMEM;
 849}
 850
 851static void free_private_pages(struct hmm_buffer_object *bo,
 852                               struct hmm_pool *dypool,
 853                               struct hmm_pool *repool)
 854{
 855        free_private_bo_pages(bo, dypool, repool, bo->pgnr);
 856
 857        kfree(bo->page_obj);
 858}
 859
 860static void free_user_pages(struct hmm_buffer_object *bo,
 861                            unsigned int page_nr)
 862{
 863        int i;
 864
 865        hmm_mem_stat.usr_size -= bo->pgnr;
 866
 867        if (bo->mem_type == HMM_BO_MEM_TYPE_PFN) {
 868                unpin_user_pages(bo->pages, page_nr);
 869        } else {
 870                for (i = 0; i < page_nr; i++)
 871                        put_page(bo->pages[i]);
 872        }
 873        kfree(bo->pages);
 874        kfree(bo->page_obj);
 875}
 876
 877/*
 878 * Convert a user space virtual address range into a list of pages
 879 */
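/*
 * Two pinning paths are used below: VM_IO/VM_PFNMAP vmas (e.g. a frame
 * buffer mapped into user space by another kernel driver) go through
 * pin_user_pages() with FOLL_LONGTERM, while ordinary user memory goes
 * through get_user_pages_fast(). bo->mem_type records which path was taken
 * so that free_user_pages() can release the pages the matching way
 * (unpin_user_pages() vs. put_page()).
 */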
 880static int alloc_user_pages(struct hmm_buffer_object *bo,
 881                            const void __user *userptr, bool cached)
 882{
 883        int page_nr;
 884        int i;
 885        struct vm_area_struct *vma;
 886        struct page **pages;
 887
 888        pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
 889        if (unlikely(!pages))
 890                return -ENOMEM;
 891
 892        bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object),
 893                                     GFP_KERNEL);
 894        if (unlikely(!bo->page_obj)) {
 895                kfree(pages);
 896                return -ENOMEM;
 897        }
 898
 899        mutex_unlock(&bo->mutex);
 900        mmap_read_lock(current->mm);
 901        vma = find_vma(current->mm, (unsigned long)userptr);
 902        mmap_read_unlock(current->mm);
 903        if (!vma) {
 904                dev_err(atomisp_dev, "find_vma failed\n");
 905                kfree(bo->page_obj);
 906                kfree(pages);
 907                mutex_lock(&bo->mutex);
 908                return -EFAULT;
 909        }
 910        mutex_lock(&bo->mutex);
 911        /*
 912         * Handle a frame buffer allocated by another kernel-space driver
 913         * and mapped into user space
 914         */
 915
 916        userptr = untagged_addr(userptr);
 917
 918        bo->pages = pages;
 919
 920        if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
 921                page_nr = pin_user_pages((unsigned long)userptr, bo->pgnr,
 922                                         FOLL_LONGTERM | FOLL_WRITE,
 923                                         pages, NULL);
 924                bo->mem_type = HMM_BO_MEM_TYPE_PFN;
 925        } else {
 926                /* Handle a frame buffer allocated in user space */
 927                mutex_unlock(&bo->mutex);
 928                page_nr = get_user_pages_fast((unsigned long)userptr,
 929                                              (int)(bo->pgnr), 1, pages);
 930                mutex_lock(&bo->mutex);
 931                bo->mem_type = HMM_BO_MEM_TYPE_USER;
 932        }
 933
 934        dev_dbg(atomisp_dev, "%s: %d %s pages were allocated as 0x%08x\n",
 935                __func__,
 936                bo->pgnr,
 937                bo->mem_type == HMM_BO_MEM_TYPE_USER ? "user" : "pfn", page_nr);
 938
 939        hmm_mem_stat.usr_size += bo->pgnr;
 940
 941        /* can be written by caller, not forced */
 942        if (page_nr != bo->pgnr) {
 943                dev_err(atomisp_dev,
 944                        "get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n",
 945                        bo->pgnr, page_nr);
 946                if (page_nr < 0)
 947                        page_nr = 0;
 948                goto out_of_mem;
 949        }
 950
 951        for (i = 0; i < bo->pgnr; i++) {
 952                bo->page_obj[i].page = pages[i];
 953                bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
 954        }
 955
 956        return 0;
 957
 958out_of_mem:
 959
 960        free_user_pages(bo, page_nr);
 961
 962        return -ENOMEM;
 963}
 964
 965/*
 966 * allocate/free physical pages for the bo.
 967 *
 968 * type indicates where the pages come from. currently we have 3 types
 969 * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE.
 970 *
 971 * from_highmem is only valid when type is HMM_BO_PRIVATE; it will
 972 * try to alloc memory from highmem if from_highmem is set.
 973 *
 974 * userptr is only valid when type is HMM_BO_USER; it indicates
 975 * the start address in the user space task.
 976 *
 977 * from_highmem and userptr will both be ignored when type is
 978 * HMM_BO_SHARE.
 979 */
 980int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
 981                       enum hmm_bo_type type, int from_highmem,
 982                       const void __user *userptr, bool cached)
 983{
 984        int ret = -EINVAL;
 985
 986        check_bo_null_return(bo, -EINVAL);
 987
 988        mutex_lock(&bo->mutex);
 989        check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
 990
 991        /*
 992         * TO DO:
 993         * add HMM_BO_SHARE type
 994         */
 995        if (type == HMM_BO_PRIVATE) {
 996                ret = alloc_private_pages(bo, from_highmem,
 997                                          cached, &dynamic_pool, &reserved_pool);
 998        } else if (type == HMM_BO_USER) {
 999                ret = alloc_user_pages(bo, userptr, cached);
1000        } else {
1001                dev_err(atomisp_dev, "invalid buffer type.\n");
1002                ret = -EINVAL;
1003        }
1004        if (ret)
1005                goto alloc_err;
1006
1007        bo->type = type;
1008
1009        bo->status |= HMM_BO_PAGE_ALLOCED;
1010
1011        mutex_unlock(&bo->mutex);
1012
1013        return 0;
1014
1015alloc_err:
1016        mutex_unlock(&bo->mutex);
1017        dev_err(atomisp_dev, "alloc pages err...\n");
1018        return ret;
1019status_err:
1020        mutex_unlock(&bo->mutex);
1021        dev_err(atomisp_dev,
1022                "buffer object already has pages allocated.\n");
1023        return -EINVAL;
1024}
1025
1026/*
1027 * free physical pages of the bo.
1028 */
1029void hmm_bo_free_pages(struct hmm_buffer_object *bo)
1030{
1031        check_bo_null_return_void(bo);
1032
1033        mutex_lock(&bo->mutex);
1034
1035        check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);
1036
1037        /* clear the flag anyway. */
1038        bo->status &= (~HMM_BO_PAGE_ALLOCED);
1039
1040        if (bo->type == HMM_BO_PRIVATE)
1041                free_private_pages(bo, &dynamic_pool, &reserved_pool);
1042        else if (bo->type == HMM_BO_USER)
1043                free_user_pages(bo, bo->pgnr);
1044        else
1045                dev_err(atomisp_dev, "invalid buffer type.\n");
1046        mutex_unlock(&bo->mutex);
1047
1048        return;
1049
1050status_err2:
1051        mutex_unlock(&bo->mutex);
1052        dev_err(atomisp_dev,
1053                "buffer object not page allocated yet.\n");
1054}
1055
1056int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
1057{
1058        check_bo_null_return(bo, 0);
1059
1060        return bo->status & HMM_BO_PAGE_ALLOCED;
1061}
1062
1063/*
1064 * get physical page info of the bo.
1065 */
1066int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
1067                         struct hmm_page_object **page_obj, int *pgnr)
1068{
1069        check_bo_null_return(bo, -EINVAL);
1070
1071        mutex_lock(&bo->mutex);
1072
1073        check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1074
1075        *page_obj = bo->page_obj;
1076        *pgnr = bo->pgnr;
1077
1078        mutex_unlock(&bo->mutex);
1079
1080        return 0;
1081
1082status_err:
1083        dev_err(atomisp_dev,
1084                "buffer object not page allocated yet.\n");
1085        mutex_unlock(&bo->mutex);
1086        return -EINVAL;
1087}
1088
1089/*
1090 * bind the physical pages to a virtual address space.
1091 */
1092int hmm_bo_bind(struct hmm_buffer_object *bo)
1093{
1094        int ret;
1095        unsigned int virt;
1096        struct hmm_bo_device *bdev;
1097        unsigned int i;
1098
1099        check_bo_null_return(bo, -EINVAL);
1100
1101        mutex_lock(&bo->mutex);
1102
1103        check_bo_status_yes_goto(bo,
1104                                 HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
1105                                 status_err1);
1106
1107        check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);
1108
1109        bdev = bo->bdev;
1110
1111        virt = bo->start;
1112
1113        for (i = 0; i < bo->pgnr; i++) {
1114                ret =
1115                    isp_mmu_map(&bdev->mmu, virt,
1116                                page_to_phys(bo->page_obj[i].page), 1);
1117                if (ret)
1118                        goto map_err;
1119                virt += (1 << PAGE_SHIFT);
1120        }
1121
1122        /*
1123         * flush the TLB here.
1124         *
1125         * theoretically, we do not need to flush the TLB as we did not
1126         * change any existing address mappings, but for Silicon Hive's MMU
1127         * there is really a bug here. I guess that when fetching PTEs (page
1128         * table entries) into the TLB, the MMU fetches additional INVALID
1129         * PTEs automatically for performance reasons. e.g. we only set up 1
1130         * page address mapping, meaning 1 PTE is updated, but the MMU
1131         * fetches 4 PTEs at a time, so the additional 3 PTEs are invalid.
1132         */
1133        if (bo->start != 0x0)
1134                isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
1135                                        (bo->pgnr << PAGE_SHIFT));
1136
1137        bo->status |= HMM_BO_BINDED;
1138
1139        mutex_unlock(&bo->mutex);
1140
1141        return 0;
1142
1143map_err:
1144        /* unmap the pages already mapped into the related virtual address space */
1145        virt = bo->start;
1146        for ( ; i > 0; i--) {
1147                isp_mmu_unmap(&bdev->mmu, virt, 1);
1148                virt += pgnr_to_size(1);
1149        }
1150
1151        mutex_unlock(&bo->mutex);
1152        dev_err(atomisp_dev,
1153                "setup MMU address mapping failed.\n");
1154        return ret;
1155
1156status_err2:
1157        mutex_unlock(&bo->mutex);
1158        dev_err(atomisp_dev, "buffer object already bound.\n");
1159        return -EINVAL;
1160status_err1:
1161        mutex_unlock(&bo->mutex);
1162        dev_err(atomisp_dev,
1163                "buffer object vm_node or page not allocated.\n");
1164        return -EINVAL;
1165}
1166
1167/*
1168 * unbind the physical pages from the related virtual address space.
1169 */
1170void hmm_bo_unbind(struct hmm_buffer_object *bo)
1171{
1172        unsigned int virt;
1173        struct hmm_bo_device *bdev;
1174        unsigned int i;
1175
1176        check_bo_null_return_void(bo);
1177
1178        mutex_lock(&bo->mutex);
1179
1180        check_bo_status_yes_goto(bo,
1181                                 HMM_BO_PAGE_ALLOCED |
1182                                 HMM_BO_ALLOCED |
1183                                 HMM_BO_BINDED, status_err);
1184
1185        bdev = bo->bdev;
1186
1187        virt = bo->start;
1188
1189        for (i = 0; i < bo->pgnr; i++) {
1190                isp_mmu_unmap(&bdev->mmu, virt, 1);
1191                virt += pgnr_to_size(1);
1192        }
1193
1194        /*
1195         * flush TLB as the address mapping has been removed and
1196         * related TLBs should be invalidated.
1197         */
1198        isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
1199                                (bo->pgnr << PAGE_SHIFT));
1200
1201        bo->status &= (~HMM_BO_BINDED);
1202
1203        mutex_unlock(&bo->mutex);
1204
1205        return;
1206
1207status_err:
1208        mutex_unlock(&bo->mutex);
1209        dev_err(atomisp_dev,
1210                "buffer vm or pages not allocated or not bound yet.\n");
1211}
1212
1213int hmm_bo_binded(struct hmm_buffer_object *bo)
1214{
1215        int ret;
1216
1217        check_bo_null_return(bo, 0);
1218
1219        mutex_lock(&bo->mutex);
1220
1221        ret = bo->status & HMM_BO_BINDED;
1222
1223        mutex_unlock(&bo->mutex);
1224
1225        return ret;
1226}
1227
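/*
 * Map the bo's pages into one contiguous kernel virtual range. An existing
 * mapping is reused when its cache attribute matches the request; otherwise
 * it is torn down and rebuilt with PAGE_KERNEL (cached) or
 * PAGE_KERNEL_NOCACHE (uncached).
 */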
1228void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
1229{
1230        struct page **pages;
1231        int i;
1232
1233        check_bo_null_return(bo, NULL);
1234
1235        mutex_lock(&bo->mutex);
1236        if (((bo->status & HMM_BO_VMAPED) && !cached) ||
1237            ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
1238                mutex_unlock(&bo->mutex);
1239                return bo->vmap_addr;
1240        }
1241
1242        /* the cached status needs to change, so vunmap first */
1243        if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
1244                vunmap(bo->vmap_addr);
1245                bo->vmap_addr = NULL;
1246                bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
1247        }
1248
1249        pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL);
1250        if (unlikely(!pages)) {
1251                mutex_unlock(&bo->mutex);
1252                return NULL;
1253        }
1254
1255        for (i = 0; i < bo->pgnr; i++)
1256                pages[i] = bo->page_obj[i].page;
1257
1258        bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
1259                             cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
1260        if (unlikely(!bo->vmap_addr)) {
1261                kfree(pages);
1262                mutex_unlock(&bo->mutex);
1263                dev_err(atomisp_dev, "vmap failed...\n");
1264                return NULL;
1265        }
1266        bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
1267
1268        kfree(pages);
1269
1270        mutex_unlock(&bo->mutex);
1271        return bo->vmap_addr;
1272}
1273
1274void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
1275{
1276        check_bo_null_return_void(bo);
1277
1278        mutex_lock(&bo->mutex);
1279        if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
1280                mutex_unlock(&bo->mutex);
1281                return;
1282        }
1283
1284        clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
1285        mutex_unlock(&bo->mutex);
1286}
1287
1288void hmm_bo_vunmap(struct hmm_buffer_object *bo)
1289{
1290        check_bo_null_return_void(bo);
1291
1292        mutex_lock(&bo->mutex);
1293        if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
1294                vunmap(bo->vmap_addr);
1295                bo->vmap_addr = NULL;
1296                bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
1297        }
1298
1299        mutex_unlock(&bo->mutex);
1300        return;
1301}
1302
1303void hmm_bo_ref(struct hmm_buffer_object *bo)
1304{
1305        check_bo_null_return_void(bo);
1306
1307        kref_get(&bo->kref);
1308}
1309
1310static void kref_hmm_bo_release(struct kref *kref)
1311{
1312        if (!kref)
1313                return;
1314
1315        hmm_bo_release(kref_to_hmm_bo(kref));
1316}
1317
1318void hmm_bo_unref(struct hmm_buffer_object *bo)
1319{
1320        check_bo_null_return_void(bo);
1321
1322        kref_put(&bo->kref, kref_hmm_bo_release);
1323}
1324
1325static void hmm_bo_vm_open(struct vm_area_struct *vma)
1326{
1327        struct hmm_buffer_object *bo =
1328            (struct hmm_buffer_object *)vma->vm_private_data;
1329
1330        check_bo_null_return_void(bo);
1331
1332        hmm_bo_ref(bo);
1333
1334        mutex_lock(&bo->mutex);
1335
1336        bo->status |= HMM_BO_MMAPED;
1337
1338        bo->mmap_count++;
1339
1340        mutex_unlock(&bo->mutex);
1341}
1342
1343static void hmm_bo_vm_close(struct vm_area_struct *vma)
1344{
1345        struct hmm_buffer_object *bo =
1346            (struct hmm_buffer_object *)vma->vm_private_data;
1347
1348        check_bo_null_return_void(bo);
1349
1350        hmm_bo_unref(bo);
1351
1352        mutex_lock(&bo->mutex);
1353
1354        bo->mmap_count--;
1355
1356        if (!bo->mmap_count) {
1357                bo->status &= (~HMM_BO_MMAPED);
1358                vma->vm_private_data = NULL;
1359        }
1360
1361        mutex_unlock(&bo->mutex);
1362}
1363
1364static const struct vm_operations_struct hmm_bo_vm_ops = {
1365        .open = hmm_bo_vm_open,
1366        .close = hmm_bo_vm_close,
1367};
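/*
 * hmm_bo_vm_open()/hmm_bo_vm_close() keep a kref and a per-bo mmap_count,
 * so a buffer object stays alive and flagged HMM_BO_MMAPED for as long as
 * at least one user space mapping of it exists.
 */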
1368
1369/*
1370 * mmap the bo to user space.
1371 */
1372int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
1373{
1374        unsigned int start, end;
1375        unsigned int virt;
1376        unsigned int pgnr, i;
1377        unsigned int pfn;
1378
1379        check_bo_null_return(bo, -EINVAL);
1380
1381        check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1382
1383        pgnr = bo->pgnr;
1384        start = vma->vm_start;
1385        end = vma->vm_end;
1386
1387        /*
1388         * the vma's virtual address space size and the buffer object's
1389         * size must be the same.
1390         */
1391        if ((start + pgnr_to_size(pgnr)) != end) {
1392                dev_warn(atomisp_dev,
1393                         "vma's address space size not equal to buffer object's size");
1394                return -EINVAL;
1395        }
1396
1397        virt = vma->vm_start;
1398        for (i = 0; i < pgnr; i++) {
1399                pfn = page_to_pfn(bo->page_obj[i].page);
1400                if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
1401                        dev_warn(atomisp_dev,
1402                                 "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
1403                                 virt, pfn, 1);
1404                        return -EINVAL;
1405                }
1406                virt += PAGE_SIZE;
1407        }
1408
1409        vma->vm_private_data = bo;
1410
1411        vma->vm_ops = &hmm_bo_vm_ops;
1412        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
1413
1414        /*
1415         * call hmm_bo_vm_open explicitly.
1416         */
1417        hmm_bo_vm_open(vma);
1418
1419        return 0;
1420
1421status_err:
1422        dev_err(atomisp_dev, "buffer page not allocated yet.\n");
1423        return -EINVAL;
1424}
1425