/* linux/drivers/staging/mali/DX910-SW-99002-r5p2-00rel0/driver/src/devicedrv/mali/linux/mali_memory_cow.c */
/*
 * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
  10#include <linux/mm.h>
  11#include <linux/list.h>
  12#include <linux/mm_types.h>
  13#include <linux/fs.h>
  14#include <linux/dma-mapping.h>
  15#include <linux/highmem.h>
  16#include <asm/cacheflush.h>
  17#include <linux/sched.h>
  18#ifdef CONFIG_ARM
  19#include <asm/outercache.h>
  20#endif
  21#include <asm/dma-mapping.h>
  22
  23#include "mali_memory.h"
  24#include "mali_kernel_common.h"
  25#include "mali_uk_types.h"
  26#include "mali_osk.h"
  27#include "mali_kernel_linux.h"
  28#include "mali_memory_cow.h"
  29#include "mali_memory_block_alloc.h"
  30
/**
 * Allocate one page for a COW backend and flush the cache.
 */
  34static struct page *mali_mem_cow_alloc_page(void)
  35
  36{
  37        mali_mem_os_mem os_mem;
  38        struct mali_page_node *node;
  39        struct page *new_page;
  40
  41        int ret = 0;
  42        /* allocate pages from os mem */
  43        ret = mali_mem_os_alloc_pages(&os_mem, _MALI_OSK_MALI_PAGE_SIZE);
  44
  45        if (ret) {
  46                return NULL;
  47        }
  48
  49        MALI_DEBUG_ASSERT(1 == os_mem.count);
  50
  51        node = _MALI_OSK_CONTAINER_OF(os_mem.pages.next, struct mali_page_node, list);
  52        new_page = node->page;
  53        node->page = NULL;
  54        list_del(&node->list);
  55        kfree(node);
  56
  57        return new_page;
  58}
  59
  60
  61static struct list_head *_mali_memory_cow_get_node_list(mali_mem_backend *target_bk,
  62                u32 target_offset,
  63                u32 target_size)
  64{
  65        MALI_DEBUG_ASSERT(MALI_MEM_OS == target_bk->type || MALI_MEM_COW == target_bk->type ||
  66                          MALI_MEM_BLOCK == target_bk->type);
  67
  68        if (MALI_MEM_OS == target_bk->type) {
  69                MALI_DEBUG_ASSERT(&target_bk->os_mem);
  70                MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->os_mem.count);
  71                return &target_bk->os_mem.pages;
  72        } else if (MALI_MEM_COW == target_bk->type) {
  73                MALI_DEBUG_ASSERT(&target_bk->cow_mem);
  74                MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->cow_mem.count);
  75                return  &target_bk->cow_mem.pages;
  76        } else if (MALI_MEM_BLOCK == target_bk->type) {
  77                MALI_DEBUG_ASSERT(&target_bk->block_mem);
  78                MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->block_mem.count);
  79                return  &target_bk->block_mem.pfns;
  80        }
  81
  82        return NULL;
  83}
  84
/**
 * Do COW for os memory - also supports COW of memory from bank memory.
 * The range_start/size can be zero, which means cow_modify_range will be
 * called later.
 * This function allocates new pages (from os mem) for the COW backend for
 * the modified range. Pages outside the modified range are kept and get an
 * extra reference.
 *
 * @target_bk - target allocation's backend (the allocation to COW)
 * @target_offset - offset in the target allocation to COW (4K aligned; supports COW of memory allocated from a memory bank)
 * @target_size - size of the target allocation to COW (for memory bank support)
 * @backend - COW backend
 * @range_start - offset of the modified range (4K aligned)
 * @range_size - size of the modified range
 */
_mali_osk_errcode_t mali_memory_cow_os_memory(mali_mem_backend *target_bk,
		u32 target_offset,
		u32 target_size,
		mali_mem_backend *backend,
		u32 range_start,
		u32 range_size)
{
	mali_mem_cow *cow = &backend->cow_mem;
	struct mali_page_node *m_page, *m_tmp, *page_node;
	int target_page = 0;
	struct page *new_page;
	struct list_head *pages = NULL;

	/* Pick the source page list according to the target backend type
	 * (OS / COW / BLOCK); NULL means an unsupported backend type. */
	pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size);

	if (NULL == pages) {
		MALI_DEBUG_ASSERT(0);
		return _MALI_OSK_ERR_FAULT;
	}

	/* The COW backend must start out empty. */
	MALI_DEBUG_ASSERT(0 == cow->count);

	INIT_LIST_HEAD(&cow->pages);
	/* Hold the target backend's mutex while walking its page list so the
	 * list cannot change under us. */
	mutex_lock(&target_bk->mutex);
	list_for_each_entry_safe(m_page, m_tmp, pages, list) {
		/* add page from (target_offset,target_offset+size) to cow backend */
		if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) &&
		    (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) {

			/* allocate a new page node, alway use OS memory for COW */
			page_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS);

			if (NULL == page_node) {
				mutex_unlock(&target_bk->mutex);
				goto error;
			}

			INIT_LIST_HEAD(&page_node->list);

			/* check if in the modified range*/
			if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
			    (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
				/* need to allocate a new page */
				/* To simplify the case, All COW memory is allocated from os memory ?*/
				new_page = mali_mem_cow_alloc_page();

				if (NULL == new_page) {
					kfree(page_node);
					mutex_unlock(&target_bk->mutex);
					goto error;
				}

				_mali_page_node_add_page(page_node, new_page);
			} else {
				/* Outside the modified range: share the original page.
				 * Block pages are tracked via their block item, OS/COW
				 * pages via their struct page. */
				if (m_page->type != MALI_PAGE_NODE_BLOCK) {
					_mali_page_node_add_page(page_node, m_page->page);
				} else {
					page_node->type = MALI_PAGE_NODE_BLOCK;
					_mali_page_node_add_block_item(page_node, m_page->blk_it);
				}

				/* add ref to this page */
				_mali_page_node_ref(m_page);
			}

			/* add it to COW backend page list */
			list_add_tail(&page_node->list, &cow->pages);
			cow->count++;
		}
		target_page++;
	}
	mutex_unlock(&target_bk->mutex);
	return _MALI_OSK_ERR_OK;
error:
	/* Release every page already attached to the COW backend; the backend
	 * was never Mali-mapped at this point, hence MALI_FALSE. */
	mali_mem_cow_release(backend, MALI_FALSE);
	return _MALI_OSK_ERR_FAULT;
}
 177
 178
 179_mali_osk_errcode_t _mali_mem_put_page_node(mali_page_node *node)
 180{
 181        if (node->type == MALI_PAGE_NODE_OS) {
 182                return mali_mem_os_put_page(node->page);
 183        } else if (node->type == MALI_PAGE_NODE_BLOCK) {
 184                return mali_mem_block_unref_node(node);
 185        } else
 186                MALI_DEBUG_ASSERT(0);
 187        return _MALI_OSK_ERR_FAULT;
 188}
 189
 190
/**
 * Modify a range of an existing COW backend.
 * @backend - COW backend
 * @range_start - offset of the modified range (4K aligned)
 * @range_size - size of the modified range (in bytes)
 */
_mali_osk_errcode_t mali_memory_cow_modify_range(mali_mem_backend *backend,
		u32 range_start,
		u32 range_size)
{
	mali_mem_allocation *alloc = NULL;
	mali_mem_cow *cow = &backend->cow_mem;
	struct mali_page_node *m_page, *m_tmp;
	LIST_HEAD(pages);
	struct page *new_page;
	u32 count = 0;
	s32 change_pages_nr = 0;

	/* Both the range start and size must be page aligned. */
	if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
	if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);

	alloc = backend->mali_allocation;
	MALI_DEBUG_ASSERT_POINTER(alloc);

	MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);
	MALI_DEBUG_ASSERT(((range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE) <= cow->count);

	mutex_lock(&backend->mutex);

	/* free pages*/
	list_for_each_entry_safe(m_page, m_tmp, &cow->pages, list) {

		/* check if in the modified range*/
		if ((count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) &&
		    (count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) {
			/* Replace the shared page with a freshly allocated private one. */
			new_page = mali_mem_cow_alloc_page();

			if (NULL == new_page) {
				goto error;
			}
			/* ref count > 1 means the page was shared; this session now
			 * gains a private page, so track it for accounting. */
			if (1 != _mali_page_node_get_ref_count(m_page))
				change_pages_nr++;
			/* unref old page*/
			if (_mali_mem_put_page_node(m_page)) {
				__free_page(new_page);
				goto error;
			}
			/* add new page*/
			/* always use OS for COW*/
			m_page->type = MALI_PAGE_NODE_OS;
			_mali_page_node_add_page(m_page, new_page);

		}
		count++;
	}
	cow->change_pages_nr  = change_pages_nr;
	mutex_unlock(&backend->mutex);

	MALI_DEBUG_ASSERT(MALI_MEM_COW == alloc->type);

	/* ZAP cpu mapping(modified range) if have
	 * those cpu mapping will be handled in page fault
	 */
	if (0 != alloc->cpu_mapping.addr) {
		MALI_DEBUG_ASSERT(0 != alloc->backend_handle);
		MALI_DEBUG_ASSERT(NULL != alloc->cpu_mapping.vma);
		MALI_DEBUG_ASSERT(alloc->cpu_mapping.vma->vm_end - alloc->cpu_mapping.vma->vm_start >= range_size);
		zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size);
	}
	return _MALI_OSK_ERR_OK;
error:
	mutex_unlock(&backend->mutex);
	return _MALI_OSK_ERR_FAULT;

}
 266
 267
/**
 * Allocate pages for a COW backend.
 * @alloc - allocation for the COW allocation
 * @target_bk - target allocation's backend (the allocation to COW)
 * @target_offset - offset in the target allocation to COW (4K aligned; supports COW of memory allocated from a memory bank)
 * @target_size - size of the target allocation to COW (in bytes; for memory bank support)
 * @backend - COW backend
 * @range_start - offset of the modified range (4K aligned)
 * @range_size - size of the modified range (in bytes)
 */
 278_mali_osk_errcode_t mali_memory_do_cow(mali_mem_backend *target_bk,
 279                                       u32 target_offset,
 280                                       u32 target_size,
 281                                       mali_mem_backend *backend,
 282                                       u32 range_start,
 283                                       u32 range_size)
 284{
 285        struct mali_session_data *session = backend->mali_allocation->session;
 286
 287        MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
 288
 289        /* size & offset must be a multiple of the system page size */
 290        if (target_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
 291        if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
 292        if (target_offset % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
 293        if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
 294
 295        /* check backend type */
 296        MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);
 297
 298        switch (target_bk->type) {
 299        case MALI_MEM_OS:
 300        case MALI_MEM_COW:
 301        case MALI_MEM_BLOCK:
 302                return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size);
 303                break;
 304        case MALI_MEM_EXTERNAL:
 305                /*NOT support yet*/
 306                MALI_DEBUG_ASSERT(0);
 307                break;
 308        case MALI_MEM_DMA_BUF:
 309                /*NOT support yet*/
 310                MALI_DEBUG_ASSERT(0);
 311                break;
 312        case MALI_MEM_UMP:
 313                /*NOT support yet*/
 314                MALI_DEBUG_ASSERT(0);
 315                break;
 316        default:
 317                /*Not support yet*/
 318                MALI_DEBUG_ASSERT(0);
 319                break;
 320        }
 321        return _MALI_OSK_ERR_OK;
 322}
 323
 324
/**
 * Map COW backend memory to Mali.
 * Supports OS/BLOCK mali_page_node types.
 */
 329int mali_mem_cow_mali_map(mali_mem_backend *mem_bkend, u32 range_start, u32 range_size)
 330{
 331        mali_mem_allocation *cow_alloc;
 332        struct mali_page_node *m_page;
 333        struct mali_session_data *session;
 334        struct mali_page_directory *pagedir;
 335        u32 virt, start;
 336
 337        cow_alloc = mem_bkend->mali_allocation;
 338        virt = cow_alloc->mali_vma_node.vm_node.start;
 339        start = virt;
 340
 341        MALI_DEBUG_ASSERT_POINTER(mem_bkend);
 342        MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
 343        MALI_DEBUG_ASSERT_POINTER(cow_alloc);
 344
 345        session = cow_alloc->session;
 346        pagedir = session->page_directory;
 347        MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS);
 348        list_for_each_entry(m_page, &mem_bkend->cow_mem.pages, list) {
 349                if ((virt - start >= range_start) && (virt - start < range_start + range_size)) {
 350                        dma_addr_t phys = _mali_page_node_get_phy_addr(m_page);
 351#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
 352                        MALI_DEBUG_ASSERT(0 == (phys >> 32));
 353#endif
 354                        /* need to zap page table cache!*/
 355                        mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys,
 356                                                MALI_MMU_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
 357                }
 358                virt += MALI_MMU_PAGE_SIZE;
 359        }
 360        return 0;
 361}
 362
/**
 * Map a COW backend to the CPU.
 * Supports OS/BLOCK memory.
 */
 367int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
 368{
 369        mali_mem_cow *cow = &mem_bkend->cow_mem;
 370        struct mali_page_node *m_page;
 371        int ret;
 372        unsigned long addr = vma->vm_start;
 373        MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW);
 374
 375        list_for_each_entry(m_page, &cow->pages, list) {
 376                /* We should use vm_insert_page, but it does a dcache

 377                 * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.

 378                ret = vm_insert_page(vma, addr, page);

 379                */
 380                ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
 381
 382                if (unlikely(0 != ret)) {
 383                        return ret;
 384                }
 385                addr += _MALI_OSK_MALI_PAGE_SIZE;
 386        }
 387
 388        return 0;
 389}
 390
/**
 * Map some pages (COW backend) to CPU vma@vaddr.
 * @mem_bkend - COW backend
 * @vma
 * @vaddr - start CPU vaddr to map to
 * @num - max number of pages to map to CPU vaddr
 */
 398_mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bkend,
 399                struct vm_area_struct *vma,
 400                unsigned long vaddr,
 401                int num)
 402{
 403        mali_mem_cow *cow = &mem_bkend->cow_mem;
 404        struct mali_page_node *m_page;
 405        int ret;
 406        int offset;
 407        int count ;
 408        unsigned long vstart = vma->vm_start;
 409        count = 0;
 410        MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW);
 411        MALI_DEBUG_ASSERT(0 == vaddr % _MALI_OSK_MALI_PAGE_SIZE);
 412        MALI_DEBUG_ASSERT(0 == vstart % _MALI_OSK_MALI_PAGE_SIZE);
 413        offset = (vaddr - vstart) / _MALI_OSK_MALI_PAGE_SIZE;
 414
 415        list_for_each_entry(m_page, &cow->pages, list) {
 416                if ((count >= offset) && (count < offset + num)) {
 417                        ret = vm_insert_pfn(vma, vaddr, _mali_page_node_get_pfn(m_page));
 418
 419                        if (unlikely(0 != ret)) {
 420                                if (count == offset) {
 421                                        return _MALI_OSK_ERR_FAULT;
 422                                } else {
 423                                        /* ret is EBUSY when page isn't in modify range, but now it's OK*/
 424                                        return _MALI_OSK_ERR_OK;
 425                                }
 426                        }
 427                        vaddr += _MALI_OSK_MALI_PAGE_SIZE;
 428                }
 429                count++;
 430        }
 431        return _MALI_OSK_ERR_OK;
 432}
 433
/**
 * Release COW backend memory.
 * Pages are freed directly (put_page / unref), not returned to the pool.
 */
 438u32 mali_mem_cow_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped)
 439{
 440        mali_mem_allocation *alloc;
 441        u32 free_pages_nr = 0;
 442        MALI_DEBUG_ASSERT_POINTER(mem_bkend);
 443        MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
 444        alloc = mem_bkend->mali_allocation;
 445        MALI_DEBUG_ASSERT_POINTER(alloc);
 446        /* Unmap the memory from the mali virtual address space. */
 447        if (MALI_TRUE == is_mali_mapped)
 448                mali_mem_os_mali_unmap(alloc);
 449        /* free cow backend list*/
 450        free_pages_nr = mali_mem_os_free(&mem_bkend->cow_mem.pages, mem_bkend->cow_mem.count, MALI_TRUE);
 451        free_pages_nr += mali_mem_block_free_list(&mem_bkend->cow_mem.pages);
 452        MALI_DEBUG_ASSERT(list_empty(&mem_bkend->cow_mem.pages));
 453
 454        MALI_DEBUG_PRINT(4, ("COW Mem free : allocated size = 0x%x, free size = 0x%x\n", mem_bkend->cow_mem.count * _MALI_OSK_MALI_PAGE_SIZE,
 455                             free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE));
 456
 457        mem_bkend->cow_mem.count = 0;
 458        return free_pages_nr;
 459}
 460
 461
/* dst is always OS memory */
void _mali_mem_cow_copy_page(mali_page_node *src_node, struct page *new_page)
{
	void *dst, *src;
	MALI_DEBUG_ASSERT(src_node != NULL);

	/* Take new_page out of device DMA ownership before the CPU writes it;
	 * page_private() holds the DMA address stored when it was mapped. */
	dma_unmap_page(&mali_platform_device->dev, page_private(new_page),
		       _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
	/* map it , and copy the content*/

	dst = kmap_atomic(new_page);

	if (src_node->type == MALI_PAGE_NODE_OS) {
		struct page *src_page = src_node->page;
		/*clear cache */
		dma_unmap_page(&mali_platform_device->dev, page_private(src_page),
			       _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
		src = kmap_atomic(src_page);
#ifdef CONFIG_ARM
		/* It seems to have a cache-coherence issue if we use kmap to map
		 * the src_page; we need to invalidate the L2 cache here. */
		outer_inv_range(page_to_phys(src_page), page_to_phys(src_page) + _MALI_OSK_MALI_PAGE_SIZE);
#else
		/* Use dma_sync_single_for_cpu for arm64 because there is no
		 * HIGHMEM on aarch64, so this function can work. */
		dma_sync_single_for_cpu(&mali_platform_device->dev, page_private(src_page),
					_MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL);
#endif
		memcpy(dst, src , _MALI_OSK_MALI_PAGE_SIZE);
		kunmap_atomic(src);
		/* Hand the source page back to the device after reading it. */
		dma_map_page(&mali_platform_device->dev, src_page,
			     0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);
	} else if (src_node->type == MALI_PAGE_NODE_BLOCK) {
		/* Use ioremap to map src for BLOCK memory. */
		src = ioremap_nocache(_mali_page_node_get_phy_addr(src_node), _MALI_OSK_MALI_PAGE_SIZE);
		memcpy(dst, src , _MALI_OSK_MALI_PAGE_SIZE);
		iounmap(src);
	}

	kunmap_atomic(dst);
	/* Re-map new_page for the device so the GPU sees the copied data. */
	dma_map_page(&mali_platform_device->dev, new_page,
		     0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE);

}
 510
 511
/*
 * Allocate a page on demand when the CPU accesses it.
 * This is used in the page fault handler.
 */
_mali_osk_errcode_t mali_mem_cow_allocate_on_demand(mali_mem_backend *mem_bkend, u32 offset_page)
{
	struct page *new_page = NULL;
	int i = 0;
	struct mali_page_node *m_page, *found_node = NULL;
	struct  mali_session_data *session = NULL;
	mali_mem_cow *cow = &mem_bkend->cow_mem;
	MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);
	MALI_DEBUG_ASSERT(offset_page < mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE);
	MALI_DEBUG_PRINT(4, ("mali_mem_cow_allocate_on_demand !, offset_page =0x%x\n", offset_page));

	/* allocate new page here */
	new_page = mali_mem_cow_alloc_page();
	if (!new_page)
		return _MALI_OSK_ERR_NOMEM;

	/* find the page in backend*/
	/* Linear walk to the offset_page-th node of the COW page list. */
	list_for_each_entry(m_page, &cow->pages, list) {
		if (i == offset_page) {
			found_node = m_page;
			break;
		}
		i++;
	}
	MALI_DEBUG_ASSERT(found_node);
	if (NULL == found_node) {
		__free_page(new_page);
		return _MALI_OSK_ERR_ITEM_NOT_FOUND;
	}
	/* Copy the src page's content to new page */
	_mali_mem_cow_copy_page(found_node, new_page);

	MALI_DEBUG_ASSERT_POINTER(mem_bkend->mali_allocation);
	session = mem_bkend->mali_allocation->session;
	MALI_DEBUG_ASSERT_POINTER(session);
	/* ref count > 1 means the page was shared; the private copy counts as
	 * a newly allocated page for this session's accounting. */
	if (1 != _mali_page_node_get_ref_count(found_node)) {
		atomic_add(1, &session->mali_mem_allocated_pages);
		if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) {
			session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE;
		}
		mem_bkend->cow_mem.change_pages_nr++;
	}
	/* Drop the reference on the old (shared) page. */
	if (_mali_mem_put_page_node(found_node)) {
		__free_page(new_page);
		return _MALI_OSK_ERR_NOMEM;
	}
	/* always use OS for COW*/
	found_node->type = MALI_PAGE_NODE_OS;
	_mali_page_node_add_page(found_node, new_page);
	/* map to GPU side*/

	_mali_osk_mutex_wait(session->memory_lock);
	mali_mem_cow_mali_map(mem_bkend, offset_page * _MALI_OSK_MALI_PAGE_SIZE, _MALI_OSK_MALI_PAGE_SIZE);
	_mali_osk_mutex_signal(session->memory_lock);
	return _MALI_OSK_ERR_OK;
}
 572