| // SPDX-License-Identifier: GPL-2.0-only |
| /* |
| * Copyright (c) 2015, Linaro Limited |
| * Copyright (c) 2017, EPAM Systems |
| */ |
| #include <linux/device.h> |
| #include <linux/dma-buf.h> |
| #include <linux/genalloc.h> |
| #include <linux/slab.h> |
| #include <linux/tee_drv.h> |
| #include <linux/cma.h> |
| #include "optee_private.h" |
| #include "optee_smc.h" |
| #include "shm_pool.h" |
| #include <linux/dma-mapping.h> |
| |
| #define CMA_POOL_NAME "reserved" |
| static int shm_cma_it(struct cma *cma, void *data) |
| { |
| const char *name = cma_get_name(cma); |
| if (name && strcmp(name, CMA_POOL_NAME) == 0) { |
| *(struct cma **)data = cma; |
| } |
| return 0; |
| } |
| |
| static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, |
| struct tee_shm *shm, size_t size) |
| { |
| unsigned int order = get_order(size); |
| struct page *page = NULL; |
| int rc = 0; |
| unsigned int nr_pages; |
| struct cma *cma = NULL; |
| |
| if (order >= MAX_ORDER) |
| cma_for_each_area(shm_cma_it, &cma); |
| |
| if (order >= MAX_ORDER && cma) { |
| nr_pages = ((size + PAGE_SIZE-1) >> PAGE_SHIFT); |
| page = cma_alloc(cma, nr_pages, 0, GFP_KERNEL | __GFP_ZERO); |
| shm->cma_page = page; |
| } |
| |
| if (!page) { |
| cma = NULL; |
| nr_pages = 1 << order; |
| page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); |
| } |
| |
| if (!page) |
| return -ENOMEM; |
| |
| shm->kaddr = page_address(page); |
| shm->paddr = page_to_phys(page); |
| shm->size = nr_pages << PAGE_SHIFT; |
| |
| /* |
| * Shared memory private to the OP-TEE driver doesn't need |
| * to be registered with OP-TEE. |
| */ |
| if (!(shm->flags & TEE_SHM_PRIV)) { |
| unsigned int i; |
| struct page **pages; |
| |
| pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL); |
| if (!pages) { |
| rc = -ENOMEM; |
| goto err; |
| } |
| |
| for (i = 0; i < nr_pages; i++) |
| pages[i] = page + i; |
| |
| shm->flags |= TEE_SHM_REGISTER; |
| rc = optee_shm_register(shm->ctx, shm, pages, nr_pages, |
| (unsigned long)shm->kaddr); |
| kfree(pages); |
| if (rc) |
| goto err; |
| } |
| return 0; |
| err: |
| if (shm->cma_page) |
| cma_release(cma, shm->cma_page, shm->size/PAGE_SIZE); |
| else |
| __free_pages(page, order); |
| return rc; |
| } |
| |
| static void pool_op_free(struct tee_shm_pool_mgr *poolm, |
| struct tee_shm *shm) |
| { |
| struct cma *cma = NULL; |
| |
| if (!(shm->flags & TEE_SHM_PRIV)) |
| optee_shm_unregister(shm->ctx, shm); |
| |
| if (shm->cma_page) { |
| cma_for_each_area(shm_cma_it, &cma); |
| BUG_ON(cma == NULL); |
| cma_release(cma, shm->cma_page, shm->size/PAGE_SIZE); |
| } |
| else |
| free_pages((unsigned long)shm->kaddr, get_order(shm->size)); |
| shm->kaddr = NULL; |
| } |
| |
/* Free the pool manager itself; individual shm buffers are freed via pool_op_free(). */
static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
{
	kfree(poolm);
}
| |
/* Operations for the page-based shm pool created by optee_shm_pool_alloc_pages(). */
static const struct tee_shm_pool_mgr_ops pool_ops = {
	.alloc = pool_op_alloc,
	.free = pool_op_free,
	.destroy_poolmgr = pool_op_destroy_poolmgr,
};
| |
| /** |
| * optee_shm_pool_alloc_pages() - create page-based allocator pool |
| * |
| * This pool is used when OP-TEE supports dymanic SHM. In this case |
| * command buffers and such are allocated from kernel's own memory. |
| */ |
| struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void) |
| { |
| struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); |
| |
| if (!mgr) |
| return ERR_PTR(-ENOMEM); |
| |
| mgr->ops = &pool_ops; |
| |
| return mgr; |
| } |