/*
 * drivers/staging/android/ion/ion_berlin_heap.c
 *
 * Copyright (C) 2019 Synaptics Incorporated
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include "ion.h"
#define ION_BERLIN_ALLOCATE_FAIL -1
#define BEST_FIT_ATTRIBUTE BIT(8)
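/*
 * A Berlin heap is a carveout-style heap: one physically contiguous
 * region described in the device tree, managed through a genalloc
 * pool. @priv points back at the ion_platform_heap the heap was
 * created from, so attribute and debug queries can reach the
 * platform data.
 */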
struct ion_berlin_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	phys_addr_t base;
	struct device *dev;
	void *priv;
};

struct ion_berlin_info {
	int heap_num;
	struct ion_platform_heap *heaps_data;
	struct ion_heap **heaps;
};

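/*
 * gen_pool_alloc() returns 0 on failure; because the pool is populated
 * starting at the carveout's physical base, 0 is not a valid allocation
 * as long as the carveout does not start at physical address zero.
 */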
static phys_addr_t ion_berlin_allocate(struct ion_heap *heap,
				       unsigned long size)
{
	struct ion_berlin_heap *berlin_heap =
		container_of(heap, struct ion_berlin_heap, heap);
	unsigned long offset = gen_pool_alloc(berlin_heap->pool, size);

	if (!offset)
		return ION_BERLIN_ALLOCATE_FAIL;

	return offset;
}

static void ion_berlin_free(struct ion_heap *heap, phys_addr_t addr,
			    unsigned long size)
{
	struct ion_berlin_heap *berlin_heap =
		container_of(heap, struct ion_berlin_heap, heap);

	if (addr == ION_BERLIN_ALLOCATE_FAIL)
		return;

	gen_pool_free(berlin_heap->pool, addr, size);
}

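/*
 * Carveout allocations are physically contiguous by construction, so
 * each buffer is described by a single-entry sg_table pointing at the
 * page that backs the allocated range.
 */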
static int ion_berlin_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size,
				    unsigned long flags)
{
	struct sg_table *table;
	phys_addr_t paddr;
	int ret;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto err_free;

	paddr = ion_berlin_allocate(heap, size);
	if (paddr == ION_BERLIN_ALLOCATE_FAIL) {
		ret = -ENOMEM;
		goto err_free_table;
	}

	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
	buffer->sg_table = table;

	return 0;

err_free_table:
	sg_free_table(table);
err_free:
	kfree(table);
	return ret;
}

static void ion_berlin_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct sg_table *table = buffer->sg_table;
	struct page *page = sg_page(table->sgl);
	phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

	ion_heap_buffer_zero(buffer);
	ion_berlin_free(heap, paddr, buffer->size);
	sg_free_table(table);
	kfree(table);
}

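/*
 * Cache maintenance on a sub-range of a buffer. ION_CLEAN_CACHE writes
 * dirty lines back before a device reads the buffer,
 * ION_INVALIDATE_CACHE discards stale lines after a device has written
 * it, and ION_FLUSH_CACHE does both. All three are built on the
 * streaming DMA sync helpers, walking the sg_table until region->len
 * bytes starting at region->offset have been covered.
 */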
static int ion_berlin_dma_sync(struct ion_heap *heap,
			       struct ion_buffer *buffer,
			       struct ion_berlin_data *region)
{
	struct ion_berlin_heap *berlin_heap =
		container_of(heap, struct ion_berlin_heap, heap);
	struct device *dev = berlin_heap->dev;
	struct sg_table *table = buffer->sg_table;
	unsigned long offset = region->offset;
	unsigned long region_len = region->len;
	unsigned long op_len;
	struct scatterlist *sg;
	phys_addr_t phys_addr;
	int i;

	switch (region->cmd) {
	case ION_INVALIDATE_CACHE:
	case ION_CLEAN_CACHE:
	case ION_FLUSH_CACHE:
		break;
	default:
		pr_err("%s: unknown cache command %d\n",
		       __func__, region->cmd);
		return -EINVAL;
	}

	mutex_lock(&buffer->lock);
	for_each_sg(table->sgl, sg, table->nents, i) {
		if (sg->length <= offset) {
			/* The region starts beyond this entry. */
			offset -= sg->length;
			continue;
		}

		phys_addr = PFN_PHYS(page_to_pfn(sg_page(sg)));
		op_len = sg->length - offset;
		if (op_len > region_len)
			op_len = region_len;

		/* Clean (write back) before the device reads, invalidate
		 * after the device writes; a flush does both in turn.
		 */
		if (region->cmd == ION_CLEAN_CACHE ||
		    region->cmd == ION_FLUSH_CACHE)
			dma_sync_single_for_device(dev, phys_addr + offset,
						   op_len, DMA_TO_DEVICE);
		if (region->cmd == ION_INVALIDATE_CACHE ||
		    region->cmd == ION_FLUSH_CACHE)
			dma_sync_single_for_cpu(dev, phys_addr + offset,
						op_len, DMA_FROM_DEVICE);

		region_len -= op_len;
		if (!region_len)
			break;
		offset = 0;
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

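/*
 * Report a buffer's physical base address and size. Buffers are
 * single-entry sg_tables (see ion_berlin_heap_allocate()), so the
 * first entry fully describes the allocation.
 */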
static int ion_berlin_get_phy(struct ion_heap *heap,
			      struct ion_buffer *buffer,
			      struct ion_berlin_data *region)
{
	struct sg_table *table = buffer->sg_table;
	struct page *page = sg_page(table->sgl);

	region->addr = (unsigned long)PFN_PHYS(page_to_pfn(page));
	region->len = buffer->size;

	return 0;
}

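/*
 * Heap-specific ioctls: ION_BERLIN_SYNC performs explicit cache
 * maintenance on a user-supplied region, ION_BERLIN_PHYS returns the
 * buffer's physical address and length.
 */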
static int ion_berlin_custom_ioctl(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   struct ion_custom_data *custom_data)
{
	struct ion_berlin_data region;
	int ret = -EINVAL;

	switch (custom_data->cmd) {
	case ION_BERLIN_SYNC:
		if (copy_from_user(&region, (void __user *)custom_data->arg,
				   sizeof(region)))
			return -EFAULT;
		ret = ion_berlin_dma_sync(heap, buffer, &region);
		break;
	case ION_BERLIN_PHYS:
		memset(&region, 0, sizeof(region));
		ret = ion_berlin_get_phy(heap, buffer, &region);
		if (ret)
			break;
		/* copy_to_user() returns the number of bytes left
		 * uncopied, not an error code.
		 */
		if (copy_to_user((void __user *)custom_data->arg, &region,
				 sizeof(region)))
			ret = -EFAULT;
		break;
	default:
		pr_err("%s: unknown ioctl %d\n", __func__, custom_data->cmd);
		return -EINVAL;
	}

	return ret;
}

static int ion_berlin_heap_get_attr(struct ion_heap *heap, unsigned int *attr)
{
	struct ion_berlin_heap *berlin_heap =
		container_of(heap, struct ion_berlin_heap, heap);
	struct ion_platform_heap *heap_data =
		(struct ion_platform_heap *)berlin_heap->priv;

	if (!heap_data || !attr)
		return -EINVAL;

	*attr = heap_data->attribute[1];
	return 0;
}

static struct ion_heap_ops berlin_heap_ops = {
	.allocate = ion_berlin_heap_allocate,
	.free = ion_berlin_heap_free,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.custom_ioctl = ion_berlin_custom_ioctl,
	.get_heap_attr = ion_berlin_heap_get_attr,
};

static int ion_berlin_heap_debug_show(struct ion_heap *heap,
				      struct seq_file *s, void *unused)
{
	struct ion_berlin_heap *berlin_heap =
		container_of(heap, struct ion_berlin_heap, heap);
	struct ion_platform_heap *heap_data =
		(struct ion_platform_heap *)berlin_heap->priv;

	seq_printf(s, "heap %s: base %pa, size 0x%lx, name %s, attr: [0x%08x 0x%08x]\n",
		   heap->name, &heap_data->base,
		   (unsigned long)heap_data->size, heap_data->name,
		   heap_data->attribute[0], heap_data->attribute[1]);

	return 0;
}

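/*
 * Create one heap per carveout region. The heap name is made unique by
 * appending the region's base address, since several Berlin heaps may
 * be registered from a single device tree node.
 */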
struct ion_heap *ion_berlin_heap_create(struct ion_platform_heap *heap_data,
					struct device *dev)
{
	struct ion_berlin_heap *berlin_heap;
	const char *heap_name = "ion_berlin_heap";
	int ret;

	berlin_heap = kzalloc(sizeof(*berlin_heap), GFP_KERNEL);
	if (!berlin_heap)
		return ERR_PTR(-ENOMEM);

	berlin_heap->pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!berlin_heap->pool) {
		ret = -ENOMEM;
		goto err_free_heap;
	}

	if (heap_data->attribute[0] & BEST_FIT_ATTRIBUTE)
		gen_pool_set_algo(berlin_heap->pool, gen_pool_best_fit, NULL);

	berlin_heap->dev = dev;
	berlin_heap->base = heap_data->base;
	ret = gen_pool_add(berlin_heap->pool, berlin_heap->base,
			   heap_data->size, -1);
	if (ret)
		goto err_destroy_pool;

	berlin_heap->heap.ops = &berlin_heap_ops;
	berlin_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
	berlin_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	berlin_heap->heap.name = kasprintf(GFP_KERNEL, "%s_%08x", heap_name,
					   (u32)heap_data->base);
	if (!berlin_heap->heap.name) {
		ret = -ENOMEM;
		goto err_destroy_pool;
	}
	berlin_heap->priv = heap_data;

	return &berlin_heap->heap;

err_destroy_pool:
	gen_pool_destroy(berlin_heap->pool);
err_free_heap:
	kfree(berlin_heap);
	return ERR_PTR(ret);
}

static void ion_berlin_heap_destroy(struct ion_heap *heap)
{
	struct ion_berlin_heap *berlin_heap =
		container_of(heap, struct ion_berlin_heap, heap);

	gen_pool_destroy(berlin_heap->pool);
	kfree(heap->name);
	kfree(berlin_heap);
}

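/*
 * All per-device bookkeeping lives in a single devm allocation, carved
 * up as: the ion_berlin_info header, then heap_num ion_platform_heap
 * entries, then heap_num ion_heap pointers. The pointer array relies
 * on ion_platform_heap being at least pointer-aligned, which holds
 * because the struct itself contains pointers.
 */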
static void *ion_berlin_malloc_info(struct device *dev, int heap_num)
{
	struct ion_berlin_info *info;
	unsigned char *p;
	int info_len = sizeof(*info);
	int heaps_data_len = sizeof(struct ion_platform_heap);
	int heaps_len = sizeof(struct ion_heap *);
	int size = info_len + (heaps_data_len + heaps_len) * heap_num;

	p = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!p)
		return NULL;

	info = (struct ion_berlin_info *)p;
	info->heap_num = heap_num;
	info->heaps_data = (struct ion_platform_heap *)(p + info_len);
	info->heaps = (struct ion_heap **)(p + info_len +
					   heaps_data_len * heap_num);

	return info;
}

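/*
 * Parse the device tree node: "pool-num" gives the number of carveout
 * regions (one address range each), "attributes-num-per-pool" the
 * number of u32 attribute words per region, and "pool-attributes" the
 * flat attribute array of pool-num * attributes-num-per-pool words.
 */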
static int ion_berlin_get_info(struct device *dev,
			       struct ion_berlin_info **info)
{
	int i, res = -ENODEV;
	u32 heap_num = 0;
	u32 attri_num = 0;
	struct device_node *np = dev->of_node;
	struct resource r;
	struct ion_berlin_info *tmp_info;
	u32 *attributes;

	if (!np)
		goto err_node;

	res = of_property_read_u32(np, "pool-num", &heap_num);
	if (res)
		goto err_node;

	res = of_property_read_u32(np, "attributes-num-per-pool", &attri_num);
	if (res)
		goto err_node;

	tmp_info = ion_berlin_malloc_info(dev, heap_num);
	if (!tmp_info) {
		res = -ENOMEM;
		goto err_node;
	}

	/*
	 * The property holds attri_num words per pool, so the buffer
	 * must hold heap_num * attri_num entries, not just heap_num.
	 */
	attributes = devm_kcalloc(dev, (size_t)heap_num * attri_num,
				  sizeof(*attributes), GFP_KERNEL);
	if (!attributes) {
		res = -ENOMEM;
		goto err_node;
	}

	res = of_property_read_u32_array(np, "pool-attributes",
					 attributes, heap_num * attri_num);
	if (res) {
		pr_err("reading pool-attributes failed\n");
		goto err_node;
	}

	for (i = 0; i < heap_num; i++) {
		res = of_address_to_resource(np, i, &r);
		if (res)
			goto err_node;

		tmp_info->heaps_data[i].id = i;
		tmp_info->heaps_data[i].base = r.start;
		tmp_info->heaps_data[i].size = resource_size(&r);
		tmp_info->heaps_data[i].name = r.name;
		tmp_info->heaps_data[i].attribute[0] =
			attributes[i * attri_num];
		tmp_info->heaps_data[i].attribute[1] =
			attributes[i * attri_num + 1];
	}

	*info = tmp_info;
	return 0;

err_node:
	pr_err("ion_berlin_get_info failed (%d)\n", res);
	return res;
}

static int ion_berlin_probe(struct platform_device *pdev)
{
	struct ion_berlin_info *info;
	int res;
	int i;

	res = ion_berlin_get_info(&pdev->dev, &info);
	if (res)
		return res;

	for (i = 0; i < info->heap_num; i++) {
		struct ion_platform_heap *heap_data = &info->heaps_data[i];

		info->heaps[i] = ion_berlin_heap_create(heap_data, &pdev->dev);
		if (IS_ERR(info->heaps[i])) {
			res = PTR_ERR(info->heaps[i]);
			info->heaps[i] = NULL;
			goto err_create_heap;
		}
		info->heaps[i]->debug_show = ion_berlin_heap_debug_show;
		ion_device_add_heap(info->heaps[i]);
	}
	platform_set_drvdata(pdev, info);

	dev_info(&pdev->dev, "ion_berlin_probe: %d heaps done\n",
		 info->heap_num);
	return 0;

err_create_heap:
	for (i = 0; i < info->heap_num; i++) {
		if (info->heaps[i])
			ion_berlin_heap_destroy(info->heaps[i]);
	}
	return res;
}

static int ion_berlin_remove(struct platform_device *pdev)
{
	struct ion_berlin_info *info = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < info->heap_num; i++) {
		if (info->heaps[i])
			ion_berlin_heap_destroy(info->heaps[i]);
	}

	return 0;
}

static const struct of_device_id ion_berlin_heaps_of_match[] = {
	{ .compatible = "syna,ion-berlin-heaps", },
	{},
};

static struct platform_driver ion_berlin_driver = {
	.probe = ion_berlin_probe,
	.remove = ion_berlin_remove,
	.driver = {
		.name = "ion-berlin",
		.of_match_table = ion_berlin_heaps_of_match,
	},
};

static int __init ion_berlin_init(void)
{
	return platform_driver_register(&ion_berlin_driver);
}
device_initcall(ion_berlin_init);

MODULE_LICENSE("GPL v2");