// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-contiguous.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>


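/*
 * Ties a registered dma-heap to the CMA area it allocates from.
 */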
struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};

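/*
 * Per-buffer state: the CMA allocation itself (cma_pages), a flat page
 * array used to build per-attachment scatterlists and kernel mappings,
 * the list of active attachments, and a refcounted kernel vmap.
 */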
struct cma_heap_buffer {
	struct cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;
};

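/*
 * Per-attachment state: each attached device gets its own sg_table over
 * the buffer's pages; @mapped tracks whether it is currently DMA-mapped
 * so the CPU-access hooks only sync live mappings.
 */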
struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
};

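/*
 * Build a private scatterlist for this attachment from the buffer's page
 * array and register it on the buffer's attachment list.
 */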
static int cma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void cma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

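/*
 * Map the attachment's scatterlist for DMA on the attaching device and
 * mark it live so begin/end_cpu_access will sync it.
 */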
static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}

static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

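/*
 * Sync the buffer for CPU access: invalidate any kernel vmap range and
 * sync every currently mapped attachment back to the CPU.
 */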
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

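/*
 * Userspace mappings are populated lazily: cma_heap_mmap() only installs
 * dma_heap_vm_ops, and this fault handler hands out the backing page for
 * the faulting offset.
 */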
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cma_heap_buffer *buffer = vma->vm_private_data;

	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	vmf->page = buffer->pages[vmf->pgoff];
	get_page(vmf->page);

	return 0;
}

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = cma_heap_vm_fault,
};

static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}

static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
	void *vaddr;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

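/*
 * Return a kernel virtual mapping of the buffer, creating it on first use
 * and refcounting it via vmap_cnt for subsequent callers.
 */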
static void *cma_heap_vmap(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		vaddr = buffer->vaddr;
		goto out;
	}

	vaddr = cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr))
		goto out;

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
out:
	mutex_unlock(&buffer->lock);

	return vaddr;
}

static void cma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
}

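/*
 * Final teardown once the last dma-buf reference is dropped: warn about a
 * leaked kernel vmap, free the page array and return the pages to CMA.
 */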
static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct cma_heap *cma_heap = buffer->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
	}

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
	.attach = cma_heap_attach,
	.detach = cma_heap_detach,
	.map_dma_buf = cma_heap_map_dma_buf,
	.unmap_dma_buf = cma_heap_unmap_dma_buf,
	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
	.mmap = cma_heap_mmap,
	.vmap = cma_heap_vmap,
	.vunmap = cma_heap_vunmap,
	.release = cma_heap_dma_buf_release,
};

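/*
 * Heap allocate callback: grab a physically contiguous run of pages from
 * CMA, zero it (checking for fatal signals while clearing highmem pages),
 * build the flat page array and export the result as a dma-buf.
 */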
static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
					 unsigned long len,
					 unsigned long fd_flags,
					 unsigned long heap_flags)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL.
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}

	return dmabuf;

free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
};

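/*
 * Register a dma-heap on top of the given CMA area; the heap takes its
 * name from the CMA region.
 */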
static int __add_cma_heap(struct cma *cma, void *data)
{
	struct cma_heap *cma_heap;
	struct dma_heap_export_info exp_info;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = cma_get_name(cma);
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	return 0;
}

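/*
 * Only the default CMA area is exposed as a heap at module init; per-device
 * CMA regions are not exported here.
 */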
static int add_default_cma_heap(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	int ret = 0;

	if (default_cma)
		ret = __add_cma_heap(default_cma, NULL);

	return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
MODULE_LICENSE("GPL v2");