staging/ion: add Allwinner ION "secure" heap **not for mainline**
author    Etienne Carriere <etienne.carriere@linaro.org>
          Thu, 23 Mar 2017 13:02:08 +0000 (14:02 +0100)
committer Nitin Garg <nitin.garg@nxp.com>
          Tue, 20 Mar 2018 19:56:39 +0000 (14:56 -0500)
Dumped from:
  https://github.com/loboris/OrangePI-Kernel/tree/master/linux-3.4
  0cc8d855adb
  Author: Sunny <sunny@allwinnertech.com> for Allwinner.

Changes made to the original "secure heap" implementation:
- Minor coding style fixes: includes, empty lines and overlong lines,
  indentation, comment layout.
- The original patch modified the ION uapi. We do not attempt to modify
  uapi/ion.h: "secure" (or "domain") heaps live under ID
  ION_HEAP_TYPE_CUSTOM + 1 (the legacy 'secure heap type' value), as
  sketched below.
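
For reference, a minimal registration sketch (illustrative only, not part
of this patch). It assumes the legacy staging ION interfaces from
ion_priv.h (struct ion_platform_heap, ion_device_add_heap()); the heap
id, name, base and size values are made-up placeholders that a platform
would replace with its own carveout description:

  #include <linux/err.h>
  #include <linux/sizes.h>

  #include "ion.h"
  #include "ion_priv.h"

  #ifndef ION_HEAP_TYPE_SECURE
  #define ION_HEAP_TYPE_SECURE   (ION_HEAP_TYPE_CUSTOM + 1)
  #endif

  /* Declared here for the sketch; a real driver would export this in a header. */
  struct ion_heap *ion_secure_heap_create(struct ion_platform_heap *heap_data);

  /* Placeholder carveout description; all values are examples only. */
  static struct ion_platform_heap secure_heap_data = {
          .type = ION_HEAP_TYPE_SECURE,
          .id   = 8,                      /* platform-specific heap id */
          .name = "secure",
          .base = 0x50000000,             /* example carveout base */
          .size = SZ_16M,                 /* example carveout size */
  };

  /* Create the secure heap and attach it to an existing ION device. */
  static int example_register_secure_heap(struct ion_device *idev)
  {
          struct ion_heap *heap = ion_secure_heap_create(&secure_heap_data);

          if (IS_ERR(heap))
                  return PTR_ERR(heap);

          /* The generic ion_heap_create() would normally copy these from heap_data. */
          heap->name = secure_heap_data.name;
          heap->id   = secure_heap_data.id;

          ion_device_add_heap(idev, heap);
          return 0;
  }

Userspace would then reach this heap through the usual ION_IOC_ALLOC path
by setting the corresponding bit in heap_id_mask.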

Signed-off-by: Etienne Carriere <etienne.carriere@linaro.org>
Reviewed-by: Joakim Bech <joakim.bech@linaro.org>
(cherry picked from commit e31dd54997b050b6a6965d7cfbc795492256847c
 linaro repo https://github.com/linaro-swg/linux.git
 tag optee-v4.9-20171005)

drivers/staging/android/ion/Makefile
drivers/staging/android/ion/ion_secure_heap.c [new file with mode: 0644]

diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index 724f014..c54ff10 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -1,6 +1,8 @@
 obj-$(CONFIG_ION) +=   ion.o ion-ioctl.o ion_heap.o \
                        ion_page_pool.o ion_system_heap.o \
                        ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
+obj-$(CONFIG_ION) +=   ion_secure_heap.o
+
 obj-$(CONFIG_ION_TEST) += ion_test.o
 ifdef CONFIG_COMPAT
 obj-$(CONFIG_ION) += compat_ion.o
diff --git a/drivers/staging/android/ion/ion_secure_heap.c b/drivers/staging/android/ion/ion_secure_heap.c
new file mode 100644
index 0000000..57a75b3
--- /dev/null
+++ b/drivers/staging/android/ion/ion_secure_heap.c
@@ -0,0 +1,178 @@
+/*
+ * drivers/gpu/ion/ion_secure_heap.c
+ *
+ * Copyright (C) 2016-2017 Linaro, Inc. All rights reserved.
+ * Copyright (C) Allwinner 2014
+ * Author: <sunny@allwinnertech.com> for Allwinner.
+ *
+ * Add secure heap support.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+
+#ifndef ION_HEAP_TYPE_SECURE
+#define ION_HEAP_TYPE_SECURE   (ION_HEAP_TYPE_CUSTOM + 1)
+#endif
+
+struct ion_secure_heap {
+       struct ion_heap heap;
+       struct gen_pool *pool;
+       ion_phys_addr_t base;
+       size_t          size;
+};
+
+ion_phys_addr_t ion_secure_allocate(struct ion_heap *heap,
+                                     unsigned long size,
+                                     unsigned long align)
+{
+       struct ion_secure_heap *secure_heap =
+               container_of(heap, struct ion_secure_heap, heap);
+       unsigned long offset = gen_pool_alloc(secure_heap->pool, size);
+
+       if (!offset) {
+               pr_err("%s(%d) err: alloc 0x%08x bytes failed\n",
+                                       __func__, __LINE__, (u32)size);
+               return ION_CARVEOUT_ALLOCATE_FAIL;
+       }
+       return offset;
+}
+
+void ion_secure_free(struct ion_heap *heap, ion_phys_addr_t addr,
+                      unsigned long size)
+{
+       struct ion_secure_heap *secure_heap =
+               container_of(heap, struct ion_secure_heap, heap);
+
+       if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
+               return;
+       gen_pool_free(secure_heap->pool, addr, size);
+}
+
+static int ion_secure_heap_phys(struct ion_heap *heap,
+                                 struct ion_buffer *buffer,
+                                 ion_phys_addr_t *addr, size_t *len)
+{
+       *addr = buffer->priv_phys;
+       *len = buffer->size;
+       return 0;
+}
+
+static int ion_secure_heap_allocate(struct ion_heap *heap,
+                                     struct ion_buffer *buffer,
+                                     unsigned long size, unsigned long align,
+                                     unsigned long flags)
+{
+       buffer->priv_phys = ion_secure_allocate(heap, size, align);
+       return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
+}
+
+static void ion_secure_heap_free(struct ion_buffer *buffer)
+{
+       struct ion_heap *heap = buffer->heap;
+
+       ion_secure_free(heap, buffer->priv_phys, buffer->size);
+       buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
+}
+
+struct sg_table *ion_secure_heap_map_dma(struct ion_heap *heap,
+                                             struct ion_buffer *buffer)
+{
+       struct sg_table *table;
+       int ret;
+
+       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+       if (!table)
+               return ERR_PTR(-ENOMEM);
+       ret = sg_alloc_table(table, 1, GFP_KERNEL);
+       if (ret) {
+               kfree(table);
+               return ERR_PTR(ret);
+       }
+       sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
+                   0);
+       return table;
+}
+
+void ion_secure_heap_unmap_dma(struct ion_heap *heap,
+                                struct ion_buffer *buffer)
+{
+       sg_free_table(buffer->sg_table);
+       kfree(buffer->sg_table);
+}
+
+int ion_secure_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+                              struct vm_area_struct *vma)
+{
+       /*
+        * When userspace calls ION_IOC_ALLOC without ION_FLAG_CACHED,
+        * ion_mmap() already switches vma->vm_page_prot to
+        * pgprot_writecombine(), so we do not need to do it manually here.
+        */
+       return remap_pfn_range(vma, vma->vm_start,
+                              __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
+                              vma->vm_end - vma->vm_start,
+                              vma->vm_page_prot);
+}
+
+static struct ion_heap_ops secure_heap_ops = {
+       .allocate = ion_secure_heap_allocate,
+       .free = ion_secure_heap_free,
+       .phys = ion_secure_heap_phys,
+       .map_dma = ion_secure_heap_map_dma,
+       .unmap_dma = ion_secure_heap_unmap_dma,
+       .map_user = ion_secure_heap_map_user,
+       .map_kernel = ion_heap_map_kernel,
+       .unmap_kernel = ion_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_secure_heap_create(struct ion_platform_heap *heap_data)
+{
+       struct ion_secure_heap *secure_heap;
+
+       secure_heap = kzalloc(sizeof(struct ion_secure_heap), GFP_KERNEL);
+       if (!secure_heap)
+               return ERR_PTR(-ENOMEM);
+
+       secure_heap->pool = gen_pool_create(12, -1);
+       if (!secure_heap->pool) {
+               kfree(secure_heap);
+               return ERR_PTR(-ENOMEM);
+       }
+       secure_heap->base = heap_data->base;
+       secure_heap->size = heap_data->size;
+       gen_pool_add(secure_heap->pool, secure_heap->base, heap_data->size, -1);
+       secure_heap->heap.ops = &secure_heap_ops;
+       secure_heap->heap.type = ION_HEAP_TYPE_SECURE;
+
+       return &secure_heap->heap;
+}
+
+void ion_secure_heap_destroy(struct ion_heap *heap)
+{
+       struct ion_secure_heap *secure_heap =
+            container_of(heap, struct ion_secure_heap, heap);
+
+       gen_pool_destroy(secure_heap->pool);
+       kfree(secure_heap);
+       secure_heap = NULL;
+}