static void __iomem *l2x0_base;
static const struct l2c_init_data *l2x0_data;
-static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask; /* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
static bool l2x0_bresp_disable;
static bool l2x0_flz_disable;
+#ifdef CONFIG_OPTEE
+struct l2x0_mutex {
+ arch_rwlock_t *mutex;   /* mutex shared with the TEE, NULL until installed */
+ arch_rwlock_t nomutex;  /* built-in fallback lock used until then */
+};
+
+static struct l2x0_mutex l2x0_lock;
+
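+/*
+ * Locking helpers: once a mutex shared with the TEE has been installed,
+ * use it; until then fall back to the local nomutex lock.
+ */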
+#define l2x0_spin_lock(lock, flags) \
+ do { \
+ flags = local_lock(lock); \
+ } while (0)
+
+#define l2x0_spin_unlock(lock, flags) local_unlock(lock, flags)
+
+#define l2x0_spin_lock_init(lock) spinlock_init(lock)
+
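+/* No shared mutex yet: start out with only the built-in fallback lock */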
+static void spinlock_init(struct l2x0_mutex *spinlock)
+{
+ spinlock->mutex = NULL;
+ spinlock->nomutex.lock = 0;
+}
+
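+/*
+ * Take the outer cache lock: disable interrupts and preemption on this CPU,
+ * then acquire either the mutex shared with the TEE or, if none has been
+ * installed yet, the built-in fallback lock.
+ */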
+static unsigned long local_lock(struct l2x0_mutex *spinlock)
+{
+ unsigned long flags;
+ arch_rwlock_t *lock = spinlock->mutex;
+
+ if (!lock)
+ lock = &spinlock->nomutex;
+
+ local_irq_save(flags);
+ preempt_disable();
+ arch_write_lock(lock);
+
+ return flags;
+}
+
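+/* Release the lock taken by local_lock() and restore interrupts/preemption */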
+static void local_unlock(struct l2x0_mutex *spinlock, unsigned long flags)
+{
+ arch_rwlock_t *lock = spinlock->mutex;
+
+ if (!lock)
+ lock = &spinlock->nomutex;
+
+ arch_write_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
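+/*
+ * Install the mutex shared with the TEE for outer cache maintenance.
+ * The hand-over can only happen once; later calls return -EINVAL.
+ */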
+static int l2c_set_mutex(void *mutex)
+{
+ unsigned long flags;
+
+ if (l2x0_lock.mutex != NULL)
+ return -EINVAL;
+
+ /*
+  * Take the built-in lock, install the shared mutex, then release the
+  * built-in lock explicitly: l2x0_spin_unlock() would now operate on the
+  * newly installed mutex instead.
+  */
+ l2x0_spin_lock(&l2x0_lock, flags);
+ l2x0_lock.mutex = mutex;
+
+ arch_write_unlock(&l2x0_lock.nomutex);
+ local_irq_restore(flags);
+ preempt_enable();
+
+ return 0;
+}
+
+#else
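+/* Without CONFIG_OPTEE the original raw spinlock is used; no shared mutex */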
+static DEFINE_RAW_SPINLOCK(l2x0_lock);
+
+#define l2x0_spin_lock(lock, flags) raw_spin_lock_irqsave(lock, flags)
+#define l2x0_spin_unlock(lock, flags) raw_spin_unlock_irqrestore(lock, flags)
+
+#define l2x0_spin_lock_init(lock)
+static int l2c_set_mutex(void *mutex)
+{
+ return -EINVAL;
+}
+
+#endif
+
/*
* Common code for all cache controllers.
*/
{
unsigned long flags;
- raw_spin_lock_irqsave(&l2x0_lock, flags);
+ l2x0_spin_lock(&l2x0_lock, flags);
__l2c_op_way(base + reg);
__l2c220_cache_sync(base);
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ l2x0_spin_unlock(&l2x0_lock, flags);
}
static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
unsigned long end, unsigned long flags)
{
- raw_spinlock_t *lock = &l2x0_lock;
-
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);
}
if (blk_end < end) {
- raw_spin_unlock_irqrestore(lock, flags);
- raw_spin_lock_irqsave(lock, flags);
+ l2x0_spin_unlock(&l2x0_lock, flags);
+ l2x0_spin_lock(&l2x0_lock, flags);
}
}
void __iomem *base = l2x0_base;
unsigned long flags;
- raw_spin_lock_irqsave(&l2x0_lock, flags);
+ l2x0_spin_lock(&l2x0_lock, flags);
if ((start | end) & (CACHE_LINE_SIZE - 1)) {
if (start & (CACHE_LINE_SIZE - 1)) {
start &= ~(CACHE_LINE_SIZE - 1);
start, end, flags);
l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
__l2c220_cache_sync(base);
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ l2x0_spin_unlock(&l2x0_lock, flags);
}
static void l2c220_clean_range(unsigned long start, unsigned long end)
return;
}
- raw_spin_lock_irqsave(&l2x0_lock, flags);
+ l2x0_spin_lock(&l2x0_lock, flags);
flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
start, end, flags);
l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
__l2c220_cache_sync(base);
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ l2x0_spin_unlock(&l2x0_lock, flags);
}
static void l2c220_flush_range(unsigned long start, unsigned long end)
return;
}
- raw_spin_lock_irqsave(&l2x0_lock, flags);
+ l2x0_spin_lock(&l2x0_lock, flags);
flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
start, end, flags);
l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
__l2c220_cache_sync(base);
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ l2x0_spin_unlock(&l2x0_lock, flags);
}
static void l2c220_flush_all(void)
{
unsigned long flags;
- raw_spin_lock_irqsave(&l2x0_lock, flags);
+ l2x0_spin_lock(&l2x0_lock, flags);
__l2c220_cache_sync(l2x0_base);
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ l2x0_spin_unlock(&l2x0_lock, flags);
}
static void l2c220_enable(void __iomem *base, unsigned num_lock)
unsigned long flags;
/* Erratum 588369 for both clean+invalidate operations */
- raw_spin_lock_irqsave(&l2x0_lock, flags);
+ l2x0_spin_lock(&l2x0_lock, flags);
l2c_set_debug(base, 0x03);
if (start & (CACHE_LINE_SIZE - 1)) {
}
l2c_set_debug(base, 0x00);
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ l2x0_spin_unlock(&l2x0_lock, flags);
}
__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
- raw_spinlock_t *lock = &l2x0_lock;
unsigned long flags;
void __iomem *base = l2x0_base;
- raw_spin_lock_irqsave(lock, flags);
+ l2x0_spin_lock(&l2x0_lock, flags);
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);
l2c_set_debug(base, 0x00);
if (blk_end < end) {
- raw_spin_unlock_irqrestore(lock, flags);
- raw_spin_lock_irqsave(lock, flags);
+ l2x0_spin_unlock(&l2x0_lock, flags);
+ l2x0_spin_lock(&l2x0_lock, flags);
}
}
- raw_spin_unlock_irqrestore(lock, flags);
+ l2x0_spin_unlock(&l2x0_lock, flags);
__l2c210_cache_sync(base);
}
void __iomem *base = l2x0_base;
unsigned long flags;
- raw_spin_lock_irqsave(&l2x0_lock, flags);
+ l2x0_spin_lock(&l2x0_lock, flags);
l2c_set_debug(base, 0x03);
__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
l2c_set_debug(base, 0x00);
__l2c210_cache_sync(base);
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ l2x0_spin_unlock(&l2x0_lock, flags);
}
static void __init l2c310_save(void __iomem *base)
fns.sync = NULL;
}
+#ifdef CONFIG_OPTEE
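+ /* Let OP-TEE install a mutex shared with the secure world */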
+ fns.set_mutex = l2c_set_mutex;
+#endif
+
/*
* Check if l2x0 controller is already enabled. If we are booting
* in non-secure mode accessing the below registers will fault.
while (start < end) {
range_end = aurora_range_end(start, end);
- raw_spin_lock_irqsave(&l2x0_lock, flags);
+ l2x0_spin_lock(&l2x0_lock, flags);
writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG);
writel_relaxed(range_end - CACHE_LINE_SIZE, base + offset);
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ l2x0_spin_unlock(&l2x0_lock, flags);
writel_relaxed(0, base + AURORA_SYNC_REG);
start = range_end;
unsigned long flags;
/* clean all ways */
- raw_spin_lock_irqsave(&l2x0_lock, flags);
+ l2x0_spin_lock(&l2x0_lock, flags);
__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ l2x0_spin_unlock(&l2x0_lock, flags);
writel_relaxed(0, base + AURORA_SYNC_REG);
}
void __iomem *base = l2x0_base;
unsigned long flags;
- raw_spin_lock_irqsave(&l2x0_lock, flags);
+ l2x0_spin_lock(&l2x0_lock, flags);
__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
writel_relaxed(0, base + AURORA_SYNC_REG);
l2c_write_sec(0, base, L2X0_CTRL);
dsb(st);
- raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ l2x0_spin_unlock(&l2x0_lock, flags);
}
static void aurora_save(void __iomem *base)
else
cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+ l2x0_spin_lock_init(&l2x0_lock);
+
return __l2c_init(data, aux_val, aux_mask, cache_id, nosync);
}
#endif
#include "optee_smc.h"
#include "shm_pool.h"
+#ifdef CONFIG_OUTER_CACHE
+#include <asm/outercache.h>
+#endif
+
#define DRIVER_NAME "optee"
#define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
return rc;
}
+#ifdef CONFIG_OUTER_CACHE
+/**
+ * optee_outercache_mutex() - ask the TEE for a mutex shared between the TEE
+ * and Linux to serialize outer cache maintenance
+ * @invoke_fn: reference to the SMC call function
+ *
+ * Return: 0 on success, -EINVAL on invalid value, -ENOMEM when out of memory
+ */
+static int optee_outercache_mutex(optee_invoke_fn *invoke_fn)
+{
+ struct arm_smccc_res res;
+
+ int ret = -EINVAL;
+ void *vaddr = NULL;
+ phys_addr_t paddr = 0;
+
+ /* Get the Physical Address of the mutex allocated in the SHM */
+ invoke_fn(OPTEE_SMC_L2CC_MUTEX,
+ OPTEE_SMC_L2CC_MUTEX_GET_ADDR, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0 != OPTEE_SMC_RETURN_OK) {
+ pr_warn("no TZ l2cc mutex service supported\n");
+ goto out;
+ }
+
+ paddr = (unsigned long)reg_pair_to_ptr(res.a2, res.a3);
+ pr_debug("outer cache shared mutex paddr 0x%lx\n", (unsigned long)paddr);
+
+ /* Remap the Mutex into a cacheable area */
+ vaddr = memremap(paddr, sizeof(u32), MEMREMAP_WB);
+ if (vaddr == NULL) {
+ pr_warn("TZ l2cc mutex: ioremap failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pr_debug("outer cache shared mutex vaddr %p\n", vaddr);
+
+ if (outer_mutex(vaddr)) {
+ pr_warn("TZ l2cc mutex: outer cache refused\n");
+ goto out;
+ }
+
+ invoke_fn(OPTEE_SMC_L2CC_MUTEX,
+ OPTEE_SMC_L2CC_MUTEX_ENABLE, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0 != OPTEE_SMC_RETURN_OK) {
+ pr_warn("TZ l2cc mutex disabled: TZ enable failed\n");
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ pr_info("teetz outer mutex: ret=%d pa=0x%lx va=0x%p\n",
+ ret, (unsigned long)paddr, vaddr);
+
+ return ret;
+}
+#endif
+
/* Simple wrapper functions to be able to use a function pointer */
static void optee_smccc_smc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3,
if (IS_ERR(pool))
return (void *)pool;
+#ifdef CONFIG_OUTER_CACHE
+
+ /* Try to get a shared mutex for L2 cache maintenance */
+ if (of_find_compatible_node(NULL, NULL, "arm,pl310-cache")) {
+ rc = optee_outercache_mutex(invoke_fn);
+ if (rc)
+ goto err;
+ }
+
+#endif
+
optee = kzalloc(sizeof(*optee), GFP_KERNEL);
if (!optee) {
rc = -ENOMEM;