ARM: add new non-shareable ioremap
author     Pan Jiafei <Jiafei.Pan@nxp.com>
           Thu, 17 Mar 2016 02:01:03 +0000 (02:01 +0000)
committer  Dong Aisheng <aisheng.dong@nxp.com>
           Mon, 14 Dec 2020 02:34:21 +0000 (10:34 +0800)
Signed-off-by: Pan Jiafei <Jiafei.Pan@nxp.com>
Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
arch/arm/include/asm/io.h
arch/arm/include/asm/mach/map.h
arch/arm/mm/ioremap.c
arch/arm/mm/mmu.c
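
This patch introduces ioremap_cache_ns(), which maps a physical range as
cacheable but non-shareable normal memory through the new MT_MEMORY_RW_NS
memory type. A minimal usage sketch follows; the driver function and
variable names and the mapped region are hypothetical and not part of this
patch.

	#include <linux/errno.h>
	#include <linux/io.h>

	static void __iomem *example_portal;	/* hypothetical mapping cookie */

	static int example_map_portal(phys_addr_t phys, size_t len)
	{
		/* cacheable, non-shareable mapping added by this patch */
		example_portal = ioremap_cache_ns(phys, len);
		if (!example_portal)
			return -ENOMEM;
		return 0;
	}

	static void example_unmap_portal(void)
	{
		/* the regular iounmap() tears the mapping down */
		iounmap(example_portal);
	}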

diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index ab2b654..cef06f4 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -123,6 +123,7 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
 #define MT_DEVICE_NONSHARED    1
 #define MT_DEVICE_CACHED       2
 #define MT_DEVICE_WC           3
+#define MT_MEMORY_RW_NS                4
 /*
- * types 4 onwards can be found in asm/mach/map.h and are undefined
+ * types 5 onwards can be found in asm/mach/map.h and are undefined
  * for ioremap
@@ -395,6 +396,8 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
 #define ioremap_wc ioremap_wc
 #define ioremap_wt ioremap_wc
 
+void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size);
+
 void iounmap(volatile void __iomem *iomem_cookie);
 #define iounmap iounmap
 
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 9228255..05e1af2 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -18,9 +18,9 @@ struct map_desc {
        unsigned int type;
 };
 
-/* types 0-3 are defined in asm/io.h */
+/* types 0-4 are defined in asm/io.h */
 enum {
-       MT_UNCACHED = 4,
+       MT_UNCACHED = 5,
        MT_CACHECLEAN,
        MT_MINICLEAN,
        MT_LOW_VECTORS,
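
Because MT_MEMORY_RW_NS takes slot 4 of the shared MT_* numbering,
MT_UNCACHED and the remaining map.h types shift up by one. The new type can
in principle also be referenced from a static map_desc entry created at
boot; the sketch below is hypothetical and not part of this patch, and the
addresses, length and function names are placeholders only.

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/sizes.h>
	#include <asm/mach/map.h>
	#include <asm/memory.h>

	static struct map_desc example_ns_desc[] __initdata = {
		{
			.virtual = 0xf9000000UL,		/* placeholder VA */
			.pfn     = __phys_to_pfn(0x01000000),	/* placeholder PA */
			.length  = SZ_1M,
			.type    = MT_MEMORY_RW_NS,	/* new non-shareable type */
		},
	};

	static void __init example_map_io(void)
	{
		/* creates the mapping through the usual boot-time iotable path */
		iotable_init(example_ns_desc, ARRAY_SIZE(example_ns_desc));
	}
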
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 000e821..2f2a954 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -378,6 +378,13 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 }
 EXPORT_SYMBOL(ioremap_wc);
 
+void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size)
+{
+       return arch_ioremap_caller(res_cookie, size, MT_MEMORY_RW_NS,
+                                  __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache_ns);
+
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space as memory. Needed when the kernel wants to execute
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ab69250..23c6beb 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -292,6 +292,13 @@ static struct mem_type mem_types[] __ro_after_init = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
+       [MT_MEMORY_RW_NS] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                            L_PTE_XN,
+               .prot_l1   = PMD_TYPE_TABLE,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
+               .domain    = DOMAIN_KERNEL,
+       },
        [MT_ROM] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
@@ -619,6 +626,7 @@ static void __init build_mem_type_table(void)
        }
        kern_pgprot |= PTE_EXT_AF;
        vecs_pgprot |= PTE_EXT_AF;
+       mem_types[MT_MEMORY_RW_NS].prot_pte |= PTE_EXT_AF | cp->pte;
 
        /*
         * Set PXN for user mappings
@@ -644,6 +652,7 @@ static void __init build_mem_type_table(void)
        mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
        mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
        mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
+       mem_types[MT_MEMORY_RW_NS].prot_sect |= ecc_mask | cp->pmd;
        mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
        mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
        mem_types[MT_ROM].prot_sect |= cp->pmd;
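
Note on how the new entry stays non-shareable: in mainline
build_mem_type_table(), is_smp() kernels add L_PTE_SHARED to kern_pgprot and
PMD_SECT_S to the shareable normal-memory section types, and MT_MEMORY_RW
picks those up through the kern_pgprot assignment shown in the context
above. MT_MEMORY_RW_NS is kept out of that path: it only gains PTE_EXT_AF
and the cache-policy bits (cp->pte, cp->pmd, ecc_mask), so mappings returned
by ioremap_cache_ns() end up cacheable but not marked shareable.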