MLK-16083 imx8qm/qxp: Setup MMU pagetables dynamically with owned memory regions
authorYe Li <ye.li@nxp.com>
Wed, 26 Jul 2017 01:41:13 +0000 (20:41 -0500)
committerJason Liu <jason.hui.liu@nxp.com>
Thu, 2 Nov 2017 18:37:04 +0000 (02:37 +0800)
Since the memory regions assigned to the u-boot partition are dynamically set
by ATF and SCD, we have to set up the MMU pagetables according to the owned
memory regions, and not map unassigned memory.

Signed-off-by: Ye Li <ye.li@nxp.com>
arch/arm/cpu/armv8/imx8/cpu.c

index f855596..ba9733f 100644 (file)
 
 DECLARE_GLOBAL_DATA_PTR;
 
-static struct mm_region imx8_mem_map[] = {
-       {
-               .virt = 0x0UL,
-               .phys = 0x0UL,
-               .size = 0x2000000UL,
-               .attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
-                        PTE_BLOCK_OUTER_SHARE
-       }, {
-               .virt = 0x2000000UL,
-               .phys = 0x2000000UL,
-               .size = 0x7E000000UL,
-               .attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
-                        PTE_BLOCK_NON_SHARE |
-                        PTE_BLOCK_PXN | PTE_BLOCK_UXN
-       }, {
-               .virt = 0x80000000UL,
-               .phys = 0x80000000UL,
-               .size = 0x80000000UL,
-               .attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
-                        PTE_BLOCK_OUTER_SHARE
-       }, {
-               .virt = 0x100000000UL,
-               .phys = 0x100000000UL,
-               .size = 0x700000000UL,
-               .attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
-                        PTE_BLOCK_NON_SHARE |
-                        PTE_BLOCK_PXN | PTE_BLOCK_UXN
-       }, {
-               .virt = 0x880000000UL,
-               .phys = 0x880000000UL,
-               .size = 0x780000000UL,
-               .attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
-                        PTE_BLOCK_OUTER_SHARE
-       }, {
-               /* List terminator */
-               0,
-       }
-};
-struct mm_region *mem_map = imx8_mem_map;
-
 u32 get_cpu_rev(void)
 {
        sc_ipc_t ipcHndl;
@@ -786,9 +746,6 @@ static int get_owned_memreg(sc_rm_mr_t mr, sc_faddr_t *addr_start, sc_faddr_t *a
                                return -EINVAL;
                        } else {
                                debug("0x%llx -- 0x%llx\n", start, end);
-                               start = roundup(start, MEMSTART_ALIGNMENT);
-                               if (start > end) /* Too small memory region, not use it */
-                                       return -EINVAL;
 
                                *addr_start = start;
                                *addr_end = end;
@@ -810,6 +767,10 @@ phys_size_t get_effective_memsize(void)
        for (mr = 0; mr < 64; mr++) {
                err = get_owned_memreg(mr, &start, &end);
                if (!err) {
+                       start = roundup(start, MEMSTART_ALIGNMENT);
+                       if (start > end) /* Too small memory region, not use it */
+                               continue;
+
                        /* Find the memory region runs the u-boot */
                        if (start >= PHYS_SDRAM_1 && start <= ((sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE)
                                && (start <= CONFIG_SYS_TEXT_BASE && CONFIG_SYS_TEXT_BASE <= end)){
@@ -892,6 +853,10 @@ void dram_init_banksize(void)
        for (mr = 0; mr < 64 && i < CONFIG_NR_DRAM_BANKS; mr++) {
                err = get_owned_memreg(mr, &start, &end);
                if (!err) {
+                       start = roundup(start, MEMSTART_ALIGNMENT);
+                       if (start > end) /* Too small memory region, not use it */
+                               continue;
+
                        if (start >= PHYS_SDRAM_1 && start <= ((sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE)) {
                                gd->bd->bi_dram[i].start = start;
 
@@ -925,3 +890,102 @@ void dram_init_banksize(void)
                gd->bd->bi_dram[1].size = PHYS_SDRAM_2_SIZE;
        }
 }
+
+static u64 get_block_attrs(sc_faddr_t addr_start)
+{
+       if ((addr_start >= PHYS_SDRAM_1 && addr_start <= ((sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE))
+               || (addr_start >= PHYS_SDRAM_2 && addr_start <= ((sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE))
+               || (addr_start >= 0x0 && addr_start <= ((sc_faddr_t)0x20000000)))
+               return (PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_OUTER_SHARE);
+
+       return (PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN);
+}
+
+static u64 get_block_size(sc_faddr_t addr_start, sc_faddr_t addr_end)
+{
+       if (addr_start >= PHYS_SDRAM_1 && addr_start <= ((sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE)) {
+               if ((addr_end + 1) > ((sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE))
+                       return PHYS_SDRAM_1_SIZE;
+
+       } else if (addr_start >= PHYS_SDRAM_2 && addr_start <= ((sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE)) {
+
+               if ((addr_end + 1) > ((sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE))
+                       return PHYS_SDRAM_2_SIZE;
+       }
+
+       return (addr_end - addr_start + 1);
+}
+
+#define MAX_PTE_ENTRIES 512
+#define MAX_MEM_MAP_REGIONS 16
+
+static struct mm_region imx8_mem_map[MAX_MEM_MAP_REGIONS];
+struct mm_region *mem_map = imx8_mem_map;
+
+void enable_caches(void)
+{
+       sc_rm_mr_t mr;
+       sc_faddr_t start, end;
+       int err, i;
+
+       /* Create map for registers access from 0x1c000000 to 0x80000000*/
+       imx8_mem_map[0].virt = 0x1c000000UL;
+       imx8_mem_map[0].phys = 0x1c000000UL;
+       imx8_mem_map[0].size = 0x64000000UL;
+       imx8_mem_map[0].attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+                        PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN;
+
+       i = 1;
+       for (mr = 0; mr < 64 && i < MAX_MEM_MAP_REGIONS; mr++) {
+               err = get_owned_memreg(mr, &start, &end);
+               if (!err) {
+                       imx8_mem_map[i].virt = start;
+                       imx8_mem_map[i].phys = start;
+                       imx8_mem_map[i].size = get_block_size(start, end);
+                       imx8_mem_map[i].attrs = get_block_attrs(start);
+                       i++;
+               }
+       }
+
+       if (i < MAX_MEM_MAP_REGIONS) {
+               imx8_mem_map[i].size = 0;
+               imx8_mem_map[i].attrs = 0;
+       } else {
+               printf("Error, need more MEM MAP REGIONS reserved\n");
+               icache_enable();
+               return;
+       }
+
+       for (i = 0;i < MAX_MEM_MAP_REGIONS;i++) {
+               debug("[%d] vir = 0x%llx phys = 0x%llx size = 0x%llx attrs = 0x%llx\n", i,
+                       imx8_mem_map[i].virt, imx8_mem_map[i].phys, imx8_mem_map[i].size, imx8_mem_map[i].attrs);
+       }
+
+       icache_enable();
+       dcache_enable();
+}
+
+#ifndef CONFIG_SYS_DCACHE_OFF
+u64 get_page_table_size(void)
+{
+       u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
+       u64 size = 0;
+
+       /* For each memory region, the max table size:  2 level 3 tables + 2 level 2 tables + 1 level 1 table*/
+       size = (2 + 2 + 1) * one_pt * MAX_MEM_MAP_REGIONS + one_pt;
+
+       /*
+        * We need to duplicate our page table once to have an emergency pt to
+        * resort to when splitting page tables later on
+        */
+       size *= 2;
+
+       /*
+        * We may need to split page tables later on if dcache settings change,
+        * so reserve up to 4 (random pick) page tables for that.
+        */
+       size += one_pt * 4;
+
+       return size;
+}
+#endif