void __init imx_gpcv2_check_dt(void);
void imx_gpcv2_set_lpm_mode(enum mxc_cpu_pwr_mode mode);
void imx_gpcv2_set_cpu_power_gate_in_idle(bool pdn);
+void imx_gpcv2_enable_rbc(bool enable);
unsigned long save_ttbr1(void);
void restore_ttbr1(unsigned long ttbr1);
void imx_gpc_mask_all(void);
struct imx7_pm_base anatop_base;
struct imx7_pm_base src_base;
struct imx7_pm_base iomuxc_gpr_base;
+ struct imx7_pm_base gpc_base;
+ struct imx7_pm_base gic_dist_base;
} __aligned(8);
static atomic_t master_lpi = ATOMIC_INIT(0);
},
/* LOW POWER IDLE */
{
- .exit_latency = 150,
- .target_residency = 300,
+ .exit_latency = 500,
+ .target_residency = 1000,
.flags = CPUIDLE_FLAG_TIMER_STOP,
.enter = imx7d_enter_low_power_idle,
.name = "LOW-POWER-IDLE",
cpuidle_pm_info->iomuxc_gpr_base.vbase =
(void __iomem *)IMX_IO_P2V(MX7D_IOMUXC_GPR_BASE_ADDR);
+ cpuidle_pm_info->gpc_base.pbase = MX7D_GPC_BASE_ADDR;
+ cpuidle_pm_info->gpc_base.vbase =
+ (void __iomem *)IMX_IO_P2V(MX7D_GPC_BASE_ADDR);
+
+ cpuidle_pm_info->gic_dist_base.pbase = MX7D_GIC_BASE_ADDR;
+ cpuidle_pm_info->gic_dist_base.vbase =
+ (void __iomem *)IMX_IO_P2V(MX7D_GIC_BASE_ADDR);
+
imx7d_enable_rcosc();
#ifdef CONFIG_HOTPLUG_CPU
#define BM_LPCR_M4_MASK_DSM_TRIGGER 0x80000000
#define BM_SLPCR_EN_DSM 0x80000000
#define BM_SLPCR_RBC_EN 0x40000000
+#define BM_SLPCR_REG_BYPASS_COUNT 0x3f000000
#define BM_SLPCR_VSTBY 0x4
#define BM_SLPCR_SBYOS 0x2
#define BM_SLPCR_BYPASS_PMIC_READY 0x1
writel_relaxed(BM_GPC_PGC_ACK_SEL_A7_DUMMY_PUP_ACK |
BM_GPC_PGC_ACK_SEL_A7_DUMMY_PDN_ACK,
gpc_base + GPC_PGC_ACK_SEL_A7);
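+	/* disable RBC */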
+ imx_gpcv2_enable_rbc(false);
}
spin_unlock_irqrestore(&gpcv2_lock, flags);
}
return 0;
}
+void imx_gpcv2_enable_rbc(bool enable)
+{
+ u32 val;
+
+	/*
+	 * all interrupts need to be masked in the GPC
+	 * before changing the RBC configuration
+	 */
+ imx_gpcv2_mask_all();
+
+ /* configure RBC enable bit */
+ val = readl_relaxed(gpc_base + GPC_SLPCR);
+ val &= ~BM_SLPCR_RBC_EN;
+ val |= enable ? BM_SLPCR_RBC_EN : 0;
+ writel_relaxed(val, gpc_base + GPC_SLPCR);
+
+ /* configure RBC count */
+ val = readl_relaxed(gpc_base + GPC_SLPCR);
+ val &= ~BM_SLPCR_REG_BYPASS_COUNT;
+ val |= enable ? BM_SLPCR_REG_BYPASS_COUNT : 0;
+	writel_relaxed(val, gpc_base + GPC_SLPCR);
+
+	/*
+	 * the hardware design requires a delay of at
+	 * least 2 cycles of CKIL (32K), i.e.
+	 * 2 / 32768 Hz ~= 61us; use 65us here to be safe
+	 */
+ udelay(65);
+
+ /* restore GPC interrupt mask settings */
+ imx_gpcv2_restore_all();
+}
+
void imx_gpcv2_pre_suspend(bool arm_power_off)
{
void __iomem *reg_imr1 = gpc_base + GPC_IMR1_CORE0;
/* set mega/fast mix in A7 domain */
writel_relaxed(0x1, gpc_base + GPC_PGC_CPU_MAPPING);
/* set SCU timing */
- writel_relaxed((0x59 << 10) | 0x5B | (0x51 << 20),
+ writel_relaxed((0x59 << 10) | 0x5B | (0x2 << 20),
gpc_base + GPC_PGC_SCU_TIMING);
/* set C0/C1 power up timing per design requirement */
val = readl_relaxed(gpc_base + GPC_PGC_C1_PUPSCR);
val &= ~BM_GPC_PGC_CORE_PUPSCR;
- val |= (0x19 << 7);
+ val |= (0x1A << 7);
writel_relaxed(val, gpc_base + GPC_PGC_C1_PUPSCR);
val = readl_relaxed(gpc_base + GPC_SLPCR);
writel_relaxed(BM_GPC_PGC_ACK_SEL_A7_DUMMY_PUP_ACK |
BM_GPC_PGC_ACK_SEL_A7_DUMMY_PDN_ACK,
gpc_base + GPC_PGC_ACK_SEL_A7);
+
+ /* disable RBC */
+ imx_gpcv2_enable_rbc(false);
}
static struct irq_chip imx_gpcv2_chip = {
/* set mega/fast mix in A7 domain */
writel_relaxed(0x1, gpc_base + GPC_PGC_CPU_MAPPING);
/* set SCU timing */
- writel_relaxed((0x59 << 10) | 0x5B | (0x51 << 20),
+ writel_relaxed((0x59 << 10) | 0x5B | (0x2 << 20),
gpc_base + GPC_PGC_SCU_TIMING);
/* set C0/C1 power up timing per design requirement */
val = readl_relaxed(gpc_base + GPC_PGC_C1_PUPSCR);
val &= ~BM_GPC_PGC_CORE_PUPSCR;
- val |= (0x19 << 7);
+ val |= (0x1A << 7);
writel_relaxed(val, gpc_base + GPC_PGC_C1_PUPSCR);
writel_relaxed(BM_GPC_PGC_ACK_SEL_A7_DUMMY_PUP_ACK |
val |= BM_GPC_MLPCR_MEMLP_CTL_DIS;
writel_relaxed(val, gpc_base + GPC_MLPCR);
+ /* disable RBC */
+ imx_gpcv2_enable_rbc(false);
+
return 0;
}
#define PM_INFO_MX7D_SRC_V_OFFSET 0x48
#define PM_INFO_MX7D_IOMUXC_GPR_P_OFFSET 0x4c
#define PM_INFO_MX7D_IOMUXC_GPR_V_OFFSET 0x50
+#define PM_INFO_MX7D_GPC_P_OFFSET 0x54
+#define PM_INFO_MX7D_GPC_V_OFFSET 0x58
+#define PM_INFO_MX7D_GIC_DIST_P_OFFSET 0x5c
+#define PM_INFO_MX7D_GIC_DIST_V_OFFSET 0x60
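+/* pbase/vbase offsets of the new gpc_base and gic_dist_base entries in the pm info structure */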
#define MX7D_SRC_GPR1 0x74
#define MX7D_SRC_GPR2 0x78
#define MX7D_SRC_GPR3 0x7c
#define MX7D_SRC_GPR4 0x80
+#define MX7D_GPC_IMR1 0x30
+#define MX7D_GPC_IMR2 0x34
+#define MX7D_GPC_IMR3 0x38
+#define MX7D_GPC_IMR4 0x3c
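+/* GPC interrupt mask register offsets, used below to save and restore the masks around the RBC setup */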
#define DDRC_STAT 0x4
#define DDRC_PWRCTL 0x30
#define DDRC_DBG1 0x304
ddrc_enter_self_refresh
ccm_enter_idle
anatop_enter_idle
+
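+	/* disable the GIC distributor (GICD_CTLR = 0) */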
+ ldr r10, [r0, #PM_INFO_MX7D_GIC_DIST_V_OFFSET]
+ ldr r7, =0x0
+ str r7, [r10]
+
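+	/* save the current GPC interrupt mask settings */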
+ ldr r10, [r0, #PM_INFO_MX7D_GPC_V_OFFSET]
+ ldr r4, [r10, #MX7D_GPC_IMR1]
+ ldr r5, [r10, #MX7D_GPC_IMR2]
+ ldr r6, [r10, #MX7D_GPC_IMR3]
+ ldr r7, [r10, #MX7D_GPC_IMR4]
+
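+	/* mask all the GPC interrupts */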
+ ldr r8, =0xffffffff
+ str r8, [r10, #MX7D_GPC_IMR1]
+ str r8, [r10, #MX7D_GPC_IMR2]
+ str r8, [r10, #MX7D_GPC_IMR3]
+ str r8, [r10, #MX7D_GPC_IMR4]
+
+ /*
+ * enable the RBC bypass counter here
+ * to hold off the interrupts. RBC counter
+ * = 8 (240us). With this setting, the latency
+ * from wakeup interrupt to ARM power up
+	 * is ~250us.
+ */
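+	/* 0x14 is the GPC_SLPCR offset */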
+ ldr r8, [r10, #0x14]
+ bic r8, r8, #(0x3f << 24)
+ orr r8, r8, #(0x8 << 24)
+ str r8, [r10, #0x14]
+
+ /* enable the counter. */
+ ldr r8, [r10, #0x14]
+ orr r8, r8, #(0x1 << 30)
+ str r8, [r10, #0x14]
+
+	/* restore the saved GPC interrupt masks. */
+ str r4, [r10, #MX7D_GPC_IMR1]
+ str r5, [r10, #MX7D_GPC_IMR2]
+ str r6, [r10, #MX7D_GPC_IMR3]
+ str r7, [r10, #MX7D_GPC_IMR4]
+
+	/*
+	 * now delay for a short while (30usec);
+	 * ARM is at 24MHz at this point, so a
+	 * short loop should be enough. this delay
+	 * is required to ensure that the RBC counter
+	 * can start counting in case an interrupt
+	 * is already pending or arrives just as
+	 * ARM is about to assert DSM_request.
+	 */
+ ldr r4, =5
+rbc_loop:
+ subs r4, r4, #0x1
+ bne rbc_loop
+
/* set low power idle enter flag */
ldr r7, =0x1
str r7, [r0, #PM_INFO_PM_INFO_LPI_ENTER_OFFSET]
anatop_exit_idle
ccm_exit_idle
ddrc_exit_self_refresh
+
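+	/* re-enable the GIC distributor */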
+ ldr r10, [r0, #PM_INFO_MX7D_GIC_DIST_V_OFFSET]
+ ldr r7, =0x1
+ str r7, [r10]
/* clear lpi enter flag */
ldr r7, =0x0
str r7, [r0, #PM_INFO_PM_INFO_LPI_ENTER_OFFSET]
#define MX7D_AIPS2_SIZE 0x400000
#define MX7D_AIPS3_BASE_ADDR 0x30900000
#define MX7D_AIPS3_SIZE 0x300000
+#define MX7D_GIC_BASE_ADDR 0x31000000
+#define MX7D_GIC_SIZE 0x100000
#define TT_ATTRIB_NON_CACHEABLE_1M 0x802
#define MX7_IRAM_TLB_SIZE 0x4000
((MX7D_AIPS3_BASE_ADDR + i * 0x100000) & 0xFFF00000) |
TT_ATTRIB_NON_CACHEABLE_1M;
}
+
+ /*
+ * Make sure the GIC virtual address has a mapping in the
+ * IRAM page table.
+ */
+ j = ((IMX_IO_P2V(MX7D_GIC_BASE_ADDR) >> 20) << 2) / 4;
+ *((unsigned long *)iram_tlb_base_addr + j) =
+ (MX7D_GIC_BASE_ADDR & 0xFFF00000) | TT_ATTRIB_NON_CACHEABLE_1M;
}
static int __init imx7_suspend_init(const struct imx7_pm_socdata *socdata)