From 9275937c422b9fc92b1fa548861b4cf4322e905a Mon Sep 17 00:00:00 2001
From: Peng Fan
Date: Thu, 31 Aug 2017 10:13:51 +0800
Subject: [PATCH 01/11] core: mmu: export map_memarea_sections

Export map_memarea_sections. We need an MMU table dedicated to the low
power feature, so export map_memarea_sections to create that section
mapping.

Signed-off-by: Peng Fan
Reviewed-by: Jens Wiklander
Reviewed-by: Etienne Carriere
---
 core/arch/arm/include/mm/core_mmu.h | 3 +++
 core/arch/arm/mm/core_mmu_v7.c      | 3 +--
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/core/arch/arm/include/mm/core_mmu.h b/core/arch/arm/include/mm/core_mmu.h
index 9f63411e1fd..08896397028 100644
--- a/core/arch/arm/include/mm/core_mmu.h
+++ b/core/arch/arm/include/mm/core_mmu.h
@@ -497,6 +497,9 @@ static inline TEE_Result cache_op_outer(enum cache_op op __unused,
 /* Check cpu mmu enabled or not */
 bool cpu_mmu_enabled(void);
 
+/* Do section mapping, not support on LPAE */
+void map_memarea_sections(const struct tee_mmap_region *mm, uint32_t *ttb);
+
 /*
  * Check if platform defines nsec DDR range(s).
  * Static SHM (MEM_AREA_NSEC_SHM) is not covered by this API as it is
diff --git a/core/arch/arm/mm/core_mmu_v7.c b/core/arch/arm/mm/core_mmu_v7.c
index 222d33faefe..a1c0fd33bd6 100644
--- a/core/arch/arm/mm/core_mmu_v7.c
+++ b/core/arch/arm/mm/core_mmu_v7.c
@@ -742,8 +742,7 @@ static void map_page_memarea_in_pgdirs(const struct tee_mmap_region *mm,
 	}
 }
 
-static void map_memarea_sections(const struct tee_mmap_region *mm,
-				 uint32_t *ttb)
+void map_memarea_sections(const struct tee_mmap_region *mm, uint32_t *ttb)
 {
 	uint32_t attr = mattr_to_desc(1, mm->attr);
 	size_t idx = mm->va >> SECTION_SHIFT;

From 732fc431f891b43f2d2fe841ace7ed79388c3512 Mon Sep 17 00:00:00 2001
From: Peng Fan
Date: Tue, 12 Sep 2017 16:59:46 +0800
Subject: [PATCH 02/11] core: arm: psci: pass nsec ctx to psci

Pass the non-secure context to the psci functions. When the cpu/system
suspends, the cpu may lose power, so when returning to Linux from TEE,
TEE needs to return to a Linux resume point, not the usual return
address after the "smc" instruction. So we need to modify the mon_lr
value in the non-secure context.

PSCI runs in monitor mode, where sm_get_nsec_ctx can not be used, so
pass the non-secure context pointer to the psci suspend function.
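For illustration, a platform suspend handler that receives the context can
redirect the non-secure return address to the Linux resume point roughly as
below (sketch only; "plat_cpu_suspend" is a made-up name, the pattern mirrors
the i.MX7 handler added later in this series):

    int plat_cpu_suspend(uint32_t power_state, uintptr_t entry,
                         uint32_t context_id, struct sm_nsec_ctx *nsec)
    {
            /* ... program hardware and enter the low power state ... */

            /* Resume in Linux at "entry" instead of just after "smc" */
            nsec->mon_lr = (uint32_t)entry;
            return PSCI_RET_SUCCESS;
    }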
Signed-off-by: Peng Fan Reviewed-by: Jens Wiklander Reviewed-by: Etienne Carriere --- core/arch/arm/include/sm/psci.h | 5 +++-- core/arch/arm/include/sm/std_smc.h | 4 +++- core/arch/arm/sm/psci.c | 7 ++++--- core/arch/arm/sm/sm.c | 2 +- core/arch/arm/sm/std_smc.c | 4 ++-- 5 files changed, 13 insertions(+), 9 deletions(-) diff --git a/core/arch/arm/include/sm/psci.h b/core/arch/arm/include/sm/psci.h index 9b8eb4c1d41..8ae8efea8ff 100644 --- a/core/arch/arm/include/sm/psci.h +++ b/core/arch/arm/include/sm/psci.h @@ -1,4 +1,5 @@ #include +#include #include #define PSCI_FN_BASE (0x84000000U) @@ -44,7 +45,7 @@ uint32_t psci_version(void); int psci_cpu_suspend(uint32_t power_state, uintptr_t entry, - uint32_t context_id); + uint32_t context_id, struct sm_nsec_ctx *nsec); int psci_cpu_off(void); int psci_cpu_on(uint32_t cpu_id, uint32_t entry, uint32_t context_id); int psci_affinity_info(uint32_t affinity, uint32_t lowest_affnity_level); @@ -58,6 +59,6 @@ int psci_node_hw_state(uint32_t cpu_id, uint32_t power_level); int psci_system_suspend(uintptr_t entry, uint32_t context_id); int psci_stat_residency(uint32_t cpu_id, uint32_t power_state); int psci_stat_count(uint32_t cpu_id, uint32_t power_state); -void tee_psci_handler(struct thread_smc_args *args); +void tee_psci_handler(struct thread_smc_args *args, struct sm_nsec_ctx *nsec); void psci_armv7_cpu_off(void); diff --git a/core/arch/arm/include/sm/std_smc.h b/core/arch/arm/include/sm/std_smc.h index 2b2e54d9171..c3d738c8e3b 100644 --- a/core/arch/arm/include/sm/std_smc.h +++ b/core/arch/arm/include/sm/std_smc.h @@ -1,6 +1,8 @@ #ifndef __STD_SMC_H__ #define __STD_SMC_H__ +#include + /* SMC function IDs for Standard Service queries */ #define ARM_STD_SVC_CALL_COUNT 0x8400ff00 @@ -18,5 +20,5 @@ #define is_psci_fid(_fid) \ (((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE) -void smc_std_handler(struct thread_smc_args *args); +void smc_std_handler(struct thread_smc_args *args, struct sm_nsec_ctx *nsec); #endif diff --git a/core/arch/arm/sm/psci.c b/core/arch/arm/sm/psci.c index b50dc3bab33..ddfc0a6dead 100644 --- a/core/arch/arm/sm/psci.c +++ b/core/arch/arm/sm/psci.c @@ -43,7 +43,8 @@ __weak uint32_t psci_version(void) __weak int psci_cpu_suspend(uint32_t power_state __unused, uintptr_t entry __unused, - uint32_t context_id __unused) + uint32_t context_id __unused, + struct sm_nsec_ctx *nsec __unused) { return PSCI_RET_NOT_SUPPORTED; } @@ -117,7 +118,7 @@ __weak int psci_stat_count(uint32_t cpu_id __unused, return PSCI_RET_NOT_SUPPORTED; } -void tee_psci_handler(struct thread_smc_args *args) +void tee_psci_handler(struct thread_smc_args *args, struct sm_nsec_ctx *nsec) { uint32_t smc_fid = args->a0; uint32_t a1 = args->a1; @@ -129,7 +130,7 @@ void tee_psci_handler(struct thread_smc_args *args) args->a0 = psci_version(); break; case PSCI_CPU_SUSPEND: - args->a0 = psci_cpu_suspend(a1, a2, a3); + args->a0 = psci_cpu_suspend(a1, a2, a3, nsec); break; case PSCI_CPU_OFF: args->a0 = psci_cpu_off(); diff --git a/core/arch/arm/sm/sm.c b/core/arch/arm/sm/sm.c index f3fa4afe958..a5883642726 100644 --- a/core/arch/arm/sm/sm.c +++ b/core/arch/arm/sm/sm.c @@ -44,7 +44,7 @@ bool sm_from_nsec(struct sm_ctx *ctx) #ifdef CFG_PSCI_ARM32 if (OPTEE_SMC_OWNER_NUM(*nsec_r0) == OPTEE_SMC_OWNER_STANDARD) { - smc_std_handler((struct thread_smc_args *)nsec_r0); + smc_std_handler((struct thread_smc_args *)nsec_r0, &ctx->nsec); return false; /* Return to non secure state */ } #endif diff --git a/core/arch/arm/sm/std_smc.c b/core/arch/arm/sm/std_smc.c index 5e5bb81bd83..ab3817f8c85 
100644
--- a/core/arch/arm/sm/std_smc.c
+++ b/core/arch/arm/sm/std_smc.c
@@ -40,12 +40,12 @@ static const TEE_UUID uuid = {
 	{0x98, 0xd2, 0x74, 0xf4, 0x38, 0x27, 0x98, 0xbb},
 };
 
-void smc_std_handler(struct thread_smc_args *args)
+void smc_std_handler(struct thread_smc_args *args, struct sm_nsec_ctx *nsec)
 {
 	uint32_t smc_fid = args->a0;
 
 	if (is_psci_fid(smc_fid)) {
-		tee_psci_handler(args);
+		tee_psci_handler(args, nsec);
 		return;
 	}

From 3ddbb7fe3cc85995b15a11bc308d67ce2676ef00 Mon Sep 17 00:00:00 2001
From: Peng Fan
Date: Sat, 16 Sep 2017 08:08:37 +0800
Subject: [PATCH 03/11] core: arm: kernel: make thread_core_local public

Move the structure thread_core_local from thread_private.h to thread.h
to make it public.

Signed-off-by: Peng Fan
Reviewed-by: Jens Wiklander
Reviewed-by: Etienne Carriere
---
 core/arch/arm/include/kernel/thread.h | 29 +++++++++++++++++++++++++++
 core/arch/arm/kernel/thread.c         |  2 +-
 core/arch/arm/kernel/thread_private.h | 29 ---------------------------
 3 files changed, 30 insertions(+), 30 deletions(-)

diff --git a/core/arch/arm/include/kernel/thread.h b/core/arch/arm/include/kernel/thread.h
index 7d17bb4980f..7a0bb6a1957 100644
--- a/core/arch/arm/include/kernel/thread.h
+++ b/core/arch/arm/include/kernel/thread.h
@@ -45,6 +45,33 @@
 #define THREAD_RPC_MAX_NUM_PARAMS 4
 
 #ifndef ASM
+
+#ifdef ARM64
+/*
+ * struct thread_core_local needs to have alignment suitable for a stack
+ * pointer since SP_EL1 points to this
+ */
+#define THREAD_CORE_LOCAL_ALIGNED __aligned(16)
+#else
+#define THREAD_CORE_LOCAL_ALIGNED
+#endif
+
+struct thread_core_local {
+	vaddr_t tmp_stack_va_end;
+	int curr_thread;
+	uint32_t flags;
+	vaddr_t abt_stack_va_end;
+#ifdef ARM32
+	uint32_t r[2];
+#endif
+#ifdef ARM64
+	uint64_t x[4];
+#endif
+#ifdef CFG_TEE_CORE_DEBUG
+	unsigned int locked_count;	/* Number of spinlocks held */
+#endif
+} THREAD_CORE_LOCAL_ALIGNED;
+
 struct thread_vector_table {
 	uint32_t std_smc_entry;
 	uint32_t fast_smc_entry;
@@ -252,6 +279,8 @@ struct thread_handlers {
 void thread_init_primary(const struct thread_handlers *handlers);
 void thread_init_per_cpu(void);
 
+struct thread_core_local *thread_get_core_local(void);
+
 /*
  * Sets the stacks to be used by the different threads. Use THREAD_ID_0 for
  * first stack, THREAD_ID_0 + 1 for the next and so on.
diff --git a/core/arch/arm/kernel/thread.c b/core/arch/arm/kernel/thread.c
index aad91a7caa6..c4346baeb70 100644
--- a/core/arch/arm/kernel/thread.c
+++ b/core/arch/arm/kernel/thread.c
@@ -89,7 +89,7 @@
 
 struct thread_ctx threads[CFG_NUM_THREADS];
 
-static struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];
+struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];
 
 #ifdef CFG_WITH_STACK_CANARIES
 #ifdef ARM32
diff --git a/core/arch/arm/kernel/thread_private.h b/core/arch/arm/kernel/thread_private.h
index 0f78cca13df..f7614d13906 100644
--- a/core/arch/arm/kernel/thread_private.h
+++ b/core/arch/arm/kernel/thread_private.h
@@ -117,33 +117,6 @@ struct thread_ctx {
 	struct mutex_head mutexes;
 	struct thread_specific_data tsd;
 };
-
-#ifdef ARM64
-/*
- * struct thread_core_local need to have alignment suitable for a stack
- * pointer since SP_EL1 points to this
- */
-#define THREAD_CORE_LOCAL_ALIGNED __aligned(16)
-#else
-#define THREAD_CORE_LOCAL_ALIGNED
-#endif
-
-struct thread_core_local {
-	vaddr_t tmp_stack_va_end;
-	int curr_thread;
-	uint32_t flags;
-	vaddr_t abt_stack_va_end;
-#ifdef ARM32
-	uint32_t r[2];
-#endif
-#ifdef ARM64
-	uint64_t x[4];
-#endif
-#ifdef CFG_TEE_CORE_DEBUG
-	unsigned int locked_count;	/* Number of spinlocks held */
-#endif
-} THREAD_CORE_LOCAL_ALIGNED;
-
 #endif /*ASM*/
 
 #ifdef ARM64
@@ -190,8 +163,6 @@ void thread_init_vbar(void);
 /* Handles a stdcall, r0-r7 holds the parameters */
 void thread_std_smc_entry(void);
 
-struct thread_core_local *thread_get_core_local(void);
-
 /*
  * Resumes execution of currently active thread by restoring context and
  * jumping to the instruction where to continue execution.

From b73bb8ed0a53dbb5884ff65b34d56ef321139dad Mon Sep 17 00:00:00 2001
From: Peng Fan
Date: Mon, 18 Sep 2017 16:29:22 +0800
Subject: [PATCH 04/11] core: arm: psci: add suspend resume common functions

Add common cpu suspend/resume functions. Platform psci suspend
functions need to call sm_pm_cpu_suspend(arg, platform_suspend) to
enter suspend.

The i.MX flow is:
psci_cpu_suspend->imx7_cpu_suspend->sm_pm_cpu_suspend(arg, func)

The "func" runs in on-chip RAM that does not lose power when the system
enters suspend or a low power state. The argument "arg" is passed to
"func" through register "r0".
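As a sketch of the expected usage (pm_info_pa and the function placed in
on-chip RAM are platform specific and only illustrative here):

    /* "fn" must run from on-chip RAM; it receives "arg" in r0 */
    ret = sm_pm_cpu_suspend((uint32_t)pm_info_pa, fn);
    if (ret < 0) {
            /* -1: the cpu never actually suspended, nothing to restore */
    } else {
            /* 0: the cpu lost context and resumed through
             * sm_pm_cpu_do_resume; restore caches, GIC and banked
             * registers here */
    }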
Signed-off-by: Peng Fan Reviewed-by: Jens Wiklander Reviewed-by: Etienne Carriere --- core/arch/arm/include/arm32.h | 6 + core/arch/arm/include/arm32_macros.S | 4 + core/arch/arm/include/kernel/thread.h | 1 + core/arch/arm/include/sm/pm.h | 50 ++++++ core/arch/arm/include/sm/sm.h | 2 + core/arch/arm/kernel/asm-defines.c | 6 + core/arch/arm/sm/pm.c | 72 +++++++++ core/arch/arm/sm/pm_a32.S | 225 ++++++++++++++++++++++++++ core/arch/arm/sm/sm_private.h | 4 - core/arch/arm/sm/sub.mk | 2 +- 10 files changed, 367 insertions(+), 5 deletions(-) create mode 100644 core/arch/arm/include/sm/pm.h create mode 100644 core/arch/arm/sm/pm.c create mode 100644 core/arch/arm/sm/pm_a32.S diff --git a/core/arch/arm/include/arm32.h b/core/arch/arm/include/arm32.h index 45890135049..109e64c3dd9 100644 --- a/core/arch/arm/include/arm32.h +++ b/core/arch/arm/include/arm32.h @@ -33,6 +33,12 @@ #include #include +#define CORTEX_A7_PART_NUM 0xC07 +#define CORTEX_A9_PART_NUM 0xC09 + +#define MIDR_PRIMARY_PART_NUM_SHIFT 4 +#define MIDR_PRIMARY_PART_NUM_WIDTH 12 + #define CPSR_MODE_MASK ARM32_CPSR_MODE_MASK #define CPSR_MODE_USR ARM32_CPSR_MODE_USR #define CPSR_MODE_FIQ ARM32_CPSR_MODE_FIQ diff --git a/core/arch/arm/include/arm32_macros.S b/core/arch/arm/include/arm32_macros.S index f3d821d4fd9..f9093834f9a 100644 --- a/core/arch/arm/include/arm32_macros.S +++ b/core/arch/arm/include/arm32_macros.S @@ -27,6 +27,10 @@ /* Please keep them sorted based on the CRn register */ + .macro read_midr reg + mrc p15, 0, \reg, c0, c0, 0 + .endm + .macro read_ctr reg mrc p15, 0, \reg, c0, c0, 1 .endm diff --git a/core/arch/arm/include/kernel/thread.h b/core/arch/arm/include/kernel/thread.h index 7a0bb6a1957..1df7d0ef131 100644 --- a/core/arch/arm/include/kernel/thread.h +++ b/core/arch/arm/include/kernel/thread.h @@ -62,6 +62,7 @@ struct thread_core_local { uint32_t flags; vaddr_t abt_stack_va_end; #ifdef ARM32 + paddr_t sm_pm_ctx_phys; uint32_t r[2]; #endif #ifdef ARM64 diff --git a/core/arch/arm/include/sm/pm.h b/core/arch/arm/include/sm/pm.h new file mode 100644 index 00000000000..7048e2b5edc --- /dev/null +++ b/core/arch/arm/include/sm/pm.h @@ -0,0 +1,50 @@ +/* + * Copyright 2017 NXP + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef SM_PM_H +#define SM_PM_H +#include +#include + +struct sm_pm_ctx { + uint32_t sp; + paddr_t cpu_resume_addr; + uint32_t suspend_regs[16]; +}; + +/* suspend/resume core functions */ +void sm_pm_cpu_suspend_save(struct sm_pm_ctx *ptr, uint32_t sp); +void sm_pm_cpu_do_suspend(uint32_t *ptr); +void sm_pm_cpu_do_resume(void); + +/* + * Exported to platform suspend, arg will be passed to fn as r0 + * Return value: 0 - cpu resumed from suspended state. + * -1 - cpu not suspended. + */ +int sm_pm_cpu_suspend(uint32_t arg, int (*fn)(uint32_t)); +#endif diff --git a/core/arch/arm/include/sm/sm.h b/core/arch/arm/include/sm/sm.h index 3446506fba4..320e26b2d3f 100644 --- a/core/arch/arm/include/sm/sm.h +++ b/core/arch/arm/include/sm/sm.h @@ -134,4 +134,6 @@ static inline bool sm_platform_handler(__unused struct sm_ctx *ctx) bool sm_platform_handler(struct sm_ctx *ctx); #endif +void sm_save_modes_regs(struct sm_mode_regs *regs); +void sm_restore_modes_regs(struct sm_mode_regs *regs); #endif /*SM_SM_H*/ diff --git a/core/arch/arm/kernel/asm-defines.c b/core/arch/arm/kernel/asm-defines.c index 1e3ed4037e4..a08ced3d4ca 100644 --- a/core/arch/arm/kernel/asm-defines.c +++ b/core/arch/arm/kernel/asm-defines.c @@ -26,6 +26,7 @@ */ #include +#include #include #include #include "thread_private.h" @@ -55,6 +56,11 @@ DEFINES /* struct thread_core_local */ DEFINE(THREAD_CORE_LOCAL_R0, offsetof(struct thread_core_local, r[0])); + DEFINE(THREAD_CORE_LOCAL_SM_PM_CTX_PHYS, + offsetof(struct thread_core_local, sm_pm_ctx_phys)); + DEFINE(THREAD_CORE_LOCAL_SIZE, sizeof(struct thread_core_local)); + + DEFINE(SM_PM_CTX_SIZE, sizeof(struct sm_pm_ctx)); #endif /*ARM32*/ #ifdef ARM64 diff --git a/core/arch/arm/sm/pm.c b/core/arch/arm/sm/pm.c new file mode 100644 index 00000000000..cc97dd5debe --- /dev/null +++ b/core/arch/arm/sm/pm.c @@ -0,0 +1,72 @@ +/* + * Copyright 2017 NXP + * + * Peng Fan + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if CFG_TEE_CORE_NB_CORE > 4 +#error "Max support 4 cores in one cluster now" +#endif + +void sm_pm_cpu_suspend_save(struct sm_pm_ctx *ctx, uint32_t sp) +{ + struct thread_core_local *p = thread_get_core_local(); + + p->sm_pm_ctx_phys = virt_to_phys((void *)ctx); + + /* The content will be passed to sm_pm_cpu_do_resume as register sp */ + ctx->sp = sp; + ctx->cpu_resume_addr = + virt_to_phys((void *)(vaddr_t)sm_pm_cpu_do_resume); + + sm_pm_cpu_do_suspend(ctx->suspend_regs); + + dcache_op_level1(DCACHE_OP_CLEAN_INV); + +#ifdef CFG_PL310 + arm_cl2_cleanbyway(core_mmu_get_va(PL310_BASE, MEM_AREA_IO_SEC)); +#endif +} diff --git a/core/arch/arm/sm/pm_a32.S b/core/arch/arm/sm/pm_a32.S new file mode 100644 index 00000000000..1540cd7da07 --- /dev/null +++ b/core/arch/arm/sm/pm_a32.S @@ -0,0 +1,225 @@ +/* + * Copyright 2017 NXP + * + * Peng Fan + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +.section .text + +/* + * int sm_pm_cpu_suspend(uint32_t arg, int (*fn)(uint32_t)) + * @arg will be passed to fn as argument + * return value: 0 - cpu resumed from suspended state. + * -1 - cpu not suspended. + */ +FUNC sm_pm_cpu_suspend, : +UNWIND( .fnstart) +UNWIND( .cantunwind) + push {r4 - r12, lr} + mov r5, sp + sub sp, sp, #SM_PM_CTX_SIZE + push {r0, r1} + + mov r1, r5 + add r0, sp, #8 + blx sm_pm_cpu_suspend_save + adr lr, aborted + /* Jump to arch specific suspend */ + pop {r0, pc} +aborted: + /* cpu not suspended */ + add sp, sp, #SM_PM_CTX_SIZE + /* Return -1 to the caller */ + mov r0, #(-1) +suspend_return: + pop {r4 - r12, pc} +UNWIND( .fnend) +END_FUNC sm_pm_cpu_suspend + +FUNC sm_pm_cpu_do_suspend, : +UNWIND( .fnstart) +UNWIND( .cantunwind) + push {r4 - r11} + read_midr r4 + ubfx r5, r4, #4, #12 + ldr r4, =CORTEX_A7_PART_NUM + cmp r5, r4 + beq a7_suspend + ldr r4, =CORTEX_A9_PART_NUM + cmp r5, r4 + beq a9_suspend + /* cpu not supported */ + b . 
+ /* A9 needs PCR/DIAG */ +a9_suspend: + read_pcr r4 + read_diag r5 + stmia r0!, {r4 - r5} +a7_suspend: + read_fcseidr r4 + read_tpidruro r5 + stmia r0!, {r4 - r5} + read_dacr r4 +#ifdef CFG_WITH_LPAE +#error "Not supported" +#else + read_ttbr0 r5 + read_ttbr1 r6 + read_ttbcr r7 +#endif + read_sctlr r8 + read_actlr r9 + read_cpacr r10 + read_mvbar r11 + stmia r0!, {r4 - r11} + read_prrr r4 + read_nmrr r5 + read_vbar r6 + read_nsacr r7 + stmia r0, {r4 - r7} + pop {r4 - r11} + bx lr +UNWIND( .fnend) +END_FUNC sm_pm_cpu_do_suspend + +FUNC sm_pm_cpu_resume, : +UNWIND( .fnstart) +UNWIND( .cantunwind) + cpsid aif + + /* Call into the runtime address of get_core_pos */ + adr r0, _core_pos + ldr r1, [r0] + add r0, r0, r1 + blx r0 + + /* + * At this point, MMU is not enabled now. + * 1. Get the runtime physical address of _suspend_sp + * 2. Get the offset from _suspend_sp to &thread_core_local + * 3. Get the runtime physical address of thread_core_local + * Since moving towards non-linear mapping, + * `ldr r0, =thread_core_local` is not used here. + */ + adr r4, _suspend_sp + ldr r5, [r4] + add r4, r4, r5 + + mov_imm r1, THREAD_CORE_LOCAL_SIZE + mla r0, r0, r1, r4 + + ldr r0, [r0, #THREAD_CORE_LOCAL_SM_PM_CTX_PHYS] + /* Need to use r0!, because sm_pm_cpu_do_resume needs it */ + ldmia r0!, {sp, pc} +UNWIND( .fnend) +END_FUNC sm_pm_cpu_resume + +/* + * void sm_do_cpu_do_resume(paddr suspend_regs) __noreturn; + * Restore the registers stored when sm_pm_cpu_do_suspend + * r0 points to the physical base address of the suspend_regs + * field of struct sm_pm_ctx. + */ +FUNC sm_pm_cpu_do_resume, : +UNWIND( .fnstart) +UNWIND( .cantunwind) + read_midr r4 + ubfx r5, r4, #4, #12 + ldr r4, =CORTEX_A7_PART_NUM + cmp r5, r4 + beq a7_resume + + /* + * A9 needs PCR/DIAG + */ + ldmia r0!, {r4 - r5} + write_pcr r4 + write_diag r5 + +a7_resume: + /* v7 resume */ + mov ip, #0 + /* Invalidate icache to PoU */ + write_iciallu + /* set reserved context */ + write_contextidr ip + ldmia r0!, {r4 - r5} + write_fcseidr r4 + write_tpidruro r5 + ldmia r0!, {r4 - r11} + /* Invalidate entire TLB */ + write_tlbiall + write_dacr r4 +#ifdef CFG_WITH_LPAE +#error "Not supported -" +#else + write_ttbr0 r5 + write_ttbr1 r6 + write_ttbcr r7 +#endif + + ldmia r0, {r4 - r7} + write_prrr r4 + write_nmrr r5 + write_vbar r6 + write_nsacr r7 + + write_actlr r9 + write_cpacr r10 + write_mvbar r11 + write_bpiall + isb + dsb + /* MMU will be enabled here */ + write_sctlr r8 + isb + mov r0, #0 + b suspend_return +UNWIND( .fnend) +END_FUNC sm_pm_cpu_do_resume + +/* + * The following will be located in text section whose attribute is + * marked as readonly, but we only need to read here + * _suspend_sp stores the offset between thread_core_local to _suspend_sp. + * _core_pos stores the offset between get_core_pos to _core_pos. + */ +.align 2 +.extern thread_core_local +_suspend_sp: + .long thread_core_local - . +.extern get_core_pos +_core_pos: + .long get_core_pos - . 
diff --git a/core/arch/arm/sm/sm_private.h b/core/arch/arm/sm/sm_private.h index 0b41becea55..6578d473aa9 100644 --- a/core/arch/arm/sm/sm_private.h +++ b/core/arch/arm/sm/sm_private.h @@ -30,9 +30,5 @@ /* Returns true if returning to sec, false if returning to nsec */ bool sm_from_nsec(struct sm_ctx *ctx); - -void sm_save_modes_regs(struct sm_mode_regs *regs); -void sm_restore_modes_regs(struct sm_mode_regs *regs); - #endif /*SM_PRIVATE_H*/ diff --git a/core/arch/arm/sm/sub.mk b/core/arch/arm/sm/sub.mk index fef4b43ba0c..09c3840eb03 100644 --- a/core/arch/arm/sm/sub.mk +++ b/core/arch/arm/sm/sub.mk @@ -1,3 +1,3 @@ srcs-y += sm_a32.S srcs-y += sm.c -srcs-$(CFG_PSCI_ARM32) += std_smc.c psci.c psci-helper.S +srcs-$(CFG_PSCI_ARM32) += std_smc.c psci.c pm.c psci-helper.S pm_a32.S From 8ca9edc5b07f1ce4d74be899d414e05031893b5d Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Sat, 26 Aug 2017 11:56:54 +0800 Subject: [PATCH 05/11] core: arm: imx: move psci code to pm Move psci code to pm. Signed-off-by: Peng Fan Acked-by: Jens Wiklander Reviewed-by: Etienne Carriere --- core/arch/arm/plat-imx/{ => pm}/gpcv2.c | 0 core/arch/arm/plat-imx/{ => pm}/psci.c | 0 core/arch/arm/plat-imx/sub.mk | 4 ++-- 3 files changed, 2 insertions(+), 2 deletions(-) rename core/arch/arm/plat-imx/{ => pm}/gpcv2.c (100%) rename core/arch/arm/plat-imx/{ => pm}/psci.c (100%) diff --git a/core/arch/arm/plat-imx/gpcv2.c b/core/arch/arm/plat-imx/pm/gpcv2.c similarity index 100% rename from core/arch/arm/plat-imx/gpcv2.c rename to core/arch/arm/plat-imx/pm/gpcv2.c diff --git a/core/arch/arm/plat-imx/psci.c b/core/arch/arm/plat-imx/pm/psci.c similarity index 100% rename from core/arch/arm/plat-imx/psci.c rename to core/arch/arm/plat-imx/pm/psci.c diff --git a/core/arch/arm/plat-imx/sub.mk b/core/arch/arm/plat-imx/sub.mk index 81c5b7f0663..661b2aab7ff 100644 --- a/core/arch/arm/plat-imx/sub.mk +++ b/core/arch/arm/plat-imx/sub.mk @@ -2,8 +2,8 @@ global-incdirs-y += . srcs-y += main.c imx-common.c srcs-$(CFG_PL310) += imx_pl310.c -srcs-$(CFG_PSCI_ARM32) += psci.c gpcv2.c -cflags-psci.c-y += -Wno-suggest-attribute=noreturn +srcs-$(CFG_PSCI_ARM32) += pm/psci.c pm/gpcv2.c +cflags-pm/psci.c-y += -Wno-suggest-attribute=noreturn ifneq (,$(filter y, $(CFG_MX6Q) $(CFG_MX6D) $(CFG_MX6DL) $(CFG_MX6S))) srcs-y += a9_plat_init.S imx6.c From 8af2207e87e0d4e71496bb7338469a0a8d8b248d Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Tue, 12 Sep 2017 17:20:25 +0800 Subject: [PATCH 06/11] core: imx7: fix comments Fix comments. Signed-off-by: Peng Fan Acked-by: Jens Wiklander Acked-by: Etienne Carriere --- core/arch/arm/plat-imx/imx7.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/arch/arm/plat-imx/imx7.c b/core/arch/arm/plat-imx/imx7.c index 96132bd3e50..01620628c92 100644 --- a/core/arch/arm/plat-imx/imx7.c +++ b/core/arch/arm/plat-imx/imx7.c @@ -84,8 +84,8 @@ void plat_cpu_reset_late(void) */ write32(0x00FF0033, core_mmu_get_va(CSU_CSL_15, MEM_AREA_IO_SEC)); /* - * Proect SRC - * write32(0x003300FF, get_base(CSU_CSL_12, MEM_AREA_IO_SEC)); + * Protect SRC + * write32(0x003300FF, core_mmu_get_va(CSU_CSL_12, MEM_AREA_IO_SEC)); */ dsb(); From 382e20efc3e3ea3ca4507ae609c96daaf057d538 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Sat, 26 Aug 2017 12:52:27 +0800 Subject: [PATCH 07/11] core: imx: simplify code Wrap memory registration using macros to make it easy to add new soc/arch support. 
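For example, a new SoC only has to provide the base/size defines it actually
has in its platform header (values below are placeholders, not from any real
SoC), and the shared main.c then registers the regions conditionally:

    /* hypothetical new SoC header -- addresses for illustration only */
    #define AIPS1_BASE	0x30000000
    #define AIPS1_SIZE	0x400000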
Signed-off-by: Peng Fan Acked-by: Jens Wiklander Acked-by: Etienne Carriere --- core/arch/arm/plat-imx/imx7.c | 10 ---------- core/arch/arm/plat-imx/main.c | 37 +++++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 10 deletions(-) diff --git a/core/arch/arm/plat-imx/imx7.c b/core/arch/arm/plat-imx/imx7.c index 01620628c92..731d28546bf 100644 --- a/core/arch/arm/plat-imx/imx7.c +++ b/core/arch/arm/plat-imx/imx7.c @@ -47,16 +47,6 @@ #include #include -register_phys_mem(MEM_AREA_IO_SEC, SRC_BASE, CORE_MMU_DEVICE_SIZE); -register_phys_mem(MEM_AREA_IO_SEC, IOMUXC_BASE, CORE_MMU_DEVICE_SIZE); -register_phys_mem(MEM_AREA_IO_SEC, CCM_BASE, CORE_MMU_DEVICE_SIZE); -register_phys_mem(MEM_AREA_IO_SEC, ANATOP_BASE, CORE_MMU_DEVICE_SIZE); -register_phys_mem(MEM_AREA_IO_SEC, GPC_BASE, CORE_MMU_DEVICE_SIZE); -register_phys_mem(MEM_AREA_IO_SEC, DDRC_BASE, CORE_MMU_DEVICE_SIZE); -register_phys_mem(MEM_AREA_IO_SEC, AIPS1_BASE, AIPS1_SIZE); -register_phys_mem(MEM_AREA_IO_SEC, AIPS2_BASE, AIPS2_SIZE); -register_phys_mem(MEM_AREA_IO_SEC, AIPS3_BASE, AIPS3_SIZE); - void plat_cpu_reset_late(void) { uintptr_t addr; diff --git a/core/arch/arm/plat-imx/main.c b/core/arch/arm/plat-imx/main.c index ea43d4b3727..ff2e97ed3d8 100644 --- a/core/arch/arm/plat-imx/main.c +++ b/core/arch/arm/plat-imx/main.c @@ -62,9 +62,46 @@ static const struct thread_handlers handlers = { static struct imx_uart_data console_data; +#ifdef CONSOLE_UART_BASE register_phys_mem(MEM_AREA_IO_NSEC, CONSOLE_UART_BASE, CORE_MMU_DEVICE_SIZE); +#endif +#ifdef GIC_BASE register_phys_mem(MEM_AREA_IO_SEC, GIC_BASE, CORE_MMU_DEVICE_SIZE); +#endif +#ifdef ANATOP_BASE register_phys_mem(MEM_AREA_IO_SEC, ANATOP_BASE, CORE_MMU_DEVICE_SIZE); +#endif +#ifdef GICD_BASE +register_phys_mem(MEM_AREA_IO_SEC, GICD_BASE, 0x10000); +#endif +#ifdef AIPS1_BASE +register_phys_mem(MEM_AREA_IO_SEC, AIPS1_BASE, + ROUNDUP(AIPS1_SIZE, CORE_MMU_DEVICE_SIZE)); +#endif +#ifdef AIPS2_BASE +register_phys_mem(MEM_AREA_IO_SEC, AIPS2_BASE, + ROUNDUP(AIPS2_SIZE, CORE_MMU_DEVICE_SIZE)); +#endif +#ifdef AIPS3_BASE +register_phys_mem(MEM_AREA_IO_SEC, AIPS3_BASE, + ROUNDUP(AIPS3_SIZE, CORE_MMU_DEVICE_SIZE)); +#endif +#ifdef IRAM_BASE +register_phys_mem(MEM_AREA_TEE_COHERENT, + ROUNDDOWN(IRAM_BASE, CORE_MMU_DEVICE_SIZE), + CORE_MMU_DEVICE_SIZE); +#endif +#ifdef IRAM_S_BASE +register_phys_mem(MEM_AREA_TEE_COHERENT, + ROUNDDOWN(IRAM_S_BASE, CORE_MMU_DEVICE_SIZE), + CORE_MMU_DEVICE_SIZE); +#endif + +#if defined(CFG_PL310) +register_phys_mem(MEM_AREA_IO_SEC, + ROUNDDOWN(PL310_BASE, CORE_MMU_DEVICE_SIZE), + CORE_MMU_DEVICE_SIZE); +#endif const struct thread_handlers *generic_boot_get_handlers(void) { From 45998b64c2ab4af735bb40d9277603ff1a68fd79 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Tue, 5 Sep 2017 13:05:28 +0800 Subject: [PATCH 08/11] core: arm: imx: get mmdc type Add get mmdc type support, this will be used when configuring ddr into self refresh for low power feature. 
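The suspend code added later in this series uses it to pick the matching DDR
retention settings, along the lines of (from imx7_suspend_init() in the last
patch):

    switch (imx_get_ddr_type()) {
    case IMX_DDR_TYPE_DDR3:
            pm_data = &imx7d_pm_data_ddr3;
            break;
    default:
            panic("Not supported ddr type\n");
    }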
Signed-off-by: Peng Fan Acked-by: Jens Wiklander Acked-by: Etienne Carriere --- core/arch/arm/plat-imx/mmdc.c | 64 +++++++++++++++++++++++++++++++++++ core/arch/arm/plat-imx/mmdc.h | 49 +++++++++++++++++++++++++++ core/arch/arm/plat-imx/sub.mk | 2 ++ 3 files changed, 115 insertions(+) create mode 100644 core/arch/arm/plat-imx/mmdc.c create mode 100644 core/arch/arm/plat-imx/mmdc.h diff --git a/core/arch/arm/plat-imx/mmdc.c b/core/arch/arm/plat-imx/mmdc.c new file mode 100644 index 00000000000..f597f531833 --- /dev/null +++ b/core/arch/arm/plat-imx/mmdc.c @@ -0,0 +1,64 @@ +/* + * Copyright 2017 NXP + * + * Peng Fan + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int imx_get_ddr_type(void) +{ + uint32_t val, off; + bool is_mx7 = soc_is_imx7ds(); + vaddr_t mmdc_base = core_mmu_get_va(MMDC_P0_BASE, MEM_AREA_IO_SEC); + + if (is_mx7) + off = DDRC_MSTR; + else + off = MMDC_MDMISC; + + val = read32(mmdc_base + off); + + if (is_mx7) { + if (val & MSTR_DDR3) + return IMX_DDR_TYPE_DDR3; + else if (val & MSTR_LPDDR2) + return IMX_DDR_TYPE_LPDDR2; + else if (val & MSTR_LPDDR3) + return IMX_DDR_TYPE_LPDDR3; + else + return -1; + } + + return (val & MDMISC_DDR_TYPE_MASK) >> MDMISC_DDR_TYPE_SHIFT; +} diff --git a/core/arch/arm/plat-imx/mmdc.h b/core/arch/arm/plat-imx/mmdc.h new file mode 100644 index 00000000000..b2c05ef734e --- /dev/null +++ b/core/arch/arm/plat-imx/mmdc.h @@ -0,0 +1,49 @@ +/* + * Copyright 2017 NXP + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __IMX_MMDC_H +#define __IMX_MMDC_H + +#define IMX_DDR_TYPE_DDR3 0 +#define IMX_DDR_TYPE_LPDDR2 1 +#define IMX_DDR_TYPE_LPDDR3 2 +/* For i.MX6SLL */ +#define IMX_MMDC_DDR_TYPE_LPDDR3 3 + +/* i.MX6 */ +#define MMDC_MDMISC 0x18 +#define MDMISC_DDR_TYPE_MASK GENMASK_32(4, 3) +#define MDMISC_DDR_TYPE_SHIFT 0x3 + +/* i.MX7 */ +#define DDRC_MSTR 0x0 +#define MSTR_DDR3 BIT(0) +#define MSTR_LPDDR2 BIT(2) +#define MSTR_LPDDR3 BIT(3) + +int imx_get_ddr_type(void); + +#endif diff --git a/core/arch/arm/plat-imx/sub.mk b/core/arch/arm/plat-imx/sub.mk index 661b2aab7ff..17fda6adc28 100644 --- a/core/arch/arm/plat-imx/sub.mk +++ b/core/arch/arm/plat-imx/sub.mk @@ -1,6 +1,8 @@ global-incdirs-y += . srcs-y += main.c imx-common.c +srcs-$(CFG_MX6)$(CFG_MX7) += mmdc.c + srcs-$(CFG_PL310) += imx_pl310.c srcs-$(CFG_PSCI_ARM32) += pm/psci.c pm/gpcv2.c cflags-pm/psci.c-y += -Wno-suggest-attribute=noreturn From 27009ca9eeaf1a71fecf95f83d1907b6f15ab446 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Tue, 5 Sep 2017 13:08:06 +0800 Subject: [PATCH 09/11] core: arm: sm: add psci power state macros Add PSCI_POWER_STATE_X macros Signed-off-by: Peng Fan Reviewed-by: Jens Wiklander Reviewed-by: Etienne Carriere --- core/arch/arm/include/sm/psci.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/core/arch/arm/include/sm/psci.h b/core/arch/arm/include/sm/psci.h index 8ae8efea8ff..861370d8517 100644 --- a/core/arch/arm/include/sm/psci.h +++ b/core/arch/arm/include/sm/psci.h @@ -32,6 +32,17 @@ #define PSCI_AFFINITY_LEVEL_OFF 1 #define PSCI_AFFINITY_LEVEL_ON_PENDING 2 +#define PSCI_POWER_STATE_ID_MASK 0xffff +#define PSCI_POWER_STATE_ID_SHIFT 0 +#define PSCI_POWER_STATE_TYPE_SHIFT 16 +#define PSCI_POWER_STATE_TYPE_MASK BIT32(PSCI_POWER_STATE_TYPE_SHIFT) +#define PSCI_POWER_STATE_AFFL_SHIFT 24 +#define PSCI_POWER_STATE_AFFL_MASK SHIFT_U32(0x3, \ + PSCI_POWER_STATE_AFFL_SHIFT) + +#define PSCI_POWER_STATE_TYPE_STANDBY 0 +#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 + #define PSCI_RET_SUCCESS (0) #define PSCI_RET_NOT_SUPPORTED (-1) #define PSCI_RET_INVALID_PARAMETERS (-2) From 5a4b957a97cb8488861e855ae8b43c52ab83abfa Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Sun, 3 Sep 2017 09:08:20 +0800 Subject: [PATCH 10/11] core: arm: imx7d: remove soc_is_imx7d/s functions Remove soc_is_imx7d/s functions. Not needed. 
Signed-off-by: Peng Fan
Acked-by: Jens Wiklander
Acked-by: Etienne Carriere
---
 core/arch/arm/plat-imx/imx-common.c | 34 ++---------------------------
 core/arch/arm/plat-imx/imx.h        |  2 --
 core/arch/arm/plat-imx/pm/psci.c    |  4 ++--
 3 files changed, 4 insertions(+), 36 deletions(-)

diff --git a/core/arch/arm/plat-imx/imx-common.c b/core/arch/arm/plat-imx/imx-common.c
index 3e584e84158..650ddd8119c 100644
--- a/core/arch/arm/plat-imx/imx-common.c
+++ b/core/arch/arm/plat-imx/imx-common.c
@@ -89,36 +89,6 @@ bool soc_is_imx6dqp(void)
 	return (imx_soc_type() == SOC_MX6Q) && (imx_soc_rev_major() == 2);
 }
 
-bool soc_is_imx7s(void)
-{
-	vaddr_t addr = core_mmu_get_va(OCOTP_BASE + 0x450, MEM_AREA_IO_SEC);
-	uint32_t val = read32(addr);
-
-	if (soc_is_imx7ds()) {
-		if (val & 1)
-			return true;
-		else
-			return false;
-	}
-
-	return false;
-}
-
-bool soc_is_imx7d(void)
-{
-	vaddr_t addr = core_mmu_get_va(OCOTP_BASE + 0x450, MEM_AREA_IO_SEC);
-	uint32_t val = read32(addr);
-
-	if (soc_is_imx7ds()) {
-		if (val & 1)
-			return false;
-		else
-			return true;
-	}
-
-	return false;
-}
-
 bool soc_is_imx7ds(void)
 {
 	return imx_soc_type() == SOC_MX7D;
@@ -128,7 +98,7 @@ uint32_t imx_get_src_gpr(int cpu)
 {
 	vaddr_t va = core_mmu_get_va(SRC_BASE, MEM_AREA_IO_SEC);
 
-	if (soc_is_imx7d())
+	if (soc_is_imx7ds())
 		return read32(va + SRC_GPR1_MX7 + cpu * 8 + 4);
 	else
 		return read32(va + SRC_GPR1 + cpu * 8 + 4);
@@ -138,7 +108,7 @@ void imx_set_src_gpr(int cpu, uint32_t val)
 {
 	vaddr_t va = core_mmu_get_va(SRC_BASE, MEM_AREA_IO_SEC);
 
-	if (soc_is_imx7d())
+	if (soc_is_imx7ds())
 		write32(val, va + SRC_GPR1_MX7 + cpu * 8 + 4);
 	else
 		write32(val, va + SRC_GPR1 + cpu * 8 + 4);
diff --git a/core/arch/arm/plat-imx/imx.h b/core/arch/arm/plat-imx/imx.h
index 953a3b560f8..8f08b8d7cfd 100644
--- a/core/arch/arm/plat-imx/imx.h
+++ b/core/arch/arm/plat-imx/imx.h
@@ -51,8 +51,6 @@ bool soc_is_imx6sdl(void);
 bool soc_is_imx6dq(void);
 bool soc_is_imx6dqp(void);
 bool soc_is_imx7ds(void);
-bool soc_is_imx7d(void);
-bool soc_is_imx7s(void);
 uint32_t imx_soc_type(void);
 void imx_gpcv2_set_core1_pdn_by_software(void);
 void imx_gpcv2_set_core1_pup_by_software(void);
diff --git a/core/arch/arm/plat-imx/pm/psci.c b/core/arch/arm/plat-imx/pm/psci.c
index 40177c09463..220da0f7d61 100644
--- a/core/arch/arm/plat-imx/pm/psci.c
+++ b/core/arch/arm/plat-imx/pm/psci.c
@@ -118,7 +118,7 @@ int psci_affinity_info(uint32_t affinity,
 
 	cpu = affinity;
 
-	if (soc_is_imx7d())
+	if (soc_is_imx7ds())
 		wfi = true;
 	else
 		wfi = read32(gpr5) & ARM_WFI_STAT_MASK(cpu);
@@ -131,7 +131,7 @@ int psci_affinity_info(uint32_t affinity,
 	 * Wait secondary cpus ready to be killed
 	 * TODO: Change to non dead loop
 	 */
-	if (soc_is_imx7d()) {
+	if (soc_is_imx7ds()) {
 		while (read32(va + SRC_GPR1_MX7 + cpu * 8 + 4) != UINT_MAX)
 			;

From 8e34262ae9ea82ed9d5de00ebe18851e0547989c Mon Sep 17 00:00:00 2001
From: Peng Fan
Date: Mon, 18 Sep 2017 16:31:18 +0800
Subject: [PATCH 11/11] core: arm: imx7d: add psci suspend support

Implement i.MX7D suspend/resume support. The first time the system
enters suspend, some initialization work needs to be done, such as
copying code to on-chip RAM and setting up the IRAM translation table.
Since we only have 32K of on-chip RAM for suspend/resume usage, we have
to put code and data together, use section mapping, and set WXN to
false.
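The resulting TrustZone OCRAM layout is roughly the following (derived from
imx_pm.h and pm-imx7.c below):

    TRUSTZONE_OCRAM_START + 0x0000   : struct imx7_pm_info (PA/VA pairs,
                                       saved DDRC/PHY register values)
                     ... + sizeof(struct imx7_pm_info)
                                     : copy of the imx7_suspend() code
    TRUSTZONE_OCRAM_START + 0x4000   : 16 KB IRAM MMU table, filled with
                                       map_memarea_sections()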
Signed-off-by: Peng Fan Acked-by: Jens Wiklander Acked-by: Etienne Carriere --- core/arch/arm/plat-imx/conf.mk | 4 + core/arch/arm/plat-imx/imx_pm.h | 142 ++++ core/arch/arm/plat-imx/pm/imx7_suspend.c | 89 +++ core/arch/arm/plat-imx/pm/pm-imx7.c | 257 +++++++ core/arch/arm/plat-imx/pm/psci-suspend-imx7.S | 718 ++++++++++++++++++ core/arch/arm/plat-imx/pm/psci.c | 50 ++ core/arch/arm/plat-imx/sub.mk | 6 +- 7 files changed, 1265 insertions(+), 1 deletion(-) create mode 100644 core/arch/arm/plat-imx/imx_pm.h create mode 100644 core/arch/arm/plat-imx/pm/imx7_suspend.c create mode 100644 core/arch/arm/plat-imx/pm/pm-imx7.c create mode 100644 core/arch/arm/plat-imx/pm/psci-suspend-imx7.S diff --git a/core/arch/arm/plat-imx/conf.mk b/core/arch/arm/plat-imx/conf.mk index 8652929fe38..87af7a6ced1 100644 --- a/core/arch/arm/plat-imx/conf.mk +++ b/core/arch/arm/plat-imx/conf.mk @@ -78,4 +78,8 @@ $(call force,CFG_SECURE_TIME_SOURCE_REE,y) CFG_BOOT_SECONDARY_REQUEST ?= y endif +ifeq ($(filter y, $(CFG_PSCI_ARM32)), y) +CFG_HWSUPP_MEM_PERM_WXN = n +endif + ta-targets = ta_arm32 diff --git a/core/arch/arm/plat-imx/imx_pm.h b/core/arch/arm/plat-imx/imx_pm.h new file mode 100644 index 00000000000..7a0283bd172 --- /dev/null +++ b/core/arch/arm/plat-imx/imx_pm.h @@ -0,0 +1,142 @@ +/* + * Copyright 2017 NXP + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __IMX_PM_H +#define __IMX_PM_H + +#include + +#define PM_INFO_MX7_M4_RESERVE0_OFF 0x0 +#define PM_INFO_MX7_M4_RESERVE1_OFF 0x4 +#define PM_INFO_MX7_M4_RESERVE2_OFF 0x8 +#define PM_INFO_MX7_PBASE_OFF 0xc +#define PM_INFO_MX7_ENTRY_OFF 0x10 +#define PM_INFO_MX7_RESUME_ADDR_OFF 0x14 +#define PM_INFO_MX7_DDR_TYPE_OFF 0x18 +#define PM_INFO_MX7_SIZE_OFF 0x1c +#define PM_INFO_MX7_DDRC_P_OFF 0x20 +#define PM_INFO_MX7_DDRC_V_OFF 0x24 +#define PM_INFO_MX7_DDRC_PHY_P_OFF 0x28 +#define PM_INFO_MX7_DDRC_PHY_V_OFF 0x2c +#define PM_INFO_MX7_SRC_P_OFF 0x30 +#define PM_INFO_MX7_SRC_V_OFF 0x34 +#define PM_INFO_MX7_IOMUXC_GPR_P_OFF 0x38 +#define PM_INFO_MX7_IOMUXC_GPR_V_OFF 0x3c +#define PM_INFO_MX7_CCM_P_OFF 0x40 +#define PM_INFO_MX7_CCM_V_OFF 0x44 +#define PM_INFO_MX7_GPC_P_OFF 0x48 +#define PM_INFO_MX7_GPC_V_OFF 0x4c +#define PM_INFO_MX7_SNVS_P_OFF 0x50 +#define PM_INFO_MX7_SNVS_V_OFF 0x54 +#define PM_INFO_MX7_ANATOP_P_OFF 0x58 +#define PM_INFO_MX7_ANATOP_V_OFF 0x5c +#define PM_INFO_MX7_LPSR_P_OFF 0x60 +#define PM_INFO_MX7_LPSR_V_OFF 0x64 +#define PM_INFO_MX7_GIC_DIST_P_OFF 0x68 +#define PM_INFO_MX7_GIC_DIST_V_OFF 0x6c +#define PM_INFO_MX7_TTBR0_OFF 0x70 +#define PM_INFO_MX7_TTBR1_OFF 0x74 +#define PM_INFO_MX7_DDRC_REG_NUM_OFF 0x78 +#define PM_INFO_MX7_DDRC_REG_OFF 0x7C +#define PM_INFO_MX7_DDRC_PHY_REG_NUM_OFF 0x17C +#define PM_INFO_MX7_DDRC_PHY_REG_OFF 0x180 + +#define MX7_DDRC_NUM 32 +#define MX7_DDRC_PHY_NUM 16 + + +#define SUSPEND_OCRAM_SIZE 0x1000 +#define LOWPOWER_IDLE_OCRAM_SIZE 0x1000 + +#define SUSPEND_OCRAM_OFFSET 0x0 +#define LOWPOWER_IDLE_OCRAM_OFFSET 0x1000 + +#ifndef ASM +#include + +struct imx7_pm_info { + uint32_t m4_reserve0; + uint32_t m4_reserve1; + uint32_t m4_reserve2; + paddr_t pa_base; /* pa of pm_info */ + uintptr_t entry; + paddr_t tee_resume; + uint32_t ddr_type; + uint32_t pm_info_size; + paddr_t ddrc_pa_base; + vaddr_t ddrc_va_base; + paddr_t ddrc_phy_pa_base; + vaddr_t ddrc_phy_va_base; + paddr_t src_pa_base; + vaddr_t src_va_base; + paddr_t iomuxc_gpr_pa_base; + vaddr_t iomuxc_gpr_va_base; + paddr_t ccm_pa_base; + vaddr_t ccm_va_base; + paddr_t gpc_pa_base; + vaddr_t gpc_va_base; + paddr_t snvs_pa_base; + vaddr_t snvs_va_base; + paddr_t anatop_pa_base; + vaddr_t anatop_va_base; + paddr_t lpsr_pa_base; + vaddr_t lpsr_va_base; + paddr_t gic_pa_base; + vaddr_t gic_va_base; + uint32_t ttbr0; + uint32_t ttbr1; + uint32_t ddrc_num; + uint32_t ddrc_val[MX7_DDRC_NUM][2]; + uint32_t ddrc_phy_num; + uint32_t ddrc_phy_val[MX7_DDRC_NUM][2]; +} __aligned(8); + +struct suspend_save_regs { + uint32_t irq[3]; + uint32_t fiq[3]; + uint32_t und[3]; + uint32_t abt[3]; + uint32_t mon[3]; +} __aligned(8); + +struct imx7_pm_data { + uint32_t ddr_type; + uint32_t ddrc_num; + uint32_t (*ddrc_offset)[2]; + uint32_t ddrc_phy_num; + uint32_t (*ddrc_phy_offset)[2]; +}; + +void imx7_suspend(struct imx7_pm_info *info); +void imx7_resume(void); +void ca7_cpu_resume(void); +int imx7_suspend_init(void); +int pm_imx7_iram_tbl_init(void); +int imx7_cpu_suspend(uint32_t power_state, uintptr_t entry, + uint32_t context_id, struct sm_nsec_ctx *nsec); +#endif + +#endif diff --git a/core/arch/arm/plat-imx/pm/imx7_suspend.c b/core/arch/arm/plat-imx/pm/imx7_suspend.c new file mode 100644 index 00000000000..660e71f64c3 --- /dev/null +++ b/core/arch/arm/plat-imx/pm/imx7_suspend.c @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2017 NXP + * + * Peng Fan + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + 
* 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int suspended_init; + +int imx7_cpu_suspend(uint32_t power_state __unused, uintptr_t entry, + uint32_t context_id __unused, struct sm_nsec_ctx *nsec) +{ + uint32_t suspend_ocram_base = core_mmu_get_va(TRUSTZONE_OCRAM_START + + SUSPEND_OCRAM_OFFSET, + MEM_AREA_TEE_COHERENT); + struct imx7_pm_info *p = (struct imx7_pm_info *)suspend_ocram_base; + int ret; + + if (!suspended_init) { + imx7_suspend_init(); + suspended_init = 1; + } + + /* Store non-sec ctx regs */ + sm_save_modes_regs(&nsec->mode_regs); + + ret = sm_pm_cpu_suspend((uint32_t)p, (int (*)(uint32_t)) + (suspend_ocram_base + sizeof(*p))); + /* + * Sometimes sm_pm_cpu_suspend may not really suspended, + * we need to check it's return value to restore reg or not + */ + if (ret < 0) { + DMSG("=== Not suspended, GPC IRQ Pending ===\n"); + return 0; + } + + plat_cpu_reset_late(); + + /* Restore register of different mode in secure world */ + sm_restore_modes_regs(&nsec->mode_regs); + + /* Set entry for back to Linux */ + nsec->mon_lr = (uint32_t)entry; + + main_init_gic(); + + DMSG("=== Back from Suspended ===\n"); + + return 0; +} diff --git a/core/arch/arm/plat-imx/pm/pm-imx7.c b/core/arch/arm/plat-imx/pm/pm-imx7.c new file mode 100644 index 00000000000..64cd54f84a2 --- /dev/null +++ b/core/arch/arm/plat-imx/pm/pm-imx7.c @@ -0,0 +1,257 @@ +/* + * Copyright 2017 NXP + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +paddr_t iram_tbl_phys_addr = -1UL; +void *iram_tbl_virt_addr; + +#define READ_DATA_FROM_HARDWARE 0 + +static uint32_t imx7d_ddrc_ddr3_setting[][2] = { + { 0x0, READ_DATA_FROM_HARDWARE }, + { 0x1a0, READ_DATA_FROM_HARDWARE }, + { 0x1a4, READ_DATA_FROM_HARDWARE }, + { 0x1a8, READ_DATA_FROM_HARDWARE }, + { 0x64, READ_DATA_FROM_HARDWARE }, + { 0x490, READ_DATA_FROM_HARDWARE }, + { 0xd0, READ_DATA_FROM_HARDWARE }, + { 0xd4, READ_DATA_FROM_HARDWARE }, + { 0xdc, READ_DATA_FROM_HARDWARE }, + { 0xe0, READ_DATA_FROM_HARDWARE }, + { 0xe4, READ_DATA_FROM_HARDWARE }, + { 0xf4, READ_DATA_FROM_HARDWARE }, + { 0x100, READ_DATA_FROM_HARDWARE }, + { 0x104, READ_DATA_FROM_HARDWARE }, + { 0x108, READ_DATA_FROM_HARDWARE }, + { 0x10c, READ_DATA_FROM_HARDWARE }, + { 0x110, READ_DATA_FROM_HARDWARE }, + { 0x114, READ_DATA_FROM_HARDWARE }, + { 0x120, READ_DATA_FROM_HARDWARE }, + { 0x180, READ_DATA_FROM_HARDWARE }, + { 0x190, READ_DATA_FROM_HARDWARE }, + { 0x194, READ_DATA_FROM_HARDWARE }, + { 0x200, READ_DATA_FROM_HARDWARE }, + { 0x204, READ_DATA_FROM_HARDWARE }, + { 0x214, READ_DATA_FROM_HARDWARE }, + { 0x218, READ_DATA_FROM_HARDWARE }, + { 0x240, READ_DATA_FROM_HARDWARE }, + { 0x244, READ_DATA_FROM_HARDWARE }, +}; + +static uint32_t imx7d_ddrc_phy_ddr3_setting[][2] = { + { 0x0, READ_DATA_FROM_HARDWARE }, + { 0x4, READ_DATA_FROM_HARDWARE }, + { 0x10, READ_DATA_FROM_HARDWARE }, + { 0xb0, READ_DATA_FROM_HARDWARE }, + { 0x9c, READ_DATA_FROM_HARDWARE }, + { 0x7c, READ_DATA_FROM_HARDWARE }, + { 0x80, READ_DATA_FROM_HARDWARE }, + { 0x84, READ_DATA_FROM_HARDWARE }, + { 0x88, READ_DATA_FROM_HARDWARE }, + { 0x6c, READ_DATA_FROM_HARDWARE }, + { 0x20, READ_DATA_FROM_HARDWARE }, + { 0x30, READ_DATA_FROM_HARDWARE }, + { 0x50, 0x01000010 }, + { 0x50, 0x00000010 }, + { 0xc0, 0x0e407304 }, + { 0xc0, 0x0e447304 }, + { 0xc0, 0x0e447306 }, + { 0xc0, 0x0e447304 }, + { 0xc0, 0x0e407306 }, +}; + +static struct imx7_pm_data imx7d_pm_data_ddr3 = { + .ddrc_num = ARRAY_SIZE(imx7d_ddrc_ddr3_setting), + .ddrc_offset = imx7d_ddrc_ddr3_setting, + .ddrc_phy_num = ARRAY_SIZE(imx7d_ddrc_phy_ddr3_setting), + .ddrc_phy_offset = imx7d_ddrc_phy_ddr3_setting, +}; + +paddr_t phys_addr[] = { + AIPS1_BASE, AIPS2_BASE, AIPS3_BASE +}; + +int pm_imx7_iram_tbl_init(void) +{ + uint32_t i; + struct tee_mmap_region map; + + /* iram mmu translation table already initialized */ + if (iram_tbl_phys_addr != (-1UL)) + return 0; + + iram_tbl_phys_addr = TRUSTZONE_OCRAM_START + 16 * 1024; + iram_tbl_virt_addr = phys_to_virt(iram_tbl_phys_addr, + MEM_AREA_TEE_COHERENT); + + /* 16KB */ + memset(iram_tbl_virt_addr, 0, 16 * 1024); + + for (i = 0; i < ARRAY_SIZE(phys_addr); i++) { + map.pa = phys_addr[i]; + map.va = (vaddr_t)phys_to_virt(phys_addr[i], MEM_AREA_IO_SEC); + map.region_size = CORE_MMU_PGDIR_SIZE; + map.size = AIPS1_SIZE; /* 4M for AIPS1/2/3 */ + 
map.type = MEM_AREA_IO_SEC; + map.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW | + TEE_MATTR_GLOBAL | TEE_MATTR_SECURE | + (TEE_MATTR_CACHE_NONCACHE << TEE_MATTR_CACHE_SHIFT); + map_memarea_sections(&map, (uint32_t *)iram_tbl_virt_addr); + } + + /* Note IRAM_S_BASE is not 1M aligned, so take care */ + map.pa = ROUNDDOWN(IRAM_S_BASE, CORE_MMU_PGDIR_SIZE); + map.va = (vaddr_t)phys_to_virt(map.pa, MEM_AREA_TEE_COHERENT); + map.region_size = CORE_MMU_PGDIR_SIZE; + map.size = CORE_MMU_DEVICE_SIZE; + map.type = MEM_AREA_TEE_COHERENT; + map.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRWX | TEE_MATTR_GLOBAL | + TEE_MATTR_SECURE; + map_memarea_sections(&map, (uint32_t *)iram_tbl_virt_addr); + + map.pa = GIC_BASE; + map.va = (vaddr_t)phys_to_virt((paddr_t)GIC_BASE, MEM_AREA_IO_SEC); + map.region_size = CORE_MMU_PGDIR_SIZE; + map.size = CORE_MMU_DEVICE_SIZE; + map.type = MEM_AREA_TEE_COHERENT; + map.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW | TEE_MATTR_GLOBAL | + TEE_MATTR_SECURE; + map_memarea_sections(&map, (uint32_t *)iram_tbl_virt_addr); + + return 0; +} + +int imx7_suspend_init(void) +{ + uint32_t i; + uint32_t (*ddrc_offset_array)[2]; + uint32_t (*ddrc_phy_offset_array)[2]; + uint32_t suspend_ocram_base = core_mmu_get_va(TRUSTZONE_OCRAM_START + + SUSPEND_OCRAM_OFFSET, + MEM_AREA_TEE_COHERENT); + struct imx7_pm_info *p = (struct imx7_pm_info *)suspend_ocram_base; + struct imx7_pm_data *pm_data; + + pm_imx7_iram_tbl_init(); + + dcache_op_level1(DCACHE_OP_CLEAN_INV); + + p->pa_base = TRUSTZONE_OCRAM_START + SUSPEND_OCRAM_OFFSET; + p->tee_resume = virt_to_phys((void *)(vaddr_t)ca7_cpu_resume); + p->pm_info_size = sizeof(*p); + p->ccm_va_base = core_mmu_get_va(CCM_BASE, MEM_AREA_IO_SEC); + p->ccm_pa_base = CCM_BASE; + p->ddrc_va_base = core_mmu_get_va(DDRC_BASE, MEM_AREA_IO_SEC); + p->ddrc_pa_base = DDRC_BASE; + p->ddrc_phy_va_base = core_mmu_get_va(DDRC_PHY_BASE, MEM_AREA_IO_SEC); + p->ddrc_phy_pa_base = DDRC_PHY_BASE; + p->src_va_base = core_mmu_get_va(SRC_BASE, MEM_AREA_IO_SEC); + p->src_pa_base = SRC_BASE; + p->iomuxc_gpr_va_base = core_mmu_get_va(IOMUXC_GPR_BASE, + MEM_AREA_IO_SEC); + p->iomuxc_gpr_pa_base = IOMUXC_GPR_BASE; + p->gpc_va_base = core_mmu_get_va(GPC_BASE, MEM_AREA_IO_SEC); + p->gpc_pa_base = GPC_BASE; + p->anatop_va_base = core_mmu_get_va(ANATOP_BASE, MEM_AREA_IO_SEC); + p->anatop_pa_base = ANATOP_BASE; + p->snvs_va_base = core_mmu_get_va(SNVS_BASE, MEM_AREA_IO_SEC); + p->snvs_pa_base = SNVS_BASE; + p->lpsr_va_base = core_mmu_get_va(LPSR_BASE, MEM_AREA_IO_SEC); + p->lpsr_pa_base = LPSR_BASE; + p->gic_va_base = core_mmu_get_va(GIC_BASE, MEM_AREA_IO_SEC); + p->gic_pa_base = GIC_BASE; + + /* TODO:lpsr disabled now */ + write32(0, p->lpsr_va_base); + + p->ddr_type = imx_get_ddr_type(); + switch (p->ddr_type) { + case IMX_DDR_TYPE_DDR3: + pm_data = &imx7d_pm_data_ddr3; + break; + default: + panic("Not supported ddr type\n"); + break; + } + + p->ddrc_num = pm_data->ddrc_num; + p->ddrc_phy_num = pm_data->ddrc_phy_num; + ddrc_offset_array = pm_data->ddrc_offset; + ddrc_phy_offset_array = pm_data->ddrc_phy_offset; + + for (i = 0; i < p->ddrc_num; i++) { + p->ddrc_val[i][0] = ddrc_offset_array[i][0]; + if (ddrc_offset_array[i][1] == READ_DATA_FROM_HARDWARE) + p->ddrc_val[i][1] = read32(p->ddrc_va_base + + ddrc_offset_array[i][0]); + else + p->ddrc_val[i][1] = ddrc_offset_array[i][1]; + + if (p->ddrc_val[i][0] == 0xd0) + p->ddrc_val[i][1] |= 0xc0000000; + } + + /* initialize DDRC PHY settings */ + for (i = 0; i < p->ddrc_phy_num; i++) { + p->ddrc_phy_val[i][0] = ddrc_phy_offset_array[i][0]; 
+ if (ddrc_phy_offset_array[i][1] == READ_DATA_FROM_HARDWARE) + p->ddrc_phy_val[i][1] = + read32(p->ddrc_phy_va_base + + ddrc_phy_offset_array[i][0]); + else + p->ddrc_phy_val[i][1] = ddrc_phy_offset_array[i][1]; + } + + memcpy((void *)(suspend_ocram_base + sizeof(*p)), + (void *)(vaddr_t)imx7_suspend, SUSPEND_OCRAM_SIZE - sizeof(*p)); + + dcache_clean_range((void *)suspend_ocram_base, SUSPEND_OCRAM_SIZE); + + /* + * Note that IRAM IOSEC map, if changed to MEM map, + * need to flush cache + */ + icache_inv_all(); + + return 0; +} diff --git a/core/arch/arm/plat-imx/pm/psci-suspend-imx7.S b/core/arch/arm/plat-imx/pm/psci-suspend-imx7.S new file mode 100644 index 00000000000..cb48a1b44f7 --- /dev/null +++ b/core/arch/arm/plat-imx/pm/psci-suspend-imx7.S @@ -0,0 +1,718 @@ +/* + * Copyright 2017 NXP + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MX7_SRC_GPR1 0x74 +#define MX7_SRC_GPR2 0x78 +#define GPC_PGC_C0 0x800 +#define GPC_PGC_FM 0xa00 +#define ANADIG_SNVS_MISC_CTRL 0x380 +#define ANADIG_SNVS_MISC_CTRL_SET 0x384 +#define ANADIG_SNVS_MISC_CTRL_CLR 0x388 +#define ANADIG_DIGPROG 0x800 +#define DDRC_STAT 0x4 +#define DDRC_PWRCTL 0x30 +#define DDRC_PSTAT 0x3fc +#define DDRC_PCTRL_0 0x490 +#define DDRC_DFIMISC 0x1b0 +#define DDRC_SWCTL 0x320 +#define DDRC_SWSTAT 0x324 +#define DDRPHY_LP_CON0 0x18 + +#define CCM_SNVS_LPCG 0x250 +#define MX7D_GPC_IMR1 0x30 +#define MX7D_GPC_IMR2 0x34 +#define MX7D_GPC_IMR3 0x38 +#define MX7D_GPC_IMR4 0x3c + +/* + * The code in this file is copied to coherent on-chip ram memory, + * without any dependency on code/data in tee memory(DDR). + */ + .section .text.psci.suspend + .align 3 + + .macro disable_l1_dcache + + /* + * flush L1 data cache before clearing SCTLR.C bit. 
+ */ + push {r0 - r10, lr} + ldr r1, =dcache_op_all + mov r0, #DCACHE_OP_CLEAN_INV + mov lr, pc + bx r1 + pop {r0 - r10, lr} + + /* disable d-cache */ + read_sctlr r7 + bic r7, r7, #SCTLR_C + write_sctlr r7 + dsb + isb + + push {r0 - r10, lr} + ldr r1, =dcache_op_all + mov r0, #DCACHE_OP_CLEAN_INV + mov lr, pc + bx r1 + pop {r0 - r10, lr} + + .endm + + .macro store_ttbr + + /* Store TTBR1 to pm_info->ttbr1 */ + read_ttbr1 r7 + str r7, [r0, #PM_INFO_MX7_TTBR1_OFF] + + /* Store TTBR0 to pm_info->ttbr1 */ + read_ttbr0 r7 + str r7, [r0, #PM_INFO_MX7_TTBR0_OFF] + + /* Disable Branch Prediction */ + read_sctlr r6 + bic r6, r6, #SCTLR_Z + write_sctlr r6 + + /* Flush the BTAC. */ + write_bpiallis + + ldr r6, =iram_tbl_phys_addr + ldr r6, [r6] + dsb + isb + + /* Store the IRAM table in TTBR1/0 */ + write_ttbr1 r6 + write_ttbr0 r6 + + /* Read TTBCR and set PD0=1 */ + read_ttbcr r6 + orr r6, r6, #TTBCR_PD0 + write_ttbcr r6 + + dsb + isb + + /* flush the TLB */ + write_tlbiallis + isb + write_tlbiall + isb + + .endm + + .macro restore_ttbr + + /* Enable L1 data cache. */ + read_sctlr r6 + orr r6, r6, #SCTLR_C + write_sctlr r6 + + dsb + isb + + /* Restore TTBCR */ + /* Read TTBCR and set PD0=0 */ + read_ttbcr r6 + bic r6, r6, #TTBCR_PD0 + write_ttbcr r6 + dsb + isb + + /* flush the TLB */ + write_tlbiallis + + /* Enable Branch Prediction */ + read_sctlr r6 + orr r6, r6, #SCTLR_Z + write_sctlr r6 + + /* Flush the Branch Target Address Cache (BTAC) */ + write_bpiallis + + /* Restore TTBR1/0, get the origin ttbr1/0 from pm info */ + ldr r7, [r0, #PM_INFO_MX7_TTBR1_OFF] + write_ttbr1 r7 + ldr r7, [r0, #PM_INFO_MX7_TTBR0_OFF] + write_ttbr0 r7 + isb + + .endm + + .macro ddrc_enter_self_refresh + + ldr r11, [r0, #PM_INFO_MX7_DDRC_V_OFF] + + /* let DDR out of self-refresh */ + ldr r7, =0x0 + str r7, [r11, #DDRC_PWRCTL] + + /* wait rw port_busy clear */ + ldr r6, =BIT32(16) + orr r6, r6, #0x1 +1: + ldr r7, [r11, #DDRC_PSTAT] + ands r7, r7, r6 + bne 1b + + /* enter self-refresh bit 5 */ + ldr r7, =BIT32(5) + str r7, [r11, #DDRC_PWRCTL] + + /* wait until self-refresh mode entered */ +2: + ldr r7, [r11, #DDRC_STAT] + and r7, r7, #0x3 + cmp r7, #0x3 + bne 2b +3: + ldr r7, [r11, #DDRC_STAT] + ands r7, r7, #0x20 + beq 3b + + /* disable dram clk */ + ldr r7, [r11, #DDRC_PWRCTL] + orr r7, r7, #BIT32(3) + str r7, [r11, #DDRC_PWRCTL] + + .endm + + .macro ddrc_exit_self_refresh + + cmp r5, #0x0 + ldreq r11, [r0, #PM_INFO_MX7_DDRC_V_OFF] + ldrne r11, [r0, #PM_INFO_MX7_DDRC_P_OFF] + + /* let DDR out of self-refresh */ + ldr r7, =0x0 + str r7, [r11, #DDRC_PWRCTL] + + /* wait until self-refresh mode entered */ +4: + ldr r7, [r11, #DDRC_STAT] + and r7, r7, #0x3 + cmp r7, #0x3 + beq 4b + + /* enable auto self-refresh */ + ldr r7, [r11, #DDRC_PWRCTL] + orr r7, r7, #BIT32(0) + str r7, [r11, #DDRC_PWRCTL] + + .endm + + .macro wait_delay +5: + subs r6, r6, #0x1 + bne 5b + + .endm + + .macro ddr_enter_retention + + ldr r11, [r0, #PM_INFO_MX7_DDRC_V_OFF] + + /* let DDR out of self-refresh */ + ldr r7, =0x0 + str r7, [r11, #DDRC_PCTRL_0] + + /* wait rw port_busy clear */ + ldr r6, =BIT32(16) + orr r6, r6, #0x1 +6: + ldr r7, [r11, #DDRC_PSTAT] + ands r7, r7, r6 + bne 6b + + ldr r11, [r0, #PM_INFO_MX7_DDRC_V_OFF] + /* enter self-refresh bit 5 */ + ldr r7, =BIT32(5) + str r7, [r11, #DDRC_PWRCTL] + + /* wait until self-refresh mode entered */ +7: + ldr r7, [r11, #DDRC_STAT] + and r7, r7, #0x3 + cmp r7, #0x3 + bne 7b +8: + ldr r7, [r11, #DDRC_STAT] + ands r7, r7, #0x20 + beq 8b + + /* disable dram clk */ + ldr r7, =BIT32(5) + orr r7, r7, 
#BIT32(3) + str r7, [r11, #DDRC_PWRCTL] + + ldr r11, [r0, #PM_INFO_MX7_ANATOP_V_OFF] + ldr r7, [r11, #ANADIG_DIGPROG] + and r7, r7, #0xff + cmp r7, #0x11 + bne 10f + + /* TO 1.1 */ + ldr r11, [r0, #PM_INFO_MX7_IOMUXC_GPR_V_OFF] + ldr r7, =0x38000000 + str r7, [r11] + + /* LPSR mode need to use TO1.0 flow as IOMUX lost power */ + ldr r10, [r0, #PM_INFO_MX7_LPSR_V_OFF] + ldr r7, [r10] + cmp r7, #0x0 + beq 11f +10: + /* reset ddr_phy */ + ldr r11, [r0, #PM_INFO_MX7_ANATOP_V_OFF] + ldr r7, =0x0 + str r7, [r11, #ANADIG_SNVS_MISC_CTRL] + + /* delay 7 us */ + ldr r6, =6000 + wait_delay + + ldr r11, [r0, #PM_INFO_MX7_SRC_V_OFF] + ldr r6, =0x1000 + ldr r7, [r11, r6] + orr r7, r7, #0x1 + str r7, [r11, r6] +11: + /* turn off ddr power */ + ldr r11, [r0, #PM_INFO_MX7_ANATOP_V_OFF] + ldr r7, =(0x1 << 29) + str r7, [r11, #ANADIG_SNVS_MISC_CTRL_SET] + + ldr r11, [r0, #PM_INFO_MX7_SRC_V_OFF] + ldr r6, =0x1000 + ldr r7, [r11, r6] + orr r7, r7, #0x1 + str r7, [r11, r6] + + .endm + + .macro ddr_exit_retention + + cmp r5, #0x0 + ldreq r1, [r0, #PM_INFO_MX7_ANATOP_V_OFF] + ldrne r1, [r0, #PM_INFO_MX7_ANATOP_P_OFF] + ldreq r2, [r0, #PM_INFO_MX7_SRC_V_OFF] + ldrne r2, [r0, #PM_INFO_MX7_SRC_P_OFF] + ldreq r3, [r0, #PM_INFO_MX7_DDRC_V_OFF] + ldrne r3, [r0, #PM_INFO_MX7_DDRC_P_OFF] + ldreq r4, [r0, #PM_INFO_MX7_DDRC_PHY_V_OFF] + ldrne r4, [r0, #PM_INFO_MX7_DDRC_PHY_P_OFF] + ldreq r10, [r0, #PM_INFO_MX7_CCM_V_OFF] + ldrne r10, [r0, #PM_INFO_MX7_CCM_P_OFF] + ldreq r11, [r0, #PM_INFO_MX7_IOMUXC_GPR_V_OFF] + ldrne r11, [r0, #PM_INFO_MX7_IOMUXC_GPR_P_OFF] + + /* turn on ddr power */ + ldr r7, =BIT32(29) + str r7, [r1, #ANADIG_SNVS_MISC_CTRL_CLR] + + ldr r6, =50 + wait_delay + + /* clear ddr_phy reset */ + ldr r6, =0x1000 + ldr r7, [r2, r6] + orr r7, r7, #0x3 + str r7, [r2, r6] + ldr r7, [r2, r6] + bic r7, r7, #0x1 + str r7, [r2, r6] +13: + ldr r6, [r0, #PM_INFO_MX7_DDRC_REG_NUM_OFF] + ldr r7, =PM_INFO_MX7_DDRC_REG_OFF + add r7, r7, r0 +14: + ldr r8, [r7], #0x4 + ldr r9, [r7], #0x4 + str r9, [r3, r8] + subs r6, r6, #0x1 + bne 14b + ldr r7, =0x20 + str r7, [r3, #DDRC_PWRCTL] + ldr r7, =0x0 + str r7, [r3, #DDRC_DFIMISC] + + /* do PHY, clear ddr_phy reset */ + ldr r6, =0x1000 + ldr r7, [r2, r6] + bic r7, r7, #0x2 + str r7, [r2, r6] + + ldr r7, [r1, #ANADIG_DIGPROG] + and r7, r7, #0xff + cmp r7, #0x11 + bne 12f + + /* + * TKT262940: + * System hang when press RST for DDR PAD is + * in retention mode, fixed on TO1.1 + */ + ldr r7, [r11] + bic r7, r7, #BIT32(27) + str r7, [r11] + ldr r7, [r11] + bic r7, r7, #BIT32(29) + str r7, [r11] +12: + ldr r7, =BIT32(30) + str r7, [r1, #ANADIG_SNVS_MISC_CTRL_SET] + + /* need to delay ~5mS */ + ldr r6, =0x100000 + wait_delay + + ldr r6, [r0, #PM_INFO_MX7_DDRC_PHY_REG_NUM_OFF] + ldr r7, =PM_INFO_MX7_DDRC_PHY_REG_OFF + add r7, r7, r0 + +15: + ldr r8, [r7], #0x4 + ldr r9, [r7], #0x4 + str r9, [r4, r8] + subs r6, r6, #0x1 + bne 15b + + ldr r7, =0x0 + add r9, r10, #0x4000 + str r7, [r9, #0x130] + + ldr r7, =0x170 + orr r7, r7, #0x8 + str r7, [r11, #0x20] + + ldr r7, =0x2 + add r9, r10, #0x4000 + str r7, [r9, #0x130] + + ldr r7, =0xf + str r7, [r4, #DDRPHY_LP_CON0] + + /* wait until self-refresh mode entered */ +16: + ldr r7, [r3, #DDRC_STAT] + and r7, r7, #0x3 + cmp r7, #0x3 + bne 16b + ldr r7, =0x0 + str r7, [r3, #DDRC_SWCTL] + ldr r7, =0x1 + str r7, [r3, #DDRC_DFIMISC] + ldr r7, =0x1 + str r7, [r3, #DDRC_SWCTL] +17: + ldr r7, [r3, #DDRC_SWSTAT] + and r7, r7, #0x1 + cmp r7, #0x1 + bne 17b +18: + ldr r7, [r3, #DDRC_STAT] + and r7, r7, #0x20 + cmp r7, #0x20 + bne 18b + + /* let DDR out of 
self-refresh */ + ldr r7, =0x0 + str r7, [r3, #DDRC_PWRCTL] +19: + ldr r7, [r3, #DDRC_STAT] + and r7, r7, #0x30 + cmp r7, #0x0 + bne 19b + +20: + ldr r7, [r3, #DDRC_STAT] + and r7, r7, #0x3 + cmp r7, #0x1 + bne 20b + + /* enable port */ + ldr r7, =0x1 + str r7, [r3, #DDRC_PCTRL_0] + + /* enable auto self-refresh */ + ldr r7, [r3, #DDRC_PWRCTL] + orr r7, r7, #(1 << 0) + str r7, [r3, #DDRC_PWRCTL] + + .endm + +FUNC imx7_suspend, : +UNWIND( .fnstart) +UNWIND( .cantunwind) + push {r4-r12} + + /* make sure SNVS clk is enabled */ + ldr r11, [r0, #PM_INFO_MX7_CCM_V_OFF] + add r11, r11, #0x4000 + ldr r7, =0x3 + str r7, [r11, #CCM_SNVS_LPCG] + + /* check whether it is a standby mode */ + ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFF] + ldr r7, [r11, #GPC_PGC_C0] + cmp r7, #0 + beq ddr_only_self_refresh + + /* + * The value of r0 is mapped the same in origin table and IRAM table, + * thus no need to care r0 here. + */ + ldr r1, [r0, #PM_INFO_MX7_PBASE_OFF] + ldr r4, [r0, #PM_INFO_MX7_SIZE_OFF] + + /* + * counting the resume address in iram + * to set it in SRC register. + */ + ldr r6, =imx7_suspend + ldr r7, =resume + sub r7, r7, r6 + add r8, r1, r4 + add r9, r8, r7 + + ldr r11, [r0, #PM_INFO_MX7_SRC_V_OFF] + /* store physical resume addr and pm_info address. */ + str r9, [r11, #MX7_SRC_GPR1] + str r1, [r11, #MX7_SRC_GPR2] + + disable_l1_dcache + + store_ttbr + + ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFF] + ldr r7, [r11, #GPC_PGC_FM] + cmp r7, #0 + beq ddr_only_self_refresh + + ddr_enter_retention + /* enter LPSR mode if resume addr is valid */ + ldr r11, [r0, #PM_INFO_MX7_LPSR_V_OFF] + ldr r7, [r11] + cmp r7, #0x0 + beq ddr_retention_enter_out + + /* disable STOP mode before entering LPSR */ + ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFF] + ldr r7, [r11] + bic r7, #0xf + str r7, [r11] + + /* shut down vddsoc to enter lpsr mode */ + ldr r11, [r0, #PM_INFO_MX7_SNVS_V_OFF] + ldr r7, [r11, #0x38] + orr r7, r7, #0x60 + str r7, [r11, #0x38] + dsb +wait_shutdown: + wfi + b wait_shutdown + +ddr_only_self_refresh: + ddrc_enter_self_refresh + b wfi +ddr_retention_enter_out: + ldr r11, [r0, #PM_INFO_MX7_GIC_DIST_V_OFF] + ldr r7, =0x0 + ldr r8, =0x1000 + str r7, [r11, r8] + + ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFF] + ldr r4, [r11, #MX7D_GPC_IMR1] + ldr r5, [r11, #MX7D_GPC_IMR2] + ldr r6, [r11, #MX7D_GPC_IMR3] + ldr r7, [r11, #MX7D_GPC_IMR4] + + ldr r8, =0xffffffff + str r8, [r11, #MX7D_GPC_IMR1] + str r8, [r11, #MX7D_GPC_IMR2] + str r8, [r11, #MX7D_GPC_IMR3] + str r8, [r11, #MX7D_GPC_IMR4] + + /* + * enable the RBC bypass counter here + * to hold off the interrupts. RBC counter + * = 8 (240us). With this setting, the latency + * from wakeup interrupt to ARM power up + * is ~250uS. + */ + ldr r8, [r11, #0x14] + bic r8, r8, #(0x3f << 24) + orr r8, r8, #(0x8 << 24) + str r8, [r11, #0x14] + + /* enable the counter. */ + ldr r8, [r11, #0x14] + orr r8, r8, #(0x1 << 30) + str r8, [r11, #0x14] + + /* unmask all the GPC interrupts. */ + str r4, [r11, #MX7D_GPC_IMR1] + str r5, [r11, #MX7D_GPC_IMR2] + str r6, [r11, #MX7D_GPC_IMR3] + str r7, [r11, #MX7D_GPC_IMR4] + + /* + * now delay for a short while (3usec) + * ARM is at 1GHz at this point + * so a short loop should be enough. + * this delay is required to ensure that + * the RBC counter can start counting in + * case an interrupt is already pending + * or in case an interrupt arrives just + * as ARM is about to assert DSM_request. 
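+	 * The 2000-iteration countdown below provides that delay.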
+ */ + ldr r7, =2000 +rbc_loop: + subs r7, r7, #0x1 + bne rbc_loop +wfi: + dsb + /* Enter stop mode */ + wfi + + mov r5, #0x0 + + ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFF] + ldr r7, [r11, #GPC_PGC_FM] + cmp r7, #0 + beq wfi_ddr_self_refresh_out + + ddr_exit_retention + b wfi_ddr_retention_out +wfi_ddr_self_refresh_out: + ddrc_exit_self_refresh +wfi_ddr_retention_out: + + /* check whether it is a standby mode */ + ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFF] + ldr r7, [r11, #GPC_PGC_C0] + cmp r7, #0 + beq standby_out + + ldr r11, [r0, #PM_INFO_MX7_GIC_DIST_V_OFF] + ldr r7, =0x1 + ldr r8, =0x1000 + str r7, [r11, r8] + + restore_ttbr +standby_out: + pop {r4-r12} + /* return to suspend finish */ + bx lr + +resume: + write_iciallu + write_bpiall + dsb + isb + + mov r6, #(SCTLR_I | SCTLR_Z) + write_sctlr r6 + isb + + /* + * After resume back, rom run in SVC mode, + * so we need to switch to monitor mode. + */ + cps #CPSR_MODE_MON + + /* get physical resume address from pm_info. */ + ldr lr, [r0, #PM_INFO_MX7_RESUME_ADDR_OFF] + /* clear core0's entry and parameter */ + ldr r11, [r0, #PM_INFO_MX7_SRC_P_OFF] + mov r7, #0x0 + str r7, [r11, #MX7_SRC_GPR1] + str r7, [r11, #MX7_SRC_GPR2] + + mov r5, #0x1 + + ldr r11, [r0, #PM_INFO_MX7_GPC_P_OFF] + ldr r7, [r11, #GPC_PGC_FM] + cmp r7, #0 + beq dsm_ddr_self_refresh_out + + ddr_exit_retention + b dsm_ddr_retention_out +dsm_ddr_self_refresh_out: + ddrc_exit_self_refresh +dsm_ddr_retention_out: + + bx lr +UNWIND( .fnend) +END_FUNC imx7_suspend + +FUNC ca7_cpu_resume, : +UNWIND( .fnstart) +UNWIND( .cantunwind) + mov r0, #0 @ ; write the cache size selection register to be + write_csselr r0 @ ; sure we address the data cache + isb @ ; isb to sync the change to the cachesizeid reg + +_inv_dcache_off: + mov r0, #0 @ ; set way number to 0 +_inv_nextway: + mov r1, #0 @ ; set line number (=index) to 0 +_inv_nextline: + orr r2, r0, r1 @ ; construct way/index value + write_dcisw r2 @ ; invalidate data or unified cache line by set/way + add r1, r1, #1 << LINE_FIELD_OFFSET @ ; increment the index + cmp r1, #1 << LINE_FIELD_OVERFLOW @ ; overflow out of set field? + bne _inv_nextline + add r0, r0, #1 << WAY_FIELD_OFFSET @ ; increment the way number + cmp r0, #0 @ ; overflow out of way field? + bne _inv_nextway + + dsb @ ; synchronise + isb + + /* + * No stack, scratch r0-r3 + * TODO: Need to use specific configure, but not plat_xxx. + * Because plat_xx maybe changed in future, we can not rely on it. + * Need handle sp carefully. 
+ */ + blx plat_cpu_reset_early + + b sm_pm_cpu_resume +UNWIND( .fnend) +END_FUNC ca7_cpu_resume diff --git a/core/arch/arm/plat-imx/pm/psci.c b/core/arch/arm/plat-imx/pm/psci.c index 220da0f7d61..a7de449e9c4 100644 --- a/core/arch/arm/plat-imx/pm/psci.c +++ b/core/arch/arm/plat-imx/pm/psci.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -84,6 +85,8 @@ int psci_cpu_on(uint32_t core_idx, uint32_t entry, val |= BIT32(SRC_SCR_CORE1_RST_OFFSET + (core_idx - 1)); write32(val, va + SRC_SCR); + imx_set_src_gpr(core_idx, 0); + return PSCI_RET_SUCCESS; } @@ -155,3 +158,50 @@ int psci_affinity_info(uint32_t affinity, return PSCI_AFFINITY_LEVEL_OFF; } #endif + +__weak int imx7_cpu_suspend(uint32_t power_state __unused, + uintptr_t entry __unused, + uint32_t context_id __unused, + struct sm_nsec_ctx *nsec __unused) +{ + return 0; +} + +int psci_cpu_suspend(uint32_t power_state, + uintptr_t entry, uint32_t context_id __unused, + struct sm_nsec_ctx *nsec) +{ + uint32_t id, type; + int ret = PSCI_RET_INVALID_PARAMETERS; + + id = power_state & PSCI_POWER_STATE_ID_MASK; + type = (power_state & PSCI_POWER_STATE_TYPE_MASK) >> + PSCI_POWER_STATE_TYPE_SHIFT; + + if ((type != PSCI_POWER_STATE_TYPE_POWER_DOWN) && + (type != PSCI_POWER_STATE_TYPE_STANDBY)) { + DMSG("Not supported %x\n", type); + return ret; + } + + /* + * ID 0 means suspend + * ID 1 means low power idle + * TODO: follow PSCI StateID sample encoding. + */ + DMSG("ID = %d\n", id); + if (id == 1) { + /* Not supported now */ + return ret; + } else if (id == 0) { + if (soc_is_imx7ds()) { + return imx7_cpu_suspend(power_state, entry, + context_id, nsec); + } + return ret; + } + + DMSG("ID %d not supported\n", id); + + return ret; +} diff --git a/core/arch/arm/plat-imx/sub.mk b/core/arch/arm/plat-imx/sub.mk index 17fda6adc28..a90bb622d1d 100644 --- a/core/arch/arm/plat-imx/sub.mk +++ b/core/arch/arm/plat-imx/sub.mk @@ -4,7 +4,11 @@ srcs-y += main.c imx-common.c srcs-$(CFG_MX6)$(CFG_MX7) += mmdc.c srcs-$(CFG_PL310) += imx_pl310.c -srcs-$(CFG_PSCI_ARM32) += pm/psci.c pm/gpcv2.c +ifeq ($(CFG_PSCI_ARM32),y) +srcs-y += pm/psci.c pm/gpcv2.c +srcs-$(CFG_MX7) += pm/pm-imx7.c pm/psci-suspend-imx7.S pm/imx7_suspend.c +endif + cflags-pm/psci.c-y += -Wno-suggest-attribute=noreturn ifneq (,$(filter y, $(CFG_MX6Q) $(CFG_MX6D) $(CFG_MX6DL) $(CFG_MX6S)))