Diffstat (limited to 'include/bl31')
-rw-r--r-- | include/bl31/bl31.h             |  46
-rw-r--r-- | include/bl31/context.h          | 352
-rw-r--r-- | include/bl31/context_mgmt.h     |  59
-rw-r--r-- | include/bl31/interrupt_mgmt.h   | 128
-rw-r--r-- | include/bl31/runtime_svc.h      | 273
-rw-r--r-- | include/bl31/services/psci.h    | 216
-rw-r--r-- | include/bl31/services/std_svc.h |  51
7 files changed, 1125 insertions, 0 deletions
diff --git a/include/bl31/bl31.h b/include/bl31/bl31.h new file mode 100644 index 0000000..33e4ece --- /dev/null +++ b/include/bl31/bl31.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __BL31_H__ +#define __BL31_H__ + +#include <stdint.h> + +/******************************************************************************* + * Function prototypes + ******************************************************************************/ +void bl31_arch_setup(void); +void bl31_next_el_arch_setup(uint32_t security_state); +void bl31_set_next_image_type(uint32_t type); +uint32_t bl31_get_next_image_type(void); +void bl31_prepare_next_image_entry(); +void bl31_register_bl32_init(int32_t (*)(void)); + +#endif /* __BL31_H__ */ diff --git a/include/bl31/context.h b/include/bl31/context.h new file mode 100644 index 0000000..16cc744 --- /dev/null +++ b/include/bl31/context.h @@ -0,0 +1,352 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CONTEXT_H__ +#define __CONTEXT_H__ + +/******************************************************************************* + * Constants that allow assembler code to access members of and the 'gp_regs' + * structure at their correct offsets. + ******************************************************************************/ +#define CTX_GPREGS_OFFSET 0x0 +#define CTX_GPREG_X0 0x0 +#define CTX_GPREG_X1 0x8 +#define CTX_GPREG_X2 0x10 +#define CTX_GPREG_X3 0x18 +#define CTX_GPREG_X4 0x20 +#define CTX_GPREG_X5 0x28 +#define CTX_GPREG_X6 0x30 +#define CTX_GPREG_X7 0x38 +#define CTX_GPREG_X8 0x40 +#define CTX_GPREG_X9 0x48 +#define CTX_GPREG_X10 0x50 +#define CTX_GPREG_X11 0x58 +#define CTX_GPREG_X12 0x60 +#define CTX_GPREG_X13 0x68 +#define CTX_GPREG_X14 0x70 +#define CTX_GPREG_X15 0x78 +#define CTX_GPREG_X16 0x80 +#define CTX_GPREG_X17 0x88 +#define CTX_GPREG_X18 0x90 +#define CTX_GPREG_X19 0x98 +#define CTX_GPREG_X20 0xa0 +#define CTX_GPREG_X21 0xa8 +#define CTX_GPREG_X22 0xb0 +#define CTX_GPREG_X23 0xb8 +#define CTX_GPREG_X24 0xc0 +#define CTX_GPREG_X25 0xc8 +#define CTX_GPREG_X26 0xd0 +#define CTX_GPREG_X27 0xd8 +#define CTX_GPREG_X28 0xe0 +#define CTX_GPREG_X29 0xe8 +#define CTX_GPREG_LR 0xf0 +#define CTX_GPREG_SP_EL0 0xf8 +#define CTX_GPREGS_END 0x100 + +/******************************************************************************* + * Constants that allow assembler code to access members of and the 'el3_state' + * structure at their correct offsets. Note that some of the registers are only + * 32-bits wide but are stored as 64-bit values for convenience + ******************************************************************************/ +#define CTX_EL3STATE_OFFSET (CTX_GPREGS_OFFSET + CTX_GPREGS_END) +#define CTX_VBAR_EL3 0x0 /* Currently unused */ +#define CTX_RUNTIME_SP 0x8 +#define CTX_SPSR_EL3 0x10 +#define CTX_ELR_EL3 0x18 +#define CTX_SCR_EL3 0x20 +#define CTX_SCTLR_EL3 0x28 +#define CTX_CPTR_EL3 0x30 +/* Unused space to allow registers to be stored as pairs */ +#define CTX_CNTFRQ_EL0 0x40 +#define CTX_MAIR_EL3 0x48 +#define CTX_TCR_EL3 0x50 +#define CTX_TTBR0_EL3 0x58 +#define CTX_DAIF_EL3 0x60 +/* Unused space to honour alignment requirements */ +#define CTX_EL3STATE_END 0x70 + +/******************************************************************************* + * Constants that allow assembler code to access members of and the + * 'el1_sys_regs' structure at their correct offsets. 
Note that some of the + * registers are only 32-bits wide but are stored as 64-bit values for + * convenience + ******************************************************************************/ +#define CTX_SYSREGS_OFFSET (CTX_EL3STATE_OFFSET + CTX_EL3STATE_END) +#define CTX_SPSR_EL1 0x0 +#define CTX_ELR_EL1 0x8 +#define CTX_SPSR_ABT 0x10 +#define CTX_SPSR_UND 0x18 +#define CTX_SPSR_IRQ 0x20 +#define CTX_SPSR_FIQ 0x28 +#define CTX_SCTLR_EL1 0x30 +#define CTX_ACTLR_EL1 0x38 +#define CTX_CPACR_EL1 0x40 +#define CTX_CSSELR_EL1 0x48 +#define CTX_SP_EL1 0x50 +#define CTX_ESR_EL1 0x58 +#define CTX_TTBR0_EL1 0x60 +#define CTX_TTBR1_EL1 0x68 +#define CTX_MAIR_EL1 0x70 +#define CTX_AMAIR_EL1 0x78 +#define CTX_TCR_EL1 0x80 +#define CTX_TPIDR_EL1 0x88 +#define CTX_TPIDR_EL0 0x90 +#define CTX_TPIDRRO_EL0 0x98 +#define CTX_DACR32_EL2 0xa0 +#define CTX_IFSR32_EL2 0xa8 +#define CTX_PAR_EL1 0xb0 +#define CTX_FAR_EL1 0xb8 +#define CTX_AFSR0_EL1 0xc0 +#define CTX_AFSR1_EL1 0xc8 +#define CTX_CONTEXTIDR_EL1 0xd0 +#define CTX_VBAR_EL1 0xd8 +/* + * If the timer registers aren't saved and restored, we don't have to reserve + * space for them in the context + */ +#if NS_TIMER_SWITCH +#define CTX_CNTP_CTL_EL0 0xe0 +#define CTX_CNTP_CVAL_EL0 0xe8 +#define CTX_CNTV_CTL_EL0 0xf0 +#define CTX_CNTV_CVAL_EL0 0xf8 +#define CTX_CNTKCTL_EL1 0x100 +#define CTX_FP_FPEXC32_EL2 0x108 +#define CTX_SYSREGS_END 0x110 +#else +#define CTX_FP_FPEXC32_EL2 0xe0 +#define CTX_SYSREGS_END 0xf0 +#endif + +/******************************************************************************* + * Constants that allow assembler code to access members of and the 'fp_regs' + * structure at their correct offsets. + ******************************************************************************/ +#define CTX_FPREGS_OFFSET (CTX_SYSREGS_OFFSET + CTX_SYSREGS_END) +#define CTX_FP_Q0 0x0 +#define CTX_FP_Q1 0x10 +#define CTX_FP_Q2 0x20 +#define CTX_FP_Q3 0x30 +#define CTX_FP_Q4 0x40 +#define CTX_FP_Q5 0x50 +#define CTX_FP_Q6 0x60 +#define CTX_FP_Q7 0x70 +#define CTX_FP_Q8 0x80 +#define CTX_FP_Q9 0x90 +#define CTX_FP_Q10 0xa0 +#define CTX_FP_Q11 0xb0 +#define CTX_FP_Q12 0xc0 +#define CTX_FP_Q13 0xd0 +#define CTX_FP_Q14 0xe0 +#define CTX_FP_Q15 0xf0 +#define CTX_FP_Q16 0x100 +#define CTX_FP_Q17 0x110 +#define CTX_FP_Q18 0x120 +#define CTX_FP_Q19 0x130 +#define CTX_FP_Q20 0x140 +#define CTX_FP_Q21 0x150 +#define CTX_FP_Q22 0x160 +#define CTX_FP_Q23 0x170 +#define CTX_FP_Q24 0x180 +#define CTX_FP_Q25 0x190 +#define CTX_FP_Q26 0x1a0 +#define CTX_FP_Q27 0x1b0 +#define CTX_FP_Q28 0x1c0 +#define CTX_FP_Q29 0x1d0 +#define CTX_FP_Q30 0x1e0 +#define CTX_FP_Q31 0x1f0 +#define CTX_FP_FPSR 0x200 +#define CTX_FP_FPCR 0x208 +#define CTX_FPREGS_END 0x210 + +/****************************************************************************** + * Offsets for the per cpu cache implementation + ******************************************************************************/ +#define PTR_CACHE_CRASH_STACK_OFFSET 0x0 + +#ifndef __ASSEMBLY__ + +#include <cassert.h> +#include <stdint.h> + +/* + * Common constants to help define the 'cpu_context' structure and its + * members below. 
+ */ +#define DWORD_SHIFT 3 +#define DEFINE_REG_STRUCT(name, num_regs) \ + typedef struct name { \ + uint64_t _regs[num_regs]; \ + } __aligned(16) name##_t + +/* Constants to determine the size of individual context structures */ +#define CTX_GPREG_ALL (CTX_GPREGS_END >> DWORD_SHIFT) +#define CTX_SYSREG_ALL (CTX_SYSREGS_END >> DWORD_SHIFT) +#define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT) +#define CTX_EL3STATE_ALL (CTX_EL3STATE_END >> DWORD_SHIFT) + +/* + * AArch64 general purpose register context structure. Usually x0-x18, + * lr are saved as the compiler is expected to preserve the remaining + * callee saved registers if used by the C runtime and the assembler + * does not touch the remaining. But in case of world switch during + * exception handling, we need to save the callee registers too. + */ +DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL); + +/* + * AArch64 EL1 system register context structure for preserving the + * architectural state during switches from one security state to + * another in EL1. + */ +DEFINE_REG_STRUCT(el1_sys_regs, CTX_SYSREG_ALL); + +/* + * AArch64 floating point register context structure for preserving + * the floating point state during switches from one security state to + * another. + */ +DEFINE_REG_STRUCT(fp_regs, CTX_FPREG_ALL); + +/* + * Miscellaneous registers used by EL3 firmware to maintain its state + * across exception entries and exits + */ +DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL); + +/* + * Macros to access members of any of the above structures using their + * offsets + */ +#define read_ctx_reg(ctx, offset) ((ctx)->_regs[offset >> DWORD_SHIFT]) +#define write_ctx_reg(ctx, offset, val) (((ctx)->_regs[offset >> DWORD_SHIFT]) \ + = val) + +/* + * Top-level context structure which is used by EL3 firmware to + * preserve the state of a core at EL1 in one of the two security + * states and save enough EL3 meta data to be able to return to that + * EL and security state. The context management library will be used + * to ensure that SP_EL3 always points to an instance of this + * structure at exception entry and exit. Each instance will + * correspond to either the secure or the non-secure state. + */ +typedef struct cpu_context { + gp_regs_t gpregs_ctx; + el3_state_t el3state_ctx; + el1_sys_regs_t sysregs_ctx; + fp_regs_t fpregs_ctx; +} cpu_context_t; + +/* Macros to access members of the 'cpu_context_t' structure */ +#define get_el3state_ctx(h) (&((cpu_context_t *) h)->el3state_ctx) +#define get_fpregs_ctx(h) (&((cpu_context_t *) h)->fpregs_ctx) +#define get_sysregs_ctx(h) (&((cpu_context_t *) h)->sysregs_ctx) +#define get_gpregs_ctx(h) (&((cpu_context_t *) h)->gpregs_ctx) + +/* + * Compile time assertions related to the 'cpu_context' structure to + * ensure that the assembler and the compiler view of the offsets of + * the structure members is the same. + */ +CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context_t, gpregs_ctx), \ + assert_core_context_gp_offset_mismatch); +CASSERT(CTX_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, sysregs_ctx), \ + assert_core_context_sys_offset_mismatch); +CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx), \ + assert_core_context_fp_offset_mismatch); +CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx), \ + assert_core_context_el3state_offset_mismatch); + +/* + * Helper macro to set the general purpose registers that correspond to + * parameters in an aapcs_64 call i.e. 
x0-x7 + */ +#define set_aapcs_args0(ctx, x0) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, x0); \ + } while (0); +#define set_aapcs_args1(ctx, x0, x1) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, x1); \ + set_aapcs_args0(ctx, x0); \ + } while (0); +#define set_aapcs_args2(ctx, x0, x1, x2) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, x2); \ + set_aapcs_args1(ctx, x0, x1); \ + } while (0); +#define set_aapcs_args3(ctx, x0, x1, x2, x3) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, x3); \ + set_aapcs_args2(ctx, x0, x1, x2); \ + } while (0); +#define set_aapcs_args4(ctx, x0, x1, x2, x3, x4) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X4, x4); \ + set_aapcs_args3(ctx, x0, x1, x2, x3); \ + } while (0); +#define set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X5, x5); \ + set_aapcs_args4(ctx, x0, x1, x2, x3, x4); \ + } while (0); +#define set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X6, x6); \ + set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5); \ + } while (0); +#define set_aapcs_args7(ctx, x0, x1, x2, x3, x4, x5, x6, x7) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X7, x7); \ + set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6); \ + } while (0); + +/******************************************************************************* + * Function prototypes + ******************************************************************************/ +void el3_sysregs_context_save(el3_state_t *regs); +void el3_sysregs_context_restore(el3_state_t *regs); +void el1_sysregs_context_save(el1_sys_regs_t *regs); +void el1_sysregs_context_restore(el1_sys_regs_t *regs); +void fpregs_context_save(fp_regs_t *regs); +void fpregs_context_restore(fp_regs_t *regs); + + +/* Per-CPU pointer cache of recently used pointers and also the crash stack + * TODO: Add other commonly used variables to this (tf_issues#90) + */ +typedef struct per_cpu_ptr_cache { + uint64_t crash_stack; +} per_cpu_ptr_cache_t; + +CASSERT(PTR_CACHE_CRASH_STACK_OFFSET == __builtin_offsetof\ + (per_cpu_ptr_cache_t, crash_stack), \ + assert_per_cpu_ptr_cache_crash_stack_offset_mismatch); + +#undef CTX_SYSREG_ALL +#undef CTX_FP_ALL +#undef CTX_GPREG_ALL +#undef CTX_EL3STATE_ALL + +#endif /* __ASSEMBLY__ */ + +#endif /* __CONTEXT_H__ */ diff --git a/include/bl31/context_mgmt.h b/include/bl31/context_mgmt.h new file mode 100644 index 0000000..ce4f7a8 --- /dev/null +++ b/include/bl31/context_mgmt.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CM_H__ +#define __CM_H__ + +#include <stdint.h> + +/******************************************************************************* + * Function & variable prototypes + ******************************************************************************/ +void cm_init(void); +void *cm_get_context(uint64_t mpidr, uint32_t security_state); +void cm_set_context(uint64_t mpidr, + void *context, + uint32_t security_state); +void cm_el3_sysregs_context_save(uint32_t security_state); +void cm_el3_sysregs_context_restore(uint32_t security_state); +void cm_el1_sysregs_context_save(uint32_t security_state); +void cm_el1_sysregs_context_restore(uint32_t security_state); +void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint, + uint32_t spsr, uint32_t scr); +void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint); +void cm_write_scr_el3_bit(uint32_t security_state, + uint32_t bit_pos, + uint32_t value); +void cm_set_next_eret_context(uint32_t security_state); +void cm_init_pcpu_ptr_cache(); +void cm_set_pcpu_ptr_cache(const void *pcpu_ptr); +void *cm_get_pcpu_ptr_cache(void); +uint32_t cm_get_scr_el3(uint32_t security_state); +#endif /* __CM_H__ */ diff --git a/include/bl31/interrupt_mgmt.h b/include/bl31/interrupt_mgmt.h new file mode 100644 index 0000000..3a2c00c --- /dev/null +++ b/include/bl31/interrupt_mgmt.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __INTERRUPT_MGMT_H__ +#define __INTERRUPT_MGMT_H__ + +#include <arch.h> + +/******************************************************************************* + * Constants for the types of interrupts recognised by the IM framework + ******************************************************************************/ +#define INTR_TYPE_S_EL1 0 +#define INTR_TYPE_EL3 1 +#define INTR_TYPE_NS 2 +#define MAX_INTR_TYPES 3 +#define INTR_TYPE_INVAL MAX_INTR_TYPES +/* + * Constant passed to the interrupt handler in the 'id' field when the + * framework does not read the gic registers to determine the interrupt id. + */ +#define INTR_ID_UNAVAILABLE 0xFFFFFFFF + + +/******************************************************************************* + * Mask for _both_ the routing model bits in the 'flags' parameter and + * constants to define the valid routing models for each supported interrupt + * type + ******************************************************************************/ +#define INTR_RM_FLAGS_SHIFT 0x0 +#define INTR_RM_FLAGS_MASK 0x3 +/* Routed to EL3 from NS. Taken to S-EL1 from Secure */ +#define INTR_SEL1_VALID_RM0 0x2 +/* Routed to EL3 from NS and Secure */ +#define INTR_SEL1_VALID_RM1 0x3 +/* Routed to EL1/EL2 from NS and to S-EL1 from Secure */ +#define INTR_NS_VALID_RM0 0x0 +/* Routed to EL1/EL2 from NS and to EL3 from Secure */ +#define INTR_NS_VALID_RM1 0x1 + + +/******************************************************************************* + * Constants for the _individual_ routing model bits in the 'flags' field for + * each interrupt type and mask to validate the 'flags' parameter while + * registering an interrupt handler + ******************************************************************************/ +#define INTR_TYPE_FLAGS_MASK 0xFFFFFFFC + +#define INTR_RM_FROM_SEC_SHIFT SECURE /* BIT[0] */ +#define INTR_RM_FROM_NS_SHIFT NON_SECURE /* BIT[1] */ +#define INTR_RM_FROM_FLAG_MASK 1 +#define get_interrupt_rm_flag(flag, ss) (((flag >> INTR_RM_FLAGS_SHIFT) >> ss) \ + & INTR_RM_FROM_FLAG_MASK) +#define set_interrupt_rm_flag(flag, ss) (flag |= 1 << ss) +#define clr_interrupt_rm_flag(flag, ss) (flag &= ~(1 << ss)) + + +/******************************************************************************* + * Macros to validate the routing model bits in the 'flags' for a type + * of interrupt. If the model does not match one of the valid masks + * -EINVAL is returned. + ******************************************************************************/ +#define validate_sel1_interrupt_rm(x) (x == INTR_SEL1_VALID_RM0 ? 0 : \ + (x == INTR_SEL1_VALID_RM1 ? 0 :\ + -EINVAL)) + +#define validate_ns_interrupt_rm(x) (x == INTR_NS_VALID_RM0 ? 0 : \ + (x == INTR_NS_VALID_RM1 ? 0 :\ + -EINVAL)) + +/******************************************************************************* + * Macros to set the 'flags' parameter passed to an interrupt type handler. Only + * the flag to indicate the security state when the exception was generated is + * supported. 
+ ******************************************************************************/ +#define INTR_SRC_SS_FLAG_SHIFT 0 /* BIT[0] */ +#define INTR_SRC_SS_FLAG_MASK 1 +#define set_interrupt_src_ss(flag, val) (flag |= val << INTR_SRC_SS_FLAG_SHIFT) +#define clr_interrupt_src_ss(flag) (flag &= ~(1 << INTR_SRC_SS_FLAG_SHIFT)) +#define get_interrupt_src_ss(flag) ((flag >> INTR_SRC_SS_FLAG_SHIFT) & \ + INTR_SRC_SS_FLAG_MASK) + +#ifndef __ASSEMBLY__ + +/* Prototype for defining a handler for an interrupt type */ +typedef uint64_t (*interrupt_type_handler_t)(uint32_t id, + uint32_t flags, + void *handle, + void *cookie); + +/******************************************************************************* + * Function & variable prototypes + ******************************************************************************/ +uint32_t get_scr_el3_from_routing_model(uint32_t security_state); +int32_t set_routing_model(uint32_t type, uint32_t flags); +int32_t register_interrupt_type_handler(uint32_t type, + interrupt_type_handler_t handler, + uint32_t flags); +interrupt_type_handler_t get_interrupt_type_handler(uint32_t interrupt_type); + +#endif /*__ASSEMBLY__*/ +#endif /* __INTERRUPT_MGMT_H__ */ diff --git a/include/bl31/runtime_svc.h b/include/bl31/runtime_svc.h new file mode 100644 index 0000000..d7d88d4 --- /dev/null +++ b/include/bl31/runtime_svc.h @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __RUNTIME_SVC_H__ +#define __RUNTIME_SVC_H__ + +/******************************************************************************* + * Bit definitions inside the function id as per the SMC calling convention + ******************************************************************************/ +#define FUNCID_TYPE_SHIFT 31 +#define FUNCID_CC_SHIFT 30 +#define FUNCID_OEN_SHIFT 24 +#define FUNCID_NUM_SHIFT 0 + +#define FUNCID_TYPE_MASK 0x1 +#define FUNCID_CC_MASK 0x1 +#define FUNCID_OEN_MASK 0x3f +#define FUNCID_NUM_MASK 0xffff + +#define FUNCID_TYPE_WIDTH 1 +#define FUNCID_CC_WIDTH 1 +#define FUNCID_OEN_WIDTH 6 +#define FUNCID_NUM_WIDTH 16 + +#define GET_SMC_CC(id) ((id >> FUNCID_CC_SHIFT) & \ + FUNCID_CC_MASK) +#define GET_SMC_TYPE(id) ((id >> FUNCID_TYPE_SHIFT) & \ + FUNCID_TYPE_MASK) + +#define SMC_64 1 +#define SMC_32 0 +#define SMC_UNK 0xffffffff +#define SMC_TYPE_FAST 1 +#define SMC_TYPE_STD 0 +#define SMC_PREEMPTED 0xfffffffe +/******************************************************************************* + * Owning entity number definitions inside the function id as per the SMC + * calling convention + ******************************************************************************/ +#define OEN_ARM_START 0 +#define OEN_ARM_END 0 +#define OEN_CPU_START 1 +#define OEN_CPU_END 1 +#define OEN_SIP_START 2 +#define OEN_SIP_END 2 +#define OEN_OEM_START 3 +#define OEN_OEM_END 3 +#define OEN_STD_START 4 /* Standard Calls */ +#define OEN_STD_END 4 +#define OEN_TAP_START 48 /* Trusted Applications */ +#define OEN_TAP_END 49 +#define OEN_TOS_START 50 /* Trusted OS */ +#define OEN_TOS_END 63 +#define OEN_LIMIT 64 + +/******************************************************************************* + * Constants to indicate type of exception to the common exception handler. + ******************************************************************************/ +#define SYNC_EXCEPTION_SP_EL0 0x0 +#define IRQ_SP_EL0 0x1 +#define FIQ_SP_EL0 0x2 +#define SERROR_SP_EL0 0x3 +#define SYNC_EXCEPTION_SP_ELX 0x4 +#define IRQ_SP_ELX 0x5 +#define FIQ_SP_ELX 0x6 +#define SERROR_SP_ELX 0x7 +#define SYNC_EXCEPTION_AARCH64 0x8 +#define IRQ_AARCH64 0x9 +#define FIQ_AARCH64 0xa +#define SERROR_AARCH64 0xb +#define SYNC_EXCEPTION_AARCH32 0xc +#define IRQ_AARCH32 0xd +#define FIQ_AARCH32 0xe +#define SERROR_AARCH32 0xf + +/******************************************************************************* + * Structure definition, typedefs & constants for the runtime service framework + ******************************************************************************/ + +/* + * Constants to allow the assembler access a runtime service + * descriptor + */ +#define RT_SVC_SIZE_LOG2 5 +#define SIZEOF_RT_SVC_DESC (1 << RT_SVC_SIZE_LOG2) +#define RT_SVC_DESC_INIT 16 +#define RT_SVC_DESC_HANDLE 24 + +/* + * The function identifier has 6 bits for the owning entity number and + * single bit for the type of smc call. When taken together these + * values limit the maximum number of runtime services to 128. 
+ */ +#define MAX_RT_SVCS 128 + +#ifndef __ASSEMBLY__ + +#include <cassert.h> +#include <context.h> +#include <stdint.h> + +/* Various flags passed to SMC handlers */ +#define SMC_FROM_SECURE (0 << 0) +#define SMC_FROM_NON_SECURE (1 << 0) + +#define is_caller_non_secure(_f) (!!(_f & SMC_FROM_NON_SECURE)) +#define is_caller_secure(_f) (!(is_caller_non_secure(_f))) + +/* Prototype for runtime service initializing function */ +typedef int32_t (*rt_svc_init_t)(void); + +/* Convenience macros to return from SMC handler */ +#define SMC_RET0(_h) { \ + return (uint64_t) (_h); \ +} +#define SMC_RET1(_h, _x0) { \ + write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0)); \ + SMC_RET0(_h); \ +} +#define SMC_RET2(_h, _x0, _x1) { \ + write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1)); \ + SMC_RET1(_h, (_x0)); \ +} +#define SMC_RET3(_h, _x0, _x1, _x2) { \ + write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X2, (_x2)); \ + SMC_RET2(_h, (_x0), (_x1)); \ +} +#define SMC_RET4(_h, _x0, _x1, _x2, _x3) { \ + write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X3, (_x3)); \ + SMC_RET3(_h, (_x0), (_x1), (_x2)); \ +} + + +/* + * Convenience macros to access general purpose registers using handle provided + * to SMC handler. These takes the offset values defined in context.h + */ +#define SMC_GET_GP(_h, _g) \ + read_ctx_reg(get_gpregs_ctx(_h), (_g)); +#define SMC_SET_GP(_h, _g, _v) \ + write_ctx_reg(get_gpregs_ctx(_h), (_g), (_v)); + +/* + * Convenience macros to access EL3 context registers using handle provided to + * SMC handler. These takes the offset values defined in context.h + */ +#define SMC_GET_EL3(_h, _e) \ + read_ctx_reg(get_el3state_ctx(_h), (_e)); +#define SMC_SET_EL3(_h, _e, _v) \ + write_ctx_reg(get_el3state_ctx(_h), (_e), (_v)); + +/* + * Prototype for runtime service SMC handler function. x0 (SMC Function ID) to + * x4 are as passed by the caller. Rest of the arguments to SMC and the context + * can be accessed using the handle pointer. The cookie parameter is reserved + * for future use + */ +typedef uint64_t (*rt_svc_handle_t)(uint32_t smc_fid, + uint64_t x1, + uint64_t x2, + uint64_t x3, + uint64_t x4, + void *cookie, + void *handle, + uint64_t flags); +typedef struct rt_svc_desc { + uint8_t start_oen; + uint8_t end_oen; + uint8_t call_type; + const char *name; + rt_svc_init_t init; + rt_svc_handle_t handle; +} rt_svc_desc_t; + +/* + * Convenience macro to declare a service descriptor + */ +#define DECLARE_RT_SVC(_name, _start, _end, _type, _setup, _smch) \ + static const rt_svc_desc_t __svc_desc_ ## _name \ + __attribute__ ((section("rt_svc_descs"), used)) = { \ + _start, \ + _end, \ + _type, \ + #_name, \ + _setup, \ + _smch } + +/* + * Compile time assertions related to the 'rt_svc_desc' structure to: + * 1. ensure that the assembler and the compiler view of the size + * of the structure are the same. + * 2. ensure that the assembler and the compiler see the initialisation + * routine at the same offset. + * 3. ensure that the assembler and the compiler see the handler + * routine at the same offset. + */ +CASSERT((sizeof(rt_svc_desc_t) == SIZEOF_RT_SVC_DESC), \ + assert_sizeof_rt_svc_desc_mismatch); +CASSERT(RT_SVC_DESC_INIT == __builtin_offsetof(rt_svc_desc_t, init), \ + assert_rt_svc_desc_init_offset_mismatch); +CASSERT(RT_SVC_DESC_HANDLE == __builtin_offsetof(rt_svc_desc_t, handle), \ + assert_rt_svc_desc_handle_offset_mismatch); + + +/* + * This macro combines the call type and the owning entity number corresponding + * to a runtime service to generate a unique owning entity number. 
This unique + * oen is used to access an entry in the 'rt_svc_descs_indices' array. The entry + * contains the index of the service descriptor in the 'rt_svc_descs' array. + */ +#define get_unique_oen(oen, call_type) ((oen & FUNCID_OEN_MASK) | \ + ((call_type & FUNCID_TYPE_MASK) \ + << FUNCID_OEN_WIDTH)) + + +/* + * Macro to define UUID for services. Apart from defining and initializing a + * uuid_t structure, this macro verifies that the first word of the defined UUID + * does not equal SMC_UNK. This is to ensure that the caller won't mistake the + * returned UUID in x0 for an invalid SMC error return + */ +#define DEFINE_SVC_UUID(_name, _tl, _tm, _th, _cl, _ch, \ + _n0, _n1, _n2, _n3, _n4, _n5) \ + CASSERT(_tl != SMC_UNK, invalid_svc_uuid);\ + static const uuid_t _name = { \ + _tl, _tm, _th, _cl, _ch, \ + { _n0, _n1, _n2, _n3, _n4, _n5 } \ + } + +/* Return a UUID in the SMC return registers */ +#define SMC_UUID_RET(_h, _uuid) \ + SMC_RET4(handle, ((const uint32_t *) &(_uuid))[0], \ + ((const uint32_t *) &(_uuid))[1], \ + ((const uint32_t *) &(_uuid))[2], \ + ((const uint32_t *) &(_uuid))[3]) + +/******************************************************************************* + * Function & variable prototypes + ******************************************************************************/ +void runtime_svc_init(); +extern uint64_t __RT_SVC_DESCS_START__; +extern uint64_t __RT_SVC_DESCS_END__; +uint64_t get_crash_stack(uint64_t mpidr); +void runtime_exceptions(void); +#endif /*__ASSEMBLY__*/ +#endif /* __RUNTIME_SVC_H__ */ diff --git a/include/bl31/services/psci.h b/include/bl31/services/psci.h new file mode 100644 index 0000000..887c4ce --- /dev/null +++ b/include/bl31/services/psci.h @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __PSCI_H__ +#define __PSCI_H__ + + +/******************************************************************************* + * Defines for runtime services func ids + ******************************************************************************/ +#define PSCI_VERSION 0x84000000 +#define PSCI_CPU_SUSPEND_AARCH32 0x84000001 +#define PSCI_CPU_SUSPEND_AARCH64 0xc4000001 +#define PSCI_CPU_OFF 0x84000002 +#define PSCI_CPU_ON_AARCH32 0x84000003 +#define PSCI_CPU_ON_AARCH64 0xc4000003 +#define PSCI_AFFINITY_INFO_AARCH32 0x84000004 +#define PSCI_AFFINITY_INFO_AARCH64 0xc4000004 +#define PSCI_MIG_AARCH32 0x84000005 +#define PSCI_MIG_AARCH64 0xc4000005 +#define PSCI_MIG_INFO_TYPE 0x84000006 +#define PSCI_MIG_INFO_UP_CPU_AARCH32 0x84000007 +#define PSCI_MIG_INFO_UP_CPU_AARCH64 0xc4000007 +#define PSCI_SYSTEM_OFF 0x84000008 +#define PSCI_SYSTEM_RESET 0x84000009 + +/* + * Number of PSCI calls (above) implemented. System off and reset aren't + * implemented as yet + */ +#define PSCI_NUM_CALLS 13 + +/******************************************************************************* + * PSCI Migrate and friends + ******************************************************************************/ +#define PSCI_TOS_UP_MIG_CAP 0 +#define PSCI_TOS_NOT_UP_MIG_CAP 1 +#define PSCI_TOS_NOT_PRESENT_MP 2 + +/******************************************************************************* + * PSCI CPU_SUSPEND 'power_state' parameter specific defines + ******************************************************************************/ +#define PSTATE_ID_SHIFT 0 +#define PSTATE_TYPE_SHIFT 16 +#define PSTATE_AFF_LVL_SHIFT 24 + +#define PSTATE_ID_MASK 0xffff +#define PSTATE_TYPE_MASK 0x1 +#define PSTATE_AFF_LVL_MASK 0x3 +#define PSTATE_VALID_MASK 0xFCFE0000 + +#define PSTATE_TYPE_STANDBY 0x0 +#define PSTATE_TYPE_POWERDOWN 0x1 + +#define psci_get_pstate_id(pstate) (pstate >> PSTATE_ID_SHIFT) & \ + PSTATE_ID_MASK +#define psci_get_pstate_type(pstate) (pstate >> PSTATE_TYPE_SHIFT) & \ + PSTATE_TYPE_MASK +#define psci_get_pstate_afflvl(pstate) (pstate >> PSTATE_AFF_LVL_SHIFT) & \ + PSTATE_AFF_LVL_MASK + +/******************************************************************************* + * PSCI version + ******************************************************************************/ +#define PSCI_MAJOR_VER (0 << 16) +#define PSCI_MINOR_VER 0x2 + +/******************************************************************************* + * PSCI error codes + ******************************************************************************/ +#define PSCI_E_SUCCESS 0 +#define PSCI_E_NOT_SUPPORTED -1 +#define PSCI_E_INVALID_PARAMS -2 +#define PSCI_E_DENIED -3 +#define PSCI_E_ALREADY_ON -4 +#define PSCI_E_ON_PENDING -5 +#define PSCI_E_INTERN_FAIL -6 +#define PSCI_E_NOT_PRESENT -7 +#define PSCI_E_DISABLED -8 + +/******************************************************************************* + * PSCI affinity state related constants. An affinity instance could be present + * or absent physically to cater for asymmetric topologies. If present then it + * could in one of the 4 further defined states. + ******************************************************************************/ +#define PSCI_STATE_SHIFT 1 +#define PSCI_STATE_MASK 0xff + +#define PSCI_AFF_ABSENT 0x0 +#define PSCI_AFF_PRESENT 0x1 +#define PSCI_STATE_ON 0x0 +#define PSCI_STATE_OFF 0x1 +#define PSCI_STATE_ON_PENDING 0x2 +#define PSCI_STATE_SUSPEND 0x3 + +#define PSCI_INVALID_DATA -1 + +#define get_phys_state(x) (x != PSCI_STATE_ON ? 
\ + PSCI_STATE_OFF : PSCI_STATE_ON) + +#define psci_validate_power_state(pstate) (pstate & PSTATE_VALID_MASK) + + +/* Number of affinity instances whose state this psci imp. can track */ +#define PSCI_NUM_AFFS 32ull + +#ifndef __ASSEMBLY__ + +#include <stdint.h> + + +/******************************************************************************* + * Structure populated by platform specific code to export routines which + * perform common low level pm functions + ******************************************************************************/ +typedef struct plat_pm_ops { + int (*affinst_standby)(unsigned int); + int (*affinst_on)(unsigned long, + unsigned long, + unsigned long, + unsigned int, + unsigned int); + int (*affinst_off)(unsigned long, unsigned int, unsigned int); + int (*affinst_suspend)(unsigned long, + unsigned long, + unsigned long, + unsigned int, + unsigned int); + int (*affinst_on_finish)(unsigned long, unsigned int, unsigned int); + int (*affinst_suspend_finish)(unsigned long, + unsigned int, + unsigned int); +} plat_pm_ops_t; + +/******************************************************************************* + * Optional structure populated by the Secure Payload Dispatcher to be given a + * chance to perform any bookkeeping before PSCI executes a power mgmt. + * operation. It also allows PSCI to determine certain properties of the SP e.g. + * migrate capability etc. + ******************************************************************************/ +typedef struct spd_pm_ops { + void (*svc_on)(uint64_t target_cpu); + int32_t (*svc_off)(uint64_t __unused); + void (*svc_suspend)(uint64_t power_state); + void (*svc_on_finish)(uint64_t __unused); + void (*svc_suspend_finish)(uint64_t suspend_level); + void (*svc_migrate)(uint64_t __unused1, uint64_t __unused2); + int32_t (*svc_migrate_info)(uint64_t *__unused); +} spd_pm_ops_t; + +/******************************************************************************* + * Function & Data prototypes + ******************************************************************************/ +unsigned int psci_version(void); +int __psci_cpu_suspend(unsigned int, unsigned long, unsigned long); +int __psci_cpu_off(void); +int psci_affinity_info(unsigned long, unsigned int); +int psci_migrate(unsigned int); +unsigned int psci_migrate_info_type(void); +unsigned long psci_migrate_info_up_cpu(void); +void psci_system_off(void); +void psci_system_reset(void); +int psci_cpu_on(unsigned long, + unsigned long, + unsigned long); +void __dead2 psci_power_down_wfi(void); +void psci_aff_on_finish_entry(void); +void psci_aff_suspend_finish_entry(void); +void psci_register_spd_pm_hook(const spd_pm_ops_t *); +int psci_get_suspend_stateid(unsigned long mpidr); +int psci_get_suspend_afflvl(unsigned long mpidr); + +uint64_t psci_smc_handler(uint32_t smc_fid, + uint64_t x1, + uint64_t x2, + uint64_t x3, + uint64_t x4, + void *cookie, + void *handle, + uint64_t flags); + +/* PSCI setup function */ +int32_t psci_setup(void); + + +#endif /*__ASSEMBLY__*/ + + +#endif /* __PSCI_H__ */ diff --git a/include/bl31/services/std_svc.h b/include/bl31/services/std_svc.h new file mode 100644 index 0000000..cbd5b62 --- /dev/null +++ b/include/bl31/services/std_svc.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __STD_SVC_H__
+#define __STD_SVC_H__
+
+/* SMC function IDs for Standard Service queries */
+
+#define ARM_STD_SVC_CALL_COUNT		0x8400ff00
+#define ARM_STD_SVC_UID			0x8400ff01
+/* 0x8400ff02 is reserved */
+#define ARM_STD_SVC_VERSION		0x8400ff03
+
+/* ARM Standard Service Calls version numbers */
+#define STD_SVC_VERSION_MAJOR		0x0
+#define STD_SVC_VERSION_MINOR		0x1
+
+/* The macros below are used to identify PSCI calls from the SMC function ID */
+#define PSCI_FID_MASK			0xffe0u
+#define PSCI_FID_VALUE			0u
+#define is_psci_fid(_fid) \
+	(((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE)
+
+#endif /* __STD_SVC_H__ */
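
The context accessors added in context.h are easiest to follow with a usage sketch. The helper below is not part of the patch (prepare_next_eret_args is a hypothetical name); it only exercises the cpu_context_t type and the get_gpregs_ctx/read_ctx_reg/write_ctx_reg/set_aapcs_args* macros as declared above, to show how an EL3 service might stage AAPCS arguments in a saved context before an ERET.

#include <context.h>
#include <stdint.h>

/* Hypothetical helper: stage AAPCS arguments x0-x2 in a saved cpu context */
static void prepare_next_eret_args(cpu_context_t *ctx,
				   uint64_t fid,
				   uint64_t arg1,
				   uint64_t arg2)
{
	/* Expands to write_ctx_reg() stores on CTX_GPREG_X0..CTX_GPREG_X2 */
	set_aapcs_args2(ctx, fid, arg1, arg2);

	/* Individual registers can also be written by their offset ... */
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, 0);

	/* ... and read back the same way; x0 now holds 'fid' */
	uint64_t x0 = read_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0);
	(void)x0;
}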
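
context_mgmt.h only declares prototypes, so the following is a minimal sketch of the save/restore sequence a secure payload dispatcher would typically perform when moving the current cpu between security states, assuming from_ss and to_ss carry the security-state identifiers used throughout the context management API; the function name demo_switch_context is invented for illustration.

#include <context_mgmt.h>
#include <stdint.h>

/* Hypothetical world-switch helper built only on the cm_* prototypes above */
static void demo_switch_context(uint32_t from_ss, uint32_t to_ss)
{
	/* Save the EL1 system register state of the world being left */
	cm_el1_sysregs_context_save(from_ss);

	/* Restore the EL1 system register state of the world being entered */
	cm_el1_sysregs_context_restore(to_ss);

	/* Point SP_EL3 at the target world's cpu_context_t for the next ERET */
	cm_set_next_eret_context(to_ss);
}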
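
For interrupt_mgmt.h, the sketch below registers a handler for Secure-EL1 interrupts and requests that they be routed to EL3 when they fire in the non-secure state. The demo_* names are hypothetical; the handler prototype, INTR_TYPE_S_EL1, INTR_RM_FROM_NS_SHIFT and the flag helpers come from the header, while the NON_SECURE constant behind INTR_RM_FROM_NS_SHIFT is defined in the firmware's common headers outside this patch.

#include <interrupt_mgmt.h>
#include <stdint.h>

/* Hypothetical handler for Secure-EL1 interrupts taken at EL3 */
static uint64_t demo_sel1_interrupt_handler(uint32_t id,
					    uint32_t flags,
					    void *handle,
					    void *cookie)
{
	/* Which security state was running when the interrupt fired? */
	uint32_t src_ss = get_interrupt_src_ss(flags);

	(void)id;
	(void)cookie;
	(void)src_ss;

	/* Return the context to resume; a real handler would switch worlds here */
	return (uint64_t)handle;
}

static int32_t demo_register_sel1_handler(void)
{
	uint32_t flags = 0;

	/* Route S-EL1 interrupts to EL3 when they occur in the non-secure state */
	set_interrupt_rm_flag(flags, INTR_RM_FROM_NS_SHIFT);

	/* Fails with -EINVAL if the routing model is not valid for this type */
	return register_interrupt_type_handler(INTR_TYPE_S_EL1,
					       demo_sel1_interrupt_handler,
					       flags);
}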
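
runtime_svc.h carries everything needed to plug a new SMC service into BL3-1. The sketch below declares a hypothetical SiP-range fast-call service: the demo_svc names and the 0x82000000 function id are invented, while DECLARE_RT_SVC, the rt_svc_handle_t prototype, is_caller_secure and the SMC_RET* macros are used as defined above. At boot, runtime_svc_init() walks the rt_svc_descs section and calls each descriptor's init hook before the handler can be reached.

#include <runtime_svc.h>

/* Illustrative setup hook: nothing to initialise in this sketch */
static int32_t demo_svc_setup(void)
{
	return 0;
}

/* Illustrative SMC handler matching the rt_svc_handle_t prototype */
static uint64_t demo_svc_smc_handler(uint32_t smc_fid,
				     uint64_t x1,
				     uint64_t x2,
				     uint64_t x3,
				     uint64_t x4,
				     void *cookie,
				     void *handle,
				     uint64_t flags)
{
	(void)x1; (void)x2; (void)x3; (void)x4; (void)cookie;

	/* Only accept calls originating from the non-secure world */
	if (is_caller_secure(flags))
		SMC_RET1(handle, SMC_UNK);

	switch (smc_fid) {
	case 0x82000000:		/* hypothetical "call count" fid */
		SMC_RET1(handle, 1);
	default:
		SMC_RET1(handle, SMC_UNK);
	}
}

/* Emit the descriptor into the 'rt_svc_descs' section scanned at boot */
DECLARE_RT_SVC(
	demo_svc,
	OEN_SIP_START,
	OEN_SIP_END,
	SMC_TYPE_FAST,
	demo_svc_setup,
	demo_svc_smc_handler
);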
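
Finally, std_svc.h and psci.h together determine how a Standard Service SMC is recognised as a PSCI call and how a CPU_SUSPEND power_state parameter is picked apart. The helpers below are illustrative only (the demo_* names are invented); the macros, function ids and error codes are the ones defined in the two headers.

#include <psci.h>
#include <std_svc.h>
#include <stdint.h>

/* Is this function id a PSCI CPU_SUSPEND call, in either calling convention? */
static int demo_is_cpu_suspend(uint32_t smc_fid)
{
	return is_psci_fid(smc_fid) &&
	       (smc_fid == PSCI_CPU_SUSPEND_AARCH32 ||
		smc_fid == PSCI_CPU_SUSPEND_AARCH64);
}

/* Validate a power_state parameter and pull out the fields the core code uses */
static int demo_decode_power_state(unsigned int power_state,
				   unsigned int *type,
				   unsigned int *afflvl)
{
	/* Bits outside the id/type/afflvl fields are reserved and must be zero */
	if (psci_validate_power_state(power_state) != 0)
		return PSCI_E_INVALID_PARAMS;

	/* The state id is platform specific; type and afflvl drive generic code */
	*type = psci_get_pstate_type(power_state);
	*afflvl = psci_get_pstate_afflvl(power_state);

	return PSCI_E_SUCCESS;
}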