Zephyr API Documentation  3.6.99
A Scalable Open Source RTOS
arch.h
/*
 * Copyright (c) 2016 Jean-Paul Etienne <[email protected]>
 * Contributors: 2018 Antmicro <www.antmicro.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_

#if defined(CONFIG_USERSPACE)
#include <zephyr/arch/riscv/syscall.h>
#endif /* CONFIG_USERSPACE */
#include <zephyr/irq.h>
#include <zephyr/sw_isr_table.h>
#include <zephyr/devicetree.h>

/* Stacks: on the RISC-V architecture the stack must be 16-byte aligned */
#define ARCH_STACK_PTR_ALIGN 16

#define Z_RISCV_STACK_PMP_ALIGN \
	MAX(CONFIG_PMP_GRANULARITY, ARCH_STACK_PTR_ALIGN)

#ifdef CONFIG_PMP_STACK_GUARD
/*
 * The StackGuard is an area at the bottom of the kernel-mode stack that is
 * made to fault when accessed. It does _not_ fault when in exception mode,
 * as we rely on that area to save the exception stack frame and to process
 * said fault. Therefore the guard area must be large enough to hold the esf,
 * plus some configurable stack wiggle room to execute the fault handling
 * code off of, as well as some guard size to cover possible sudden stack
 * pointer displacement before the fault.
 */
#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
#define Z_RISCV_STACK_GUARD_SIZE \
	Z_POW2_CEIL(MAX(sizeof(z_arch_esf_t) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
			Z_RISCV_STACK_PMP_ALIGN))
#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_GUARD_SIZE
#else
#define Z_RISCV_STACK_GUARD_SIZE \
	ROUND_UP(sizeof(z_arch_esf_t) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \
		 Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_PMP_ALIGN
#endif
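
/*
 * Worked example (illustrative numbers only, assuming a 124-byte
 * z_arch_esf_t, CONFIG_PMP_STACK_GUARD_MIN_SIZE=64 and a 16-byte
 * Z_RISCV_STACK_PMP_ALIGN): the guard must hold at least
 * 124 + 64 = 188 bytes. With CONFIG_PMP_POWER_OF_TWO_ALIGNMENT this
 * becomes Z_POW2_CEIL(188) = 256 bytes, since PMP NAPOT regions must be
 * power-of-two sized and aligned; otherwise it is ROUND_UP(188, 16) = 192.
 */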

/* Kernel-only stacks have the following layout if a stack guard is enabled:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+ <- thread.stack_info.start
 * | Kernel     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_KERNEL_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE

#else /* !CONFIG_PMP_STACK_GUARD */
#define Z_RISCV_STACK_GUARD_SIZE 0
#endif

#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
/* The privilege elevation stack is located in another area of memory
 * generated at build time by gen_kobject_list.py
 *
 * +------------+ <- thread.arch.priv_stack_start
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
 * +------------+ <- thread.arch.priv_stack_start +
 *                   CONFIG_PRIVILEGED_STACK_SIZE +
 *                   Z_RISCV_STACK_GUARD_SIZE
 *
 * The main stack will be initially (or potentially only) used by kernel
 * mode so we need to make room for a possible stack guard area when
 * enabled:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +............| <- thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 *
 * When transitioning to user space, the guard area will be removed from
 * the main stack. Any thread running in user mode will have full access
 * to the region denoted by thread.stack_info. Make it PMP-NAPOT compatible.
 *
 * +------------+ <- thread.stack_obj = thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	Z_POW2_CEIL(MAX(MAX(size, CONFIG_PRIVILEGED_STACK_SIZE), \
			Z_RISCV_STACK_PMP_ALIGN))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
	ARCH_THREAD_STACK_SIZE_ADJUST(size)

#else /* !CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */

/* The stack object will contain the PMP guard, the privilege stack, and then
 * the usermode stack buffer in that order:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
 * +------------+ <- thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED \
	ROUND_UP(Z_RISCV_STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE, \
		 Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	ROUND_UP(size, Z_RISCV_STACK_PMP_ALIGN)
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_RISCV_STACK_PMP_ALIGN
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
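
/*
 * Worked example (illustrative, assuming CONFIG_PRIVILEGED_STACK_SIZE=1024
 * and Z_RISCV_STACK_PMP_ALIGN=16): for a requested stack size of 3000
 * bytes, the power-of-two path yields
 * ARCH_THREAD_STACK_SIZE_ADJUST(3000) = Z_POW2_CEIL(3000) = 4096, with the
 * stack object aligned to 4096 so the region is PMP-NAPOT encodable. The
 * non-power-of-two path instead yields ROUND_UP(3000, 16) = 3008 with only
 * 16-byte alignment, reserving the guard plus privilege stack up front via
 * ARCH_THREAD_STACK_RESERVED.
 */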

#ifdef CONFIG_64BIT
#define RV_REGSIZE 8
#define RV_REGSHIFT 3
#else
#define RV_REGSIZE 4
#define RV_REGSHIFT 2
#endif
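
/*
 * Illustrative note: a save area of N registers occupies N * RV_REGSIZE
 * bytes, equivalently (N << RV_REGSHIFT); e.g. 32 registers take 256 bytes
 * on RV64 and 128 bytes on RV32.
 */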

/* Common mstatus bits. All supported cores today have the same
 * layouts.
 */

#define MSTATUS_IEN     (1UL << 3)
#define MSTATUS_MPP_M   (3UL << 11)
#define MSTATUS_MPIE_EN (1UL << 7)

#define MSTATUS_FS_OFF   (0UL << 13)
#define MSTATUS_FS_INIT  (1UL << 13)
#define MSTATUS_FS_CLEAN (2UL << 13)
#define MSTATUS_FS_DIRTY (3UL << 13)

/* This comes from openisa_rv32m1, but doesn't seem to hurt on other
 * platforms:
 * - Preserve machine privileges in MPP. If you see any documentation
 *   telling you that MPP is read-only on this SoC, don't believe its
 *   lies.
 * - Enable interrupts when exiting from exception into a new thread
 *   by setting MPIE now, so it will be copied into IE on mret.
 */
#define MSTATUS_DEF_RESTORE (MSTATUS_MPP_M | MSTATUS_MPIE_EN)
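
/*
 * Illustrative sketch (assumed values, not lifted from this file): a new
 * thread's initial mstatus image might be seeded as
 *
 *	unsigned long mstatus = MSTATUS_DEF_RESTORE | MSTATUS_FS_INIT;
 *
 * so that the first mret lands in machine mode with interrupts enabled
 * (MPP=machine, MPIE set) and, where an FPU exists, with the FP state
 * marked "initial".
 */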

#ifndef _ASMLANGUAGE
#include <zephyr/sys/util.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_IRQ_VECTOR_TABLE_JUMP_BY_CODE
#define ARCH_IRQ_VECTOR_JUMP_CODE(v) "j " STRINGIFY(v)
#endif
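
/*
 * Illustrative note: for a hypothetical ISR symbol my_timer_isr,
 * ARCH_IRQ_VECTOR_JUMP_CODE(my_timer_isr) expands to "j " "my_timer_isr",
 * i.e. the concatenated assembly string "j my_timer_isr" emitted into that
 * vector's slot when the vector table jumps by code.
 */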

/* Kernel macros for memory attribution
 * (access permissions and cache-ability).
 *
 * The macros are to be stored in k_mem_partition_attr_t
 * objects. The format of a k_mem_partition_attr_t object
 * is a uint8_t composed of configuration register flags
 * located in arch/riscv/include/core_pmp.h
 */

/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW	((k_mem_partition_attr_t) \
					{PMP_R | PMP_W})
#define K_MEM_PARTITION_P_RW_U_RO	((k_mem_partition_attr_t) \
					{PMP_R})
#define K_MEM_PARTITION_P_RW_U_NA	((k_mem_partition_attr_t) \
					{0})
#define K_MEM_PARTITION_P_RO_U_RO	((k_mem_partition_attr_t) \
					{PMP_R})
#define K_MEM_PARTITION_P_RO_U_NA	((k_mem_partition_attr_t) \
					{0})
#define K_MEM_PARTITION_P_NA_U_NA	((k_mem_partition_attr_t) \
					{0})

/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX	((k_mem_partition_attr_t) \
					{PMP_R | PMP_W | PMP_X})
#define K_MEM_PARTITION_P_RX_U_RX	((k_mem_partition_attr_t) \
					{PMP_R | PMP_X})
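
/*
 * Usage sketch (hypothetical names; relies on Zephyr's generic memory
 * domain API rather than anything defined in this file):
 *
 *	uint8_t __aligned(256) dev_buf[256];
 *	K_MEM_PARTITION_DEFINE(dev_part, dev_buf, sizeof(dev_buf),
 *			       K_MEM_PARTITION_P_RW_U_RW);
 *
 * This grants both kernel and user mode read/write access to dev_buf.
 */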

/* Typedef for the k_mem_partition attribute */
typedef struct {
	uint8_t pmp_attr;
} k_mem_partition_attr_t;

struct arch_mem_domain {
	unsigned int pmp_update_nr;
};

extern void z_irq_spurious(const void *unused);

/*
 * Use the atomic instruction csrrc to lock out global interrupts.
 * csrrc: atomically read and clear bits in a CSR.
 */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	return z_soc_irq_lock();
#else
	unsigned int key;

	__asm__ volatile ("csrrc %0, mstatus, %1"
			  : "=r" (key)
			  : "rK" (MSTATUS_IEN)
			  : "memory");

	return key;
#endif
}

/*
 * Use the atomic instruction csrs to unlock global interrupts.
 * csrs: atomically set bits in a CSR.
 */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	z_soc_irq_unlock(key);
#else
	__asm__ volatile ("csrs mstatus, %0"
			  :
			  : "r" (key & MSTATUS_IEN)
			  : "memory");
#endif
}

static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
	return z_soc_irq_unlocked(key);
#else
	return (key & MSTATUS_IEN) != 0;
#endif
}
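
/*
 * Usage sketch (illustrative): the key returned by arch_irq_lock() is the
 * previous MSTATUS_IEN state, so critical sections nest naturally:
 *
 *	unsigned int key = arch_irq_lock();
 *	// ... critical section, interrupts masked ...
 *	if (arch_irq_unlocked(key)) {
 *		// interrupts were enabled when the lock was taken
 *	}
 *	arch_irq_unlock(key);	// restores the previous interrupt state
 */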

static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile("nop");
}

static inline uint32_t arch_k_cycle_get_32(void)
{
	return sys_clock_cycle_get_32();
}

static inline uint64_t arch_k_cycle_get_64(void)
{
	return sys_clock_cycle_get_64();
}
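
/*
 * Usage sketch (illustrative): 32-bit cycle deltas remain correct across
 * counter wraparound thanks to unsigned modular arithmetic:
 *
 *	uint32_t start = arch_k_cycle_get_32();
 *	// ... measured work ...
 *	uint32_t cycles = arch_k_cycle_get_32() - start;
 */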

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#if defined(CONFIG_RISCV_PRIVILEGED)
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_ARCH_H_ */