(empty log message)
@@ -77,9 +77,9 @@ | ||
77 | 77 | */ |
78 | 78 | uint32_t hdr_exit; |
79 | 79 | /** |
80 | - * @brief VFQ vector. | |
80 | + * @brief VRQ vector. | |
81 | 81 | */ |
82 | - uint32_t hdr_vfq; | |
82 | + uint32_t hdr_vrq; | |
83 | 83 | /** |
84 | 84 | * @brief Used-defined parameters, defaulted to zero. |
85 | 85 | */ |
@@ -48,6 +48,15 @@ | ||
48 | 48 | #define SB_SYSC_EVENT_WAIT_ANY 9 |
49 | 49 | #define SB_SYSC_EVENT_WAIT_ALL 10 |
50 | 50 | #define SB_SYSC_EVENT_BROADCAST 11 |
51 | +#define SB_SYSC_LOADELF 12 | |
52 | +#define SB_SYSC_VRQ_SETWT 248 | |
53 | +#define SB_SYSC_VRQ_CLRWT 249 | |
54 | +#define SB_SYSC_VRQ_SETEN 250 | |
55 | +#define SB_SYSC_VRQ_CLREN 251 | |
56 | +#define SB_SYSC_VRQ_DISABLE 252 | |
57 | +#define SB_SYSC_VRQ_ENABLE 253 | |
58 | +#define SB_SYSC_VRQ_GETISR 254 | |
59 | +#define SB_SYSC_VRQ_RETURN 255 | |
51 | 60 | /** @} */ |
52 | 61 | |
53 | 62 | /** |
@@ -72,14 +72,18 @@ | ||
72 | 72 | /** @} */ |
73 | 73 | |
74 | 74 | /** |
75 | - * @name Standard API handlers | |
75 | + * @name VRQ pseudo-instructions handlers | |
76 | 76 | * @{ |
77 | 77 | */ |
78 | 78 | #if (SB_CFG_ENABLE_VRQ == TRUE) || defined(__DOXYGEN__) |
79 | -#define SB_SVC252_HANDLER sb_vrq_disable | |
80 | -#define SB_SVC253_HANDLER sb_vrq_enable | |
81 | -#define SB_SVC254_HANDLER sb_vrq_getisr | |
82 | -#define SB_SVC255_HANDLER sb_vrq_return | |
79 | +#define SB_SVC248_HANDLER sb_api_vrq_setwt | |
80 | +#define SB_SVC249_HANDLER sb_api_vrq_clrwt | |
81 | +#define SB_SVC250_HANDLER sb_api_vrq_seten | |
82 | +#define SB_SVC251_HANDLER sb_api_vrq_clren | |
83 | +#define SB_SVC252_HANDLER sb_api_vrq_disable | |
84 | +#define SB_SVC253_HANDLER sb_api_vrq_enable | |
85 | +#define SB_SVC254_HANDLER sb_api_vrq_getisr | |
86 | +#define SB_SVC255_HANDLER sb_api_vrq_return | |
83 | 87 | #endif |
84 | 88 | /** @} */ |
85 | 89 |
@@ -25,8 +25,6 @@ | ||
25 | 25 | * @{ |
26 | 26 | */ |
27 | 27 | |
28 | -#include <string.h> | |
29 | - | |
30 | 28 | #include "sb.h" |
31 | 29 | |
32 | 30 | #if (SB_CFG_ENABLE_VRQ == TRUE) || defined(__DOXYGEN__) |
@@ -51,7 +49,7 @@ | ||
51 | 49 | /* Module local functions. */ |
52 | 50 | /*===========================================================================*/ |
53 | 51 | |
54 | -__STATIC_FORCEINLINE void vfq_makectx(sb_class_t *sbp, | |
52 | +__STATIC_FORCEINLINE void vrq_makectx(sb_class_t *sbp, | |
55 | 53 | struct port_extctx *newctxp, |
56 | 54 | uint32_t active_mask) { |
57 | 55 | uint32_t irqn = __CLZ(active_mask); |
@@ -59,7 +57,7 @@ | ||
59 | 57 | |
60 | 58 | /* Building the return context.*/ |
61 | 59 | newctxp->r0 = irqn; |
62 | - newctxp->pc = sbp->sbhp->hdr_vfq; /* TODO validate or let it eventually crash? */ | |
60 | + newctxp->pc = sbp->sbhp->hdr_vrq; | |
63 | 61 | newctxp->xpsr = 0x01000000U; |
64 | 62 | #if CORTEX_USE_FPU == TRUE |
65 | 63 | newctxp->fpscr = FPU->FPDSCR; |
@@ -66,10 +64,34 @@ | ||
66 | 64 | #endif |
67 | 65 | } |
68 | 66 | |
67 | +static void vrq_check_trigger(sb_class_t *sbp, struct port_extctx *ectxp) { | |
68 | + | |
69 | + /* Triggering the VRQ if required.*/ | |
70 | + if ((sbp->vrq_isr & SB_VRQ_ISR_DISABLED) == 0U) { | |
71 | + sb_vrqmask_t active_mask = sbp->vrq_wtmask & sbp->vrq_enmask; | |
72 | + | |
73 | + if (active_mask != 0U) { | |
74 | + /* Creating a context for return.*/ | |
75 | + ectxp--; | |
76 | + | |
77 | + /* Checking if the new frame is within the sandbox else failure.*/ | |
78 | + if (!sb_is_valid_write_range(sbp, | |
79 | + (void *)ectxp, | |
80 | + sizeof (struct port_extctx))) { | |
81 | + __sb_abort(CH_RET_EFAULT); | |
82 | + } | |
83 | + | |
84 | + /* Building the return context.*/ | |
85 | + vrq_makectx(sbp, ectxp, active_mask); | |
86 | + __port_syscall_set_u_psp(sbp->tp, ectxp); | |
87 | + } | |
88 | + } | |
89 | +} | |
90 | + | |
69 | 91 | /** |
70 | 92 | * @brief Used as a known privileged address. |
71 | 93 | */ |
72 | -static void vfq_privileged_code(void) { | |
94 | +static void vrq_privileged_code(void) { | |
73 | 95 | |
74 | 96 | while (true) { |
75 | 97 | } |
@@ -81,6 +103,48 @@ | ||
81 | 103 | |
82 | 104 | /** |
83 | 105 | * @brief Triggers VRQs on the specified sandbox. |
106 | + * | |
107 | + * @param[in] sbp pointer to a @p sb_class_t structure | |
108 | + * @param[in] vmask mask of VRQs to be activated | |
109 | + * | |
110 | + * @sclass | |
111 | + */ | |
112 | +void sbVRQTriggerS(sb_class_t *sbp, sb_vrqmask_t vmask) { | |
113 | + | |
114 | + chDbgCheckClassS(); | |
115 | + | |
116 | + /* Adding VRQ mask to the pending mask.*/ | |
117 | + sbp->vrq_wtmask |= vmask; | |
118 | + | |
119 | + /* Triggering the VRQ if required.*/ | |
120 | + if ((sbp->vrq_isr & SB_VRQ_ISR_DISABLED) == 0U) { | |
121 | + sb_vrqmask_t active_mask = sbp->vrq_wtmask & sbp->vrq_enmask; | |
122 | + | |
123 | + if (active_mask != 0U) { | |
124 | + struct port_extctx *ectxp, *newctxp; | |
125 | + /* Getting the pointer from the context switch structure.*/ | |
126 | + ectxp = sbp->tp->ctx.sp; | |
127 | + newctxp = ectxp - 1; | |
128 | + | |
129 | + /* Checking if the new frame is within the sandbox else failure.*/ | |
130 | + if (!sb_is_valid_write_range(sbp, | |
131 | + (void *)newctxp, | |
132 | + sizeof (struct port_extctx))) { | |
133 | + /* Making the sandbox return on a privileged address, this | |
134 | + will cause a fault and sandbox termination.*/ | |
135 | + ectxp->pc = (uint32_t)vrq_privileged_code; | |
136 | + return; | |
137 | + } | |
138 | + | |
139 | + /* Building the return context.*/ | |
140 | + vrq_makectx(sbp, newctxp, active_mask); | |
141 | + __port_syscall_set_u_psp(sbp->tp, newctxp); | |
142 | + } | |
143 | + } | |
144 | +} | |
145 | + | |
146 | +/** | |
147 | + * @brief Triggers VRQs on the specified sandbox. | |
84 | 148 | * @note This function must be called from IRQ context because |
85 | 149 | * it manipulates exception stack frames. |
86 | 150 | * |
@@ -97,7 +161,7 @@ | ||
97 | 161 | sbp->vrq_wtmask |= vmask; |
98 | 162 | |
99 | 163 | /* Triggering the VRQ if required.*/ |
100 | - if (sbp->vrq_isr == 0U) { | |
164 | + if ((sbp->vrq_isr & SB_VRQ_ISR_DISABLED) == 0U) { | |
101 | 165 | sb_vrqmask_t active_mask = sbp->vrq_wtmask & sbp->vrq_enmask; |
102 | 166 | |
103 | 167 | if (active_mask != 0U) { |
@@ -117,7 +181,7 @@ | ||
117 | 181 | /* Making the sandbox return on a privileged address, this |
118 | 182 | will cause a fault and sandbox termination.*/ |
119 | 183 | chSysUnlockFromISR(); |
120 | - ectxp->pc = (uint32_t)vfq_privileged_code; | |
184 | + ectxp->pc = (uint32_t)vrq_privileged_code; | |
121 | 185 | return; |
122 | 186 | } |
123 | 187 | } |
@@ -134,17 +198,13 @@ | ||
134 | 198 | /* Making the sandbox return on a privileged address, this |
135 | 199 | will cause a fault and sandbox termination.*/ |
136 | 200 | chSysUnlockFromISR(); |
137 | - ectxp->pc = (uint32_t)vfq_privileged_code; | |
201 | + ectxp->pc = (uint32_t)vrq_privileged_code; | |
138 | 202 | return; |
139 | 203 | } |
140 | - | |
141 | - /* Preventing leakage of information, clearing all register values, those | |
142 | - would come from outside the sandbox.*/ | |
143 | - memset((void *)newctxp, 0, sizeof (struct port_extctx)); | |
144 | 204 | } |
145 | 205 | |
146 | 206 | /* Building the return context.*/ |
147 | - vfq_makectx(sbp, newctxp, active_mask); | |
207 | + vrq_makectx(sbp, newctxp, active_mask); | |
148 | 208 | __port_syscall_set_u_psp(sbp->tp, newctxp); |
149 | 209 | } |
150 | 210 | } |
@@ -154,66 +214,78 @@ | ||
154 | 214 | return; |
155 | 215 | } |
156 | 216 | |
157 | -void sb_vrq_disable(struct port_extctx *ectxp) { | |
217 | +void sb_api_vrq_setwt(struct port_extctx *ectxp) { | |
158 | 218 | sb_class_t *sbp = (sb_class_t *)chThdGetSelfX()->ctx.syscall.p; |
159 | 219 | |
220 | +  sb_vrqmask_t m = ectxp->r0; | 
221 | +  ectxp->r0 = sbp->vrq_wtmask; | 
222 | +  sbp->vrq_wtmask |= m; | 
222 | + | |
223 | + vrq_check_trigger(sbp, ectxp); | |
224 | +} | |
225 | + | |
226 | +void sb_api_vrq_clrwt(struct port_extctx *ectxp) { | |
227 | + sb_class_t *sbp = (sb_class_t *)chThdGetSelfX()->ctx.syscall.p; | |
228 | + | |
229 | +  sb_vrqmask_t m = ectxp->r0; | 
230 | +  ectxp->r0 = sbp->vrq_wtmask; | 
231 | +  sbp->vrq_wtmask &= ~m; | 
231 | +} | |
232 | + | |
233 | +void sb_api_vrq_seten(struct port_extctx *ectxp) { | |
234 | + sb_class_t *sbp = (sb_class_t *)chThdGetSelfX()->ctx.syscall.p; | |
235 | + | |
236 | +  sb_vrqmask_t m = ectxp->r0; | 
237 | +  ectxp->r0 = sbp->vrq_enmask; | 
238 | +  sbp->vrq_enmask |= m; | 
238 | + | |
239 | + vrq_check_trigger(sbp, ectxp); | |
240 | +} | |
241 | + | |
242 | +void sb_api_vrq_clren(struct port_extctx *ectxp) { | |
243 | + sb_class_t *sbp = (sb_class_t *)chThdGetSelfX()->ctx.syscall.p; | |
244 | + | |
245 | +  sb_vrqmask_t m = ectxp->r0; | 
246 | +  ectxp->r0 = sbp->vrq_enmask; | 
247 | +  sbp->vrq_enmask &= ~m; | 
247 | +} | |
248 | + | |
249 | +void sb_api_vrq_disable(struct port_extctx *ectxp) { | |
250 | + sb_class_t *sbp = (sb_class_t *)chThdGetSelfX()->ctx.syscall.p; | |
251 | + | |
160 | 252 | ectxp->r0 = sbp->vrq_isr; |
161 | 253 | sbp->vrq_isr |= SB_VRQ_ISR_DISABLED; |
162 | 254 | } |
163 | 255 | |
164 | -void sb_vrq_enable(struct port_extctx *ectxp) { | |
256 | +void sb_api_vrq_enable(struct port_extctx *ectxp) { | |
165 | 257 | sb_class_t *sbp = (sb_class_t *)chThdGetSelfX()->ctx.syscall.p; |
166 | 258 | |
167 | 259 | ectxp->r0 = sbp->vrq_isr; |
168 | 260 | sbp->vrq_isr &= ~SB_VRQ_ISR_DISABLED; |
169 | 261 | |
170 | - /* Re-triggering the VRQ if required.*/ | |
171 | - if (sbp->vrq_isr == 0U) { | |
172 | - sb_vrqmask_t active_mask = sbp->vrq_wtmask & sbp->vrq_enmask; | |
173 | - | |
174 | - if (active_mask != 0U) { | |
175 | - /* Creating a context for return.*/ | |
176 | - ectxp--; | |
177 | - | |
178 | - /* Checking if the new frame is within the sandbox else failure.*/ | |
179 | - if (!sb_is_valid_write_range(sbp, | |
180 | - (void *)ectxp, | |
181 | - sizeof (struct port_extctx))) { | |
182 | - __sb_abort(CH_RET_EFAULT); | |
183 | - } | |
184 | - | |
185 | - /* Building the return context.*/ | |
186 | - vfq_makectx(sbp, ectxp, active_mask); | |
187 | - __port_syscall_set_u_psp(sbp->tp, ectxp); | |
188 | - } | |
189 | - } | |
262 | + vrq_check_trigger(sbp, ectxp); | |
190 | 263 | } |
191 | 264 | |
192 | -void sb_vrq_getisr(struct port_extctx *ectxp) { | |
265 | +void sb_api_vrq_getisr(struct port_extctx *ectxp) { | |
193 | 266 | sb_class_t *sbp = (sb_class_t *)chThdGetSelfX()->ctx.syscall.p; |
194 | 267 | |
195 | 268 | ectxp->r0 = sbp->vrq_isr; |
196 | 269 | } |
197 | 270 | |
198 | -void sb_vrq_return(struct port_extctx *ectxp) { | |
271 | +void sb_api_vrq_return(struct port_extctx *ectxp) { | |
199 | 272 | sb_class_t *sbp = (sb_class_t *)chThdGetSelfX()->ctx.syscall.p; |
273 | + sb_vrqmask_t active_mask; | |
200 | 274 | |
201 | - if (((sbp->vrq_isr & SB_VRQ_ISR_IRQMODE) == 0U)) { | |
275 | + /* VRQs must be disabled on return, sanity check.*/ | |
276 | + if (((sbp->vrq_isr & SB_VRQ_ISR_DISABLED) == 0U)) { | |
202 | 277 | __sb_abort(CH_RET_EFAULT); |
203 | 278 | } |
204 | 279 | |
205 | 280 | /* Re-triggering the VRQ if required.*/ |
206 | - if (sbp->vrq_isr == 0U) { | |
207 | - sb_vrqmask_t active_mask = sbp->vrq_wtmask & sbp->vrq_enmask; | |
208 | - | |
209 | - if (active_mask != 0U) { | |
210 | - /* Building the return context, reusing the current context structure.*/ | |
211 | - vfq_makectx(sbp, ectxp, active_mask); | |
212 | - } | |
281 | + active_mask = sbp->vrq_wtmask & sbp->vrq_enmask; | |
282 | + if (active_mask != 0U) { | |
283 | + /* Building the return context, reusing the current context structure.*/ | |
284 | + vrq_makectx(sbp, ectxp, active_mask); | |
213 | 285 | } |
214 | 286 | else { |
215 | - /* Ending IRQ mode.*/ | |
216 | - sbp->vrq_isr &= ~SB_VRQ_ISR_IRQMODE; | |
287 | + /* Returning from VRQ.*/ | |
288 | + sbp->vrq_isr &= ~SB_VRQ_ISR_DISABLED; | |
217 | 289 | |
218 | 290 | /* Discarding the return current context, returning on the previous one.*/ |
219 | 291 | ectxp++; |
@@ -39,7 +39,6 @@ | ||
39 | 39 | * @{ |
40 | 40 | */ |
41 | 41 | #define SB_VRQ_ISR_DISABLED 1U |
42 | -#define SB_VRQ_ISR_IRQMODE 2U | |
43 | 42 | /** @} */ |
44 | 43 | |
45 | 44 | /*===========================================================================*/ |
@@ -65,11 +64,16 @@ | ||
65 | 64 | #ifdef __cplusplus |
66 | 65 | extern "C" { |
67 | 66 | #endif |
67 | + void sbVRQTriggerS(sb_class_t *sbp, sb_vrqmask_t vmask); | |
68 | 68 | void sbVRQTriggerFromISR(sb_class_t *sbp, sb_vrqmask_t vmask); |
69 | - void sb_vrq_disable(struct port_extctx *ectxp); | |
70 | - void sb_vrq_enable(struct port_extctx *ectxp); | |
71 | - void sb_vrq_getisr(struct port_extctx *ectxp); | |
72 | - void sb_vrq_return(struct port_extctx *ectxp); | |
69 | + void sb_api_vrq_setwt(struct port_extctx *ectxp); | |
70 | + void sb_api_vrq_clrwt(struct port_extctx *ectxp); | |
71 | + void sb_api_vrq_seten(struct port_extctx *ectxp); | |
72 | + void sb_api_vrq_clren(struct port_extctx *ectxp); | |
73 | + void sb_api_vrq_disable(struct port_extctx *ectxp); | |
74 | + void sb_api_vrq_enable(struct port_extctx *ectxp); | |
75 | + void sb_api_vrq_getisr(struct port_extctx *ectxp); | |
76 | + void sb_api_vrq_return(struct port_extctx *ectxp); | |
73 | 77 | #ifdef __cplusplus |
74 | 78 | } |
75 | 79 | #endif |
@@ -836,6 +836,87 @@ | ||
836 | 836 | sbSleep(sbTimeUS2I(usecs)); |
837 | 837 | } |
838 | 838 | |
839 | +/** | |
840 | + * @brief VRQ @p setwt pseudo-instruction. | |
841 | + * | |
842 | + * @api | |
843 | + */ | |
844 | +static inline void __sb_vrq_setwt(void) { | |
845 | + | |
846 | + __syscall0(248); | |
847 | +} | |
848 | + | |
849 | +/** | |
850 | + * @brief VRQ @p clrwt pseudo-instruction. | |
851 | + * | |
852 | + * @api | |
853 | + */ | |
854 | +static inline void __sb_vrq_clrwt(void) { | |
855 | + | |
856 | + __syscall0(249); | |
857 | +} | |
858 | + | |
859 | +/** | |
860 | + * @brief VRQ @p seten pseudo-instruction. | |
861 | + * | |
862 | + * @api | |
863 | + */ | |
864 | +static inline void __sb_vrq_seten(void) { | |
865 | + | |
866 | + __syscall0(250); | |
867 | +} | |
868 | + | |
869 | +/** | |
870 | + * @brief VRQ @p clren pseudo-instruction. | |
871 | + * | |
872 | + * @api | |
873 | + */ | |
874 | +static inline void __sb_vrq_clren(void) { | |
875 | + | |
876 | + __syscall0(251); | |
877 | +} | |
878 | + | |
879 | +/** | |
880 | + * @brief VRQ @p disable pseudo-instruction. | |
881 | + * | |
882 | + * @api | |
883 | + */ | |
884 | +static inline void __sb_vrq_disable(void) { | |
885 | + | |
886 | + __syscall0(252); | |
887 | +} | |
888 | + | |
889 | +/** | |
890 | + * @brief VRQ @p enable pseudo-instruction. | |
891 | + * | |
892 | + * @api | |
893 | + */ | |
894 | +static inline void __sb_vrq_enable(void) { | |
895 | + | |
896 | + __syscall0(253); | |
897 | +} | |
898 | + | |
899 | +/** | |
900 | + * @brief VRQ @p getisr pseudo-instruction. | |
901 | + * | |
902 | + * @api | |
903 | + */ | |
904 | +static inline uint32_t __sb_vrq_getisr(void) { | |
905 | + | |
906 | + __syscall0r(254); | |
907 | + return r0; | |
908 | +} | |
909 | + | |
910 | +/** | |
911 | + * @brief VRQ return pseudo-instruction. | |
912 | + * | |
913 | + * @api | |
914 | + */ | |
915 | +static inline void __sb_vrq_return(void) { | |
916 | + | |
917 | + __syscall0(255); | |
918 | +} | |
919 | + | |
839 | 920 | #endif /* SBUSER_H */ |
840 | 921 | |
841 | 922 | /** @} */ |