Skip to content

Commit 46aa671

Browse files
Nicolas Pitre authored and kartben committed
Revert "arch: deprecate _current"
Mostly a revert of commit b1def71 ("arch: deprecate `_current`"). This commit was part of PR zephyrproject-rtos#80716 whose initial purpose was about providing an architecture specific optimization for _current. The actual deprecation was sneaked in later on without proper discussion. The Zephyr core always used _current before and that was fine. It is quite prevalent as well and the alternative is proving rather verbose. Furthermore, as a concept, the "current thread" is not something that is necessarily architecture specific. Therefore the primary abstraction should not carry the arch_ prefix. Hence this revert. Signed-off-by: Nicolas Pitre <[email protected]>
1 parent 31f5819 commit 46aa671

File tree

111 files changed

+494
-504
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

111 files changed

+494
-504
lines changed

arch/arc/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -262,7 +262,7 @@ config ARC_CURRENT_THREAD_USE_NO_TLS
262262
RGF_NUM_BANKS the parameter is disabled by-default because banks syncronization
263263
requires significant time, and it slows down performance.
264264
ARCMWDT works with tls pointer in different way then GCC. Optimized access to
265-
TLS pointer via arch_current_thread() does not provide significant advantages
265+
TLS pointer via the _current symbol does not provide significant advantages
266266
in case of MetaWare.
267267

268268
config GEN_ISR_TABLES

arch/arc/core/fault.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ static bool z_check_thread_stack_fail(const uint32_t fault_addr, uint32_t sp)
5555
{
5656
#if defined(CONFIG_MULTITHREADING)
5757
uint32_t guard_end, guard_start;
58-
const struct k_thread *thread = arch_current_thread();
58+
const struct k_thread *thread = _current;
5959

6060
if (!thread) {
6161
/* TODO: Under what circumstances could we get here ? */

arch/arc/core/irq_offload.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -49,8 +49,8 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
4949

5050
__asm__ volatile("sync");
5151

52-
/* If arch_current_thread() was aborted in the offload routine, we shouldn't be here */
53-
__ASSERT_NO_MSG((arch_current_thread()->base.thread_state & _THREAD_DEAD) == 0);
52+
/* If _current was aborted in the offload routine, we shouldn't be here */
53+
__ASSERT_NO_MSG((_current->base.thread_state & _THREAD_DEAD) == 0);
5454
}
5555

5656
/* need to be executed on every core in the system */

arch/arc/core/thread.c

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -210,7 +210,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
210210
#ifdef CONFIG_MULTITHREADING
211211
void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
212212
{
213-
*old_thread = arch_current_thread();
213+
*old_thread = _current;
214214

215215
return z_get_next_switch_handle(NULL);
216216
}
@@ -227,16 +227,16 @@ void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
227227
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
228228
void *p1, void *p2, void *p3)
229229
{
230-
setup_stack_vars(arch_current_thread());
230+
setup_stack_vars(_current);
231231

232232
/* possible optimizaiton: no need to load mem domain anymore */
233233
/* need to lock cpu here ? */
234-
configure_mpu_thread(arch_current_thread());
234+
configure_mpu_thread(_current);
235235

236236
z_arc_userspace_enter(user_entry, p1, p2, p3,
237-
(uint32_t)arch_current_thread()->stack_info.start,
238-
(arch_current_thread()->stack_info.size -
239-
arch_current_thread()->stack_info.delta), arch_current_thread());
237+
(uint32_t)_current->stack_info.start,
238+
(_current->stack_info.size -
239+
_current->stack_info.delta), _current);
240240
CODE_UNREACHABLE;
241241
}
242242
#endif
@@ -336,7 +336,7 @@ int arc_vpx_lock(k_timeout_t timeout)
336336

337337
id = _current_cpu->id;
338338
#if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK)
339-
__ASSERT(!arch_is_in_isr() && (arch_current_thread()->base.cpu_mask == BIT(id)), "");
339+
__ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), "");
340340
#endif
341341
k_spin_unlock(&lock, key);
342342

@@ -355,7 +355,7 @@ void arc_vpx_unlock(void)
355355

356356
key = k_spin_lock(&lock);
357357
#if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK)
358-
__ASSERT(!arch_is_in_isr() && (arch_current_thread()->base.cpu_mask == BIT(id)), "");
358+
__ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), "");
359359
#endif
360360
id = _current_cpu->id;
361361
k_spin_unlock(&lock, key);

arch/arc/core/tls.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr)
2929

3030
void *_Preserve_flags _mwget_tls(void)
3131
{
32-
return (void *)(arch_current_thread()->tls);
32+
return (void *)(_current->tls);
3333
}
3434

3535
#else

arch/arm/core/cortex_a_r/fault.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -178,7 +178,7 @@ bool z_arm_fault_undef_instruction_fp(void)
178178
* context because it is about to be overwritten.
179179
*/
180180
if (((_current_cpu->nested == 2)
181-
&& (arch_current_thread()->base.user_options & K_FP_REGS))
181+
&& (_current->base.user_options & K_FP_REGS))
182182
|| ((_current_cpu->nested > 2)
183183
&& (spill_esf->undefined & FPEXC_EN))) {
184184
/*
@@ -196,7 +196,7 @@ bool z_arm_fault_undef_instruction_fp(void)
196196
* means that a thread that uses the VFP does not have to,
197197
* but should, set K_FP_REGS on thread creation.
198198
*/
199-
arch_current_thread()->base.user_options |= K_FP_REGS;
199+
_current->base.user_options |= K_FP_REGS;
200200
}
201201

202202
return false;

arch/arm/core/cortex_a_r/swap_helper.S

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ SECTION_FUNC(TEXT, z_arm_do_swap)
7070

7171
#if defined(CONFIG_FPU_SHARING)
7272
ldrb r0, [r2, #_thread_offset_to_user_options]
73-
tst r0, #K_FP_REGS /* arch_current_thread()->base.user_options & K_FP_REGS */
73+
tst r0, #K_FP_REGS /* _current->base.user_options & K_FP_REGS */
7474
beq out_fp_inactive
7575

7676
mov ip, #FPEXC_EN
@@ -152,7 +152,7 @@ out_fp_inactive:
152152

153153
#if defined(CONFIG_FPU_SHARING)
154154
ldrb r0, [r2, #_thread_offset_to_user_options]
155-
tst r0, #K_FP_REGS /* arch_current_thread()->base.user_options & K_FP_REGS */
155+
tst r0, #K_FP_REGS /* _current->base.user_options & K_FP_REGS */
156156
beq in_fp_inactive
157157

158158
mov r3, #FPEXC_EN

arch/arm/core/cortex_a_r/thread.c

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -198,8 +198,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
198198
{
199199

200200
/* Set up privileged stack before entering user mode */
201-
arch_current_thread()->arch.priv_stack_start =
202-
(uint32_t)z_priv_stack_find(arch_current_thread()->stack_obj);
201+
_current->arch.priv_stack_start =
202+
(uint32_t)z_priv_stack_find(_current->stack_obj);
203203
#if defined(CONFIG_MPU_STACK_GUARD)
204204
#if defined(CONFIG_THREAD_STACK_INFO)
205205
/* We're dropping to user mode which means the guard area is no
@@ -208,37 +208,37 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
208208
* which accounted for memory borrowed from the thread stack.
209209
*/
210210
#if FP_GUARD_EXTRA_SIZE > 0
211-
if ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
212-
arch_current_thread()->stack_info.start -= FP_GUARD_EXTRA_SIZE;
213-
arch_current_thread()->stack_info.size += FP_GUARD_EXTRA_SIZE;
211+
if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
212+
_current->stack_info.start -= FP_GUARD_EXTRA_SIZE;
213+
_current->stack_info.size += FP_GUARD_EXTRA_SIZE;
214214
}
215215
#endif /* FP_GUARD_EXTRA_SIZE */
216-
arch_current_thread()->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
217-
arch_current_thread()->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
216+
_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
217+
_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
218218
#endif /* CONFIG_THREAD_STACK_INFO */
219219

220220
/* Stack guard area reserved at the bottom of the thread's
221221
* privileged stack. Adjust the available (writable) stack
222222
* buffer area accordingly.
223223
*/
224224
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
225-
arch_current_thread()->arch.priv_stack_start +=
226-
((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
225+
_current->arch.priv_stack_start +=
226+
((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
227227
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
228228
#else
229-
arch_current_thread()->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
229+
_current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
230230
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
231231
#endif /* CONFIG_MPU_STACK_GUARD */
232232

233233
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
234-
arch_current_thread()->arch.priv_stack_end =
235-
arch_current_thread()->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
234+
_current->arch.priv_stack_end =
235+
_current->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
236236
#endif
237237

238238
z_arm_userspace_enter(user_entry, p1, p2, p3,
239-
(uint32_t)arch_current_thread()->stack_info.start,
240-
arch_current_thread()->stack_info.size -
241-
arch_current_thread()->stack_info.delta);
239+
(uint32_t)_current->stack_info.start,
240+
_current->stack_info.size -
241+
_current->stack_info.delta);
242242
CODE_UNREACHABLE;
243243
}
244244

@@ -304,7 +304,7 @@ EXPORT_SYMBOL(z_arm_thread_is_in_user_mode);
304304
uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp)
305305
{
306306
#if defined(CONFIG_MULTITHREADING)
307-
const struct k_thread *thread = arch_current_thread();
307+
const struct k_thread *thread = _current;
308308

309309
if (thread == NULL) {
310310
return 0;
@@ -314,7 +314,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
314314
#if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \
315315
defined(CONFIG_MPU_STACK_GUARD)
316316
uint32_t guard_len =
317-
((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
317+
((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
318318
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
319319
#else
320320
/* If MPU_STACK_GUARD is not enabled, the guard length is
@@ -377,7 +377,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
377377
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
378378
int arch_float_disable(struct k_thread *thread)
379379
{
380-
if (thread != arch_current_thread()) {
380+
if (thread != _current) {
381381
return -EINVAL;
382382
}
383383

arch/arm/core/cortex_m/swap_helper.S

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -288,7 +288,7 @@ in_fp_endif:
288288
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
289289
/* Re-program dynamic memory map */
290290
push {r2,lr}
291-
mov r0, r2 /* arch_current_thread() thread */
291+
mov r0, r2 /* _current thread */
292292
bl z_arm_configure_dynamic_mpu_regions
293293
pop {r2,lr}
294294
#endif

arch/arm/core/cortex_m/thread.c

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -231,8 +231,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
231231
{
232232

233233
/* Set up privileged stack before entering user mode */
234-
arch_current_thread()->arch.priv_stack_start =
235-
(uint32_t)z_priv_stack_find(arch_current_thread()->stack_obj);
234+
_current->arch.priv_stack_start =
235+
(uint32_t)z_priv_stack_find(_current->stack_obj);
236236
#if defined(CONFIG_MPU_STACK_GUARD)
237237
#if defined(CONFIG_THREAD_STACK_INFO)
238238
/* We're dropping to user mode which means the guard area is no
@@ -241,32 +241,32 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
241241
* which accounted for memory borrowed from the thread stack.
242242
*/
243243
#if FP_GUARD_EXTRA_SIZE > 0
244-
if ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
245-
arch_current_thread()->stack_info.start -= FP_GUARD_EXTRA_SIZE;
246-
arch_current_thread()->stack_info.size += FP_GUARD_EXTRA_SIZE;
244+
if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
245+
_current->stack_info.start -= FP_GUARD_EXTRA_SIZE;
246+
_current->stack_info.size += FP_GUARD_EXTRA_SIZE;
247247
}
248248
#endif /* FP_GUARD_EXTRA_SIZE */
249-
arch_current_thread()->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
250-
arch_current_thread()->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
249+
_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
250+
_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
251251
#endif /* CONFIG_THREAD_STACK_INFO */
252252

253253
/* Stack guard area reserved at the bottom of the thread's
254254
* privileged stack. Adjust the available (writable) stack
255255
* buffer area accordingly.
256256
*/
257257
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
258-
arch_current_thread()->arch.priv_stack_start +=
259-
((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
258+
_current->arch.priv_stack_start +=
259+
((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
260260
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
261261
#else
262-
arch_current_thread()->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
262+
_current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
263263
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
264264
#endif /* CONFIG_MPU_STACK_GUARD */
265265

266266
z_arm_userspace_enter(user_entry, p1, p2, p3,
267-
(uint32_t)arch_current_thread()->stack_info.start,
268-
arch_current_thread()->stack_info.size -
269-
arch_current_thread()->stack_info.delta);
267+
(uint32_t)_current->stack_info.start,
268+
_current->stack_info.size -
269+
_current->stack_info.delta);
270270
CODE_UNREACHABLE;
271271
}
272272

@@ -379,7 +379,7 @@ void configure_builtin_stack_guard(struct k_thread *thread)
379379
uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp)
380380
{
381381
#if defined(CONFIG_MULTITHREADING)
382-
const struct k_thread *thread = arch_current_thread();
382+
const struct k_thread *thread = _current;
383383

384384
if (thread == NULL) {
385385
return 0;
@@ -389,7 +389,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
389389
#if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \
390390
defined(CONFIG_MPU_STACK_GUARD)
391391
uint32_t guard_len =
392-
((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
392+
((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
393393
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
394394
#else
395395
/* If MPU_STACK_GUARD is not enabled, the guard length is
@@ -452,7 +452,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
452452
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
453453
int arch_float_disable(struct k_thread *thread)
454454
{
455-
if (thread != arch_current_thread()) {
455+
if (thread != _current) {
456456
return -EINVAL;
457457
}
458458

arch/arm/core/cortex_m/thread_abort.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ void z_impl_k_thread_abort(k_tid_t thread)
2727
{
2828
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);
2929

30-
if (arch_current_thread() == thread) {
30+
if (_current == thread) {
3131
if (arch_is_in_isr()) {
3232
/* ARM is unlike most arches in that this is true
3333
* even for non-peripheral interrupts, even though

arch/arm/include/cortex_a_r/kernel_arch_func.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -40,16 +40,16 @@ static ALWAYS_INLINE void arch_kernel_init(void)
4040
static ALWAYS_INLINE int arch_swap(unsigned int key)
4141
{
4242
/* store off key and return value */
43-
arch_current_thread()->arch.basepri = key;
44-
arch_current_thread()->arch.swap_return_value = -EAGAIN;
43+
_current->arch.basepri = key;
44+
_current->arch.swap_return_value = -EAGAIN;
4545

4646
z_arm_cortex_r_svc();
4747
irq_unlock(key);
4848

4949
/* Context switch is performed here. Returning implies the
5050
* thread has been context-switched-in again.
5151
*/
52-
return arch_current_thread()->arch.swap_return_value;
52+
return _current->arch.swap_return_value;
5353
}
5454

5555
static ALWAYS_INLINE void

arch/arm/include/cortex_m/kernel_arch_func.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -87,8 +87,8 @@ extern void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf);
8787
static ALWAYS_INLINE int arch_swap(unsigned int key)
8888
{
8989
/* store off key and return value */
90-
arch_current_thread()->arch.basepri = key;
91-
arch_current_thread()->arch.swap_return_value = -EAGAIN;
90+
_current->arch.basepri = key;
91+
_current->arch.swap_return_value = -EAGAIN;
9292

9393
/* set pending bit to make sure we will take a PendSV exception */
9494
SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
@@ -99,7 +99,7 @@ static ALWAYS_INLINE int arch_swap(unsigned int key)
9999
/* Context switch is performed here. Returning implies the
100100
* thread has been context-switched-in again.
101101
*/
102-
return arch_current_thread()->arch.swap_return_value;
102+
return _current->arch.swap_return_value;
103103
}
104104

105105

arch/arm64/core/cortex_r/arm_mpu.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -727,7 +727,7 @@ static int configure_dynamic_mpu_regions(struct k_thread *thread)
727727
*/
728728
thread->arch.region_num = (uint8_t)region_num;
729729

730-
if (thread == arch_current_thread()) {
730+
if (thread == _current) {
731731
ret = flush_dynamic_regions_to_mpu(dyn_regions, region_num);
732732
}
733733

@@ -795,7 +795,7 @@ int arch_mem_domain_thread_add(struct k_thread *thread)
795795

796796
ret = configure_dynamic_mpu_regions(thread);
797797
#ifdef CONFIG_SMP
798-
if (ret == 0 && thread != arch_current_thread()) {
798+
if (ret == 0 && thread != _current) {
799799
/* the thread could be running on another CPU right now */
800800
z_arm64_mem_cfg_ipi();
801801
}
@@ -810,7 +810,7 @@ int arch_mem_domain_thread_remove(struct k_thread *thread)
810810

811811
ret = configure_dynamic_mpu_regions(thread);
812812
#ifdef CONFIG_SMP
813-
if (ret == 0 && thread != arch_current_thread()) {
813+
if (ret == 0 && thread != _current) {
814814
/* the thread could be running on another CPU right now */
815815
z_arm64_mem_cfg_ipi();
816816
}

arch/arm64/core/fatal.c

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -306,9 +306,8 @@ static bool z_arm64_stack_corruption_check(struct arch_esf *esf, uint64_t esr, u
306306
}
307307
}
308308
#ifdef CONFIG_USERSPACE
309-
else if ((arch_current_thread()->base.user_options & K_USER) != 0 &&
310-
GET_ESR_EC(esr) == 0x24) {
311-
sp_limit = (uint64_t)arch_current_thread()->stack_info.start;
309+
else if ((_current->base.user_options & K_USER) != 0 && GET_ESR_EC(esr) == 0x24) {
310+
sp_limit = (uint64_t)_current->stack_info.start;
312311
guard_start = sp_limit - Z_ARM64_STACK_GUARD_SIZE;
313312
sp = esf->sp;
314313
if (sp <= sp_limit || (guard_start <= far && far <= sp_limit)) {
@@ -435,7 +434,7 @@ void z_arm64_do_kernel_oops(struct arch_esf *esf)
435434
* User mode is only allowed to induce oopses and stack check
436435
* failures via software-triggered system fatal exceptions.
437436
*/
438-
if (((arch_current_thread()->base.user_options & K_USER) != 0) &&
437+
if (((_current->base.user_options & K_USER) != 0) &&
439438
reason != K_ERR_STACK_CHK_FAIL) {
440439
reason = K_ERR_KERNEL_OOPS;
441440
}

0 commit comments

Comments
 (0)