Skip to content

Commit 59779eb

Browse files
bjarki-andreasen
authored and kartben committed
pm: policy: event: use uptime ticks
Update events to use uptime ticks, which is a monotonic clock with the same resolution as kernel ticks. This makes comparisons simple and removes the complexity of dealing with wrapping counter values. The wrapping is particularly problematic for events, since it makes it quite complex to track whether an event has occurred in the past or will occur in the future. This information is needed to know whether an event has actually been handled or not. Signed-off-by: Bjarki Arge Andreasen <[email protected]>
1 parent 6aa760a commit 59779eb

File tree

3 files changed

+71
-82
lines changed

3 files changed

+71
-82
lines changed

include/zephyr/pm/policy.h

+21-17
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ struct pm_policy_latency_request {
6767
struct pm_policy_event {
6868
/** @cond INTERNAL_HIDDEN */
6969
sys_snode_t node;
70-
uint32_t value_cyc;
70+
int64_t uptime_ticks;
7171
/** @endcond */
7272
};
7373

@@ -137,38 +137,38 @@ void pm_policy_state_lock_put(enum pm_state state, uint8_t substate_id);
137137
*/
138138
bool pm_policy_state_lock_is_active(enum pm_state state, uint8_t substate_id);
139139

140-
141140
/**
142141
* @brief Register an event.
143142
*
144143
* Events in the power-management policy context are defined as any source that
145144
* will wake up the system at a known time in the future. By registering such
146145
* event, the policy manager will be able to decide whether certain power states
147146
* are worth entering or not.
148-
* CPU is woken up before the time passed in cycle to prevent the event handling
149-
* latency
150147
*
151-
* @note It is mandatory to unregister events once they have happened by using
152-
* pm_policy_event_unregister(). Not doing so is an API contract violation,
153-
* because the system would continue to consider them as valid events in the
154-
* *far* future, that is, after the cycle counter rollover.
148+
* CPU is woken up before the time passed in uptime ticks to minimize event handling
149+
* latency. Once woken up, the CPU will be kept awake until the event has been
150+
handled, which is signaled by pm_policy_event_unregister() or moving the event
151+
* into the future using pm_policy_event_update().
155152
*
156153
* @param evt Event.
157-
* @param cycle When the event will occur, in absolute time (cycles).
154+
* @param uptime_ticks When the event will occur, in uptime ticks.
158155
*
159-
* @see pm_policy_event_unregister
156+
* @see pm_policy_event_unregister()
160157
*/
161-
void pm_policy_event_register(struct pm_policy_event *evt, uint32_t cycle);
158+
void pm_policy_event_register(struct pm_policy_event *evt, int64_t uptime_ticks);
162159

163160
/**
164161
* @brief Update an event.
165162
*
163+
* This shortcut allows for moving the time an event will occur without the
164+
* need for an unregister + register cycle.
165+
*
166166
* @param evt Event.
167-
* @param cycle When the event will occur, in absolute time (cycles).
167+
* @param uptime_ticks When the event will occur, in uptime ticks.
168168
*
169169
* @see pm_policy_event_register
170170
*/
171-
void pm_policy_event_update(struct pm_policy_event *evt, uint32_t cycle);
171+
void pm_policy_event_update(struct pm_policy_event *evt, int64_t uptime_ticks);
172172

173173
/**
174174
* @brief Unregister an event.
@@ -208,10 +208,14 @@ void pm_policy_device_power_lock_put(const struct device *dev);
208208
/**
209209
* @brief Returns the ticks until the next event
210210
*
211-
* If an event is registred, it will return the number of ticks until the next event as
212-
* a positive or zero value. Otherwise it returns -1
211+
* If an event is registered, it will return the number of ticks until the next event; if the
212+
* "next"/"oldest" registered event is in the past, it will return 0. Otherwise it returns -1.
213+
*
214+
* @retval >0 If next registered event is in the future
215+
* @retval 0 If next registered event is now or in the past
216+
* @retval -1 Otherwise
213217
*/
214-
int32_t pm_policy_next_event_ticks(void);
218+
int64_t pm_policy_next_event_ticks(void);
215219

216220
#else
217221
static inline void pm_policy_state_lock_get(enum pm_state state, uint8_t substate_id)
@@ -261,7 +265,7 @@ static inline void pm_policy_device_power_lock_put(const struct device *dev)
261265
ARG_UNUSED(dev);
262266
}
263267

264-
static inline int32_t pm_policy_next_event_ticks(void)
268+
static inline int64_t pm_policy_next_event_ticks(void)
265269
{
266270
return -1;
267271
}

subsys/pm/policy/policy_events.c

+37-52
Original file line numberDiff line numberDiff line change
@@ -20,81 +20,66 @@ static sys_slist_t events_list;
2020
/** Pointer to Next Event. */
2121
struct pm_policy_event *next_event;
2222

23-
/** @brief Update next event. */
24-
static void update_next_event(uint32_t cyc)
23+
static void update_next_event(void)
2524
{
26-
int64_t new_next_event_cyc = -1;
2725
struct pm_policy_event *evt;
2826

29-
/* unset the next event pointer */
3027
next_event = NULL;
3128

3229
SYS_SLIST_FOR_EACH_CONTAINER(&events_list, evt, node) {
33-
uint64_t cyc_evt = evt->value_cyc;
34-
35-
/*
36-
* cyc value is a 32-bit rolling counter:
37-
*
38-
* |---------------->-----------------------|
39-
* 0 cyc UINT32_MAX
40-
*
41-
* Values from [0, cyc) are events happening later than
42-
* [cyc, UINT32_MAX], so pad [0, cyc) with UINT32_MAX + 1 to do
43-
* the comparison.
44-
*/
45-
if (cyc_evt < cyc) {
46-
cyc_evt += (uint64_t)UINT32_MAX + 1U;
30+
if (next_event == NULL) {
31+
next_event = evt;
32+
continue;
4733
}
4834

49-
if ((new_next_event_cyc < 0) || (cyc_evt < new_next_event_cyc)) {
50-
new_next_event_cyc = cyc_evt;
51-
next_event = evt;
35+
if (next_event->uptime_ticks <= evt->uptime_ticks) {
36+
continue;
5237
}
38+
39+
next_event = evt;
5340
}
5441
}
5542

56-
int32_t pm_policy_next_event_ticks(void)
43+
int64_t pm_policy_next_event_ticks(void)
5744
{
58-
int32_t cyc_evt = -1;
59-
60-
if ((next_event) && (next_event->value_cyc > 0)) {
61-
cyc_evt = next_event->value_cyc - k_cycle_get_32();
62-
cyc_evt = MAX(0, cyc_evt);
63-
BUILD_ASSERT(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC >= CONFIG_SYS_CLOCK_TICKS_PER_SEC,
64-
"HW Cycles per sec should be greater that ticks per sec");
65-
return k_cyc_to_ticks_floor32(cyc_evt);
66-
}
45+
int64_t ticks = -1;
6746

68-
return -1;
69-
}
47+
K_SPINLOCK(&events_lock) {
48+
if (next_event == NULL) {
49+
K_SPINLOCK_BREAK;
50+
}
7051

71-
void pm_policy_event_register(struct pm_policy_event *evt, uint32_t cycle)
72-
{
73-
k_spinlock_key_t key = k_spin_lock(&events_lock);
52+
ticks = next_event->uptime_ticks - k_uptime_ticks();
7453

75-
evt->value_cyc = cycle;
76-
sys_slist_append(&events_list, &evt->node);
77-
update_next_event(k_cycle_get_32());
54+
if (ticks < 0) {
55+
ticks = 0;
56+
}
57+
}
7858

79-
k_spin_unlock(&events_lock, key);
59+
return ticks;
8060
}
8161

82-
void pm_policy_event_update(struct pm_policy_event *evt, uint32_t cycle)
62+
void pm_policy_event_register(struct pm_policy_event *evt, int64_t uptime_ticks)
8363
{
84-
k_spinlock_key_t key = k_spin_lock(&events_lock);
85-
86-
evt->value_cyc = cycle;
87-
update_next_event(k_cycle_get_32());
64+
K_SPINLOCK(&events_lock) {
65+
evt->uptime_ticks = uptime_ticks;
66+
sys_slist_append(&events_list, &evt->node);
67+
update_next_event();
68+
}
69+
}
8870

89-
k_spin_unlock(&events_lock, key);
71+
void pm_policy_event_update(struct pm_policy_event *evt, int64_t uptime_ticks)
72+
{
73+
K_SPINLOCK(&events_lock) {
74+
evt->uptime_ticks = uptime_ticks;
75+
update_next_event();
76+
}
9077
}
9178

9279
void pm_policy_event_unregister(struct pm_policy_event *evt)
9380
{
94-
k_spinlock_key_t key = k_spin_lock(&events_lock);
95-
96-
(void)sys_slist_find_and_remove(&events_list, &evt->node);
97-
update_next_event(k_cycle_get_32());
98-
99-
k_spin_unlock(&events_lock, key);
81+
K_SPINLOCK(&events_lock) {
82+
(void)sys_slist_find_and_remove(&events_list, &evt->node);
83+
update_next_event();
84+
}
10085
}

tests/subsys/pm/policy_api/src/main.c

+13-13
Original file line numberDiff line numberDiff line change
@@ -308,29 +308,29 @@ ZTEST(policy_api, test_pm_policy_events)
308308
{
309309
struct pm_policy_event evt1;
310310
struct pm_policy_event evt2;
311-
uint32_t now_cycle;
312-
uint32_t evt1_1_cycle;
313-
uint32_t evt1_2_cycle;
314-
uint32_t evt2_cycle;
311+
int64_t now_uptime_ticks;
312+
int64_t evt1_1_uptime_ticks;
313+
int64_t evt1_2_uptime_ticks;
314+
int64_t evt2_uptime_ticks;
315315

316-
now_cycle = k_cycle_get_32();
317-
evt1_1_cycle = now_cycle + k_ticks_to_cyc_floor32(100);
318-
evt1_2_cycle = now_cycle + k_ticks_to_cyc_floor32(200);
319-
evt2_cycle = now_cycle + k_ticks_to_cyc_floor32(2000);
316+
now_uptime_ticks = k_uptime_ticks();
317+
evt1_1_uptime_ticks = now_uptime_ticks + 100;
318+
evt1_2_uptime_ticks = now_uptime_ticks + 200;
319+
evt2_uptime_ticks = now_uptime_ticks + 2000;
320320

321321
zassert_equal(pm_policy_next_event_ticks(), -1);
322-
pm_policy_event_register(&evt1, evt1_1_cycle);
323-
pm_policy_event_register(&evt2, evt2_cycle);
322+
pm_policy_event_register(&evt1, evt1_1_uptime_ticks);
323+
pm_policy_event_register(&evt2, evt2_uptime_ticks);
324324
zassert_within(pm_policy_next_event_ticks(), 100, 50);
325325
pm_policy_event_unregister(&evt1);
326326
zassert_within(pm_policy_next_event_ticks(), 2000, 50);
327327
pm_policy_event_unregister(&evt2);
328328
zassert_equal(pm_policy_next_event_ticks(), -1);
329-
pm_policy_event_register(&evt2, evt2_cycle);
329+
pm_policy_event_register(&evt2, evt2_uptime_ticks);
330330
zassert_within(pm_policy_next_event_ticks(), 2000, 50);
331-
pm_policy_event_register(&evt1, evt1_1_cycle);
331+
pm_policy_event_register(&evt1, evt1_1_uptime_ticks);
332332
zassert_within(pm_policy_next_event_ticks(), 100, 50);
333-
pm_policy_event_update(&evt1, evt1_2_cycle);
333+
pm_policy_event_update(&evt1, evt1_2_uptime_ticks);
334334
zassert_within(pm_policy_next_event_ticks(), 200, 50);
335335
pm_policy_event_unregister(&evt1);
336336
pm_policy_event_unregister(&evt2);

0 commit comments

Comments
 (0)