15 changes: 15 additions & 0 deletions env/posix/ocf_env_list.h
@@ -1,5 +1,6 @@
/*
* Copyright(c) 2019-2021 Intel Corporation
* Copyright(c) 2024-2025 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/

@@ -164,5 +165,19 @@ static inline void list_move_tail(struct list_head *it, struct list_head *l1)
_list_entry_helper(item, (item)->field_name.next, field_name) != \
_list_entry_helper(item, (plist)->next, field_name); \
item = q, q = _list_entry_helper(q, (q)->field_name.next, field_name))
/**
* Iterate over a list starting at the specified element. Works even if entries
* are deleted during the loop.
* @param item pointer to list item (iterator)
* @param q another pointer to list item, used as helper
* @param plist pointer to list_head item
* @param field_name name of list_head field in list entry
*/
#define list_for_each_entry_safe_from(item, q, plist, field_name) \
for (q = _list_entry_helper(item, (item)->field_name.next, field_name); \
_list_entry_helper(item, (item)->field_name.next, field_name) != \
_list_entry_helper(item, (plist)->next, field_name); \
item = q, q = _list_entry_helper(q, (q)->field_name.next, field_name))


#endif // __OCF_ENV_LIST__
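For context, here is a minimal usage sketch of the new macro (hypothetical struct and values, not part of the change; it assumes the usual INIT_LIST_HEAD/list_add_tail/list_del helpers already provided by this header). A first list_for_each_entry_safe pass locates an element, the found element is handled and removed, and list_for_each_entry_safe_from resumes iteration from the following element while further entries are deleted, which is the same pattern utils_alock.c adopts below.

/* Hypothetical example, not part of the patch */
#include <stdio.h>
#include <stdlib.h>
#include "ocf_env_list.h"

struct node {
	int val;
	struct list_head item;
};

int main(void)
{
	struct list_head head;
	struct node *cur, *next;
	int i;

	INIT_LIST_HEAD(&head);
	for (i = 0; i < 5; i++) {
		struct node *n = malloc(sizeof(*n));
		n->val = i;
		list_add_tail(&n->item, &head);
	}

	/* First pass: stop at the first odd-valued node (values 0..4
	 * guarantee a match) */
	list_for_each_entry_safe(cur, next, &head, item) {
		if (cur->val % 2)
			break;
	}

	/* Handle the found node, then drop it from the list */
	printf("found %d\n", cur->val);
	list_del(&cur->item);
	free(cur);

	/* Resume from the next element and delete the remaining odd nodes;
	 * the _from variant stays safe while entries are removed in the body */
	cur = next;
	list_for_each_entry_safe_from(cur, next, &head, item) {
		if (cur->val % 2) {
			list_del(&cur->item);
			free(cur);
		}
	}

	return 0;
}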
229 changes: 136 additions & 93 deletions src/utils/utils_alock.c
@@ -1,6 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* Copyright(c) 2024-2025 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/

@@ -313,25 +313,23 @@ static inline void ocf_alock_unlock_entry_rd(struct ocf_alock *alock,
env_atomic_dec(access);
}

static inline bool ocf_alock_trylock_entry_wr2wr(struct ocf_alock *alock,
static inline void ocf_alock_lock_entry_wr2wr(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
env_atomic *access = &alock->access[entry];
int v = env_atomic_read(access);

ENV_BUG_ON(v != OCF_CACHE_LINE_ACCESS_WR);
return true;
}

static inline bool ocf_alock_trylock_entry_wr2rd(struct ocf_alock *alock,
static inline void ocf_alock_lock_entry_wr2rd(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
env_atomic *access = &alock->access[entry];
int v = env_atomic_read(access);

ENV_BUG_ON(v != OCF_CACHE_LINE_ACCESS_WR);
env_atomic_set(access, OCF_CACHE_LINE_ACCESS_ONE_RD);
return true;
}

static inline bool ocf_alock_trylock_entry_rd2wr(struct ocf_alock *alock,
@@ -491,70 +489,95 @@ bool ocf_alock_lock_one_rd(struct ocf_alock *alock,
}

/*
* Unlocks the given read lock. If any waiters are registered for the same
* cacheline, one is awakened and the lock is either upgraded to a write lock
* or kept as a readlock. If there are no waiters, it's just unlocked.
* Unlocks the given read lock.
*
* The lock can be upgraded to a write lock and passed to the first waiter if
* the current owner is the last one holding the read lock.
*
* If there are requests waiting for read access to the cache line, the lock
* will be granted to all such waiters that are not preceded by a write request.
*/
static inline void ocf_alock_unlock_one_rd_common(struct ocf_alock *alock,
const ocf_cache_line_t entry)
{
bool locked = false;
bool exchanged = true;

bool locked = false, no_waiters = true;
int rw;
uint32_t idx = _WAITERS_LIST_ITEM(entry);
struct ocf_alock_waiters_list *lst = &alock->waiters_lsts[idx];
struct ocf_alock_waiter *waiter;

struct list_head *iter, *next;
struct ocf_alock_waiter *waiter, *waiter_tmp;

/*
* Lock exchange scenario
* 1. RD -> IDLE
* 2. RD -> RD
* 3. RD -> WR
* 2. RD -> WR
* 3. RD -> RD
* 4. RD -> Multiple RD
*/

/* Check is requested page is on the list */
list_for_each_safe(iter, next, &lst->head) {
waiter = list_entry(iter, struct ocf_alock_waiter, item);
/* Check if any request is waiting for the cache line */
list_for_each_entry_safe(waiter, waiter_tmp, &lst->head, item) {
if (entry == waiter->entry) {
no_waiters = false;
break;
}
}

/* RD -> IDLE */
if (no_waiters) {
ocf_alock_unlock_entry_rd(alock, entry);
return;
}

ENV_BUG_ON(waiter->rw != OCF_READ && waiter->rw != OCF_WRITE);

/* RD -> WR/RD */
if (waiter->rw == OCF_WRITE)
locked = ocf_alock_trylock_entry_rd2wr(alock, entry);
else if (waiter->rw == OCF_READ)
locked = ocf_alock_trylock_entry_rd2rd(alock, entry);

if (unlikely(!locked)) {
ocf_alock_unlock_entry_rd(alock, entry);
return;
}

rw = waiter->rw;

list_del(&waiter->item);

ocf_alock_mark_index_locked(alock, waiter->req, waiter->idx, true);
ocf_alock_entry_locked(alock, waiter->req, waiter->cmpl);

env_allocator_del(alock->allocator, waiter);

/* If we upgraded to a write lock, the read requests won't be able to
* lock the cache line anyway
*/
if (rw == OCF_WRITE)
return;

waiter = waiter_tmp;

/* RD -> Multiple RD */
list_for_each_entry_safe_from(waiter, waiter_tmp, &lst->head, item) {
if (entry != waiter->entry)
continue;

if (exchanged) {
if (waiter->rw == OCF_WRITE)
locked = ocf_alock_trylock_entry_rd2wr(alock, entry);
else if (waiter->rw == OCF_READ)
locked = ocf_alock_trylock_entry_rd2rd(alock, entry);
else
ENV_BUG();
} else {
if (waiter->rw == OCF_WRITE)
locked = ocf_alock_trylock_entry_wr(alock, entry);
else if (waiter->rw == OCF_READ)
locked = ocf_alock_trylock_entry_rd(alock, entry);
else
ENV_BUG();
}
ENV_BUG_ON(waiter->rw != OCF_READ && waiter->rw != OCF_WRITE);

if (locked) {
exchanged = false;
list_del(iter);
if (waiter->rw == OCF_WRITE)
return;

ocf_alock_mark_index_locked(alock, waiter->req, waiter->idx, true);
ocf_alock_entry_locked(alock, waiter->req, waiter->cmpl);
locked = ocf_alock_trylock_entry_rd(alock, entry);
/* There is no limit on the number of readers */
ENV_BUG_ON(!locked);

env_allocator_del(alock->allocator, waiter);
} else {
break;
}
}
list_del(&waiter->item);

if (exchanged) {
/* No exchange, no waiters on the list, unlock and return
* WR -> IDLE
*/
ocf_alock_unlock_entry_rd(alock, entry);
ocf_alock_mark_index_locked(alock, waiter->req, waiter->idx, true);
ocf_alock_entry_locked(alock, waiter->req, waiter->cmpl);

env_allocator_del(alock->allocator, waiter);
}
}
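To make the hand-off policy concrete, here is a small standalone sketch (hypothetical, not OCF code) of the rule described in the comment above, which also governs the write-unlock path further down: the first waiter for the cache line is granted the lock, and if it is a reader, every following reader up to the first writer is granted as well. It assumes the releasing owner was the last reader holding the lock, so the first grant cannot fail.

#include <stdio.h>

/* queue holds the waiters for one cache line, in arrival order:
 * 'R' = read request, 'W' = write request.
 * Returns how many waiters are woken on unlock. */
static int waiters_granted(const char *queue)
{
	int granted = 0;

	if (queue[0] == '\0')
		return 0;		/* no waiters: lock goes idle */

	granted++;			/* first waiter always gets the lock */
	if (queue[0] == 'W')
		return granted;		/* a writer is exclusive: stop */

	/* readers share the lock until the first writer is reached */
	for (int i = 1; queue[i] == 'R'; i++)
		granted++;

	return granted;
}

int main(void)
{
	printf("%d\n", waiters_granted(""));	/* 0: -> IDLE */
	printf("%d\n", waiters_granted("W"));	/* 1: -> WR */
	printf("%d\n", waiters_granted("RRWR"));/* 2: -> Multiple RD, stops at the writer */
	return 0;
}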

Expand All @@ -578,70 +601,90 @@ void ocf_alock_unlock_one_rd(struct ocf_alock *alock,
}

/*
* Unlocks the given write lock. If any waiters are registered for the same
* cacheline, one is awakened and the lock is either downgraded to a readlock
* or kept as a writelock. If there are no waiters, it's just unlocked.
* Unlocks the given write lock.
*
* The lock can be passed to the first waiter.
*
* If there are requests waiting for read access to the cache line, the lock
* will be downgraded and granted to all such waiters that are not preceded by
* a write request.
*/
static inline void ocf_alock_unlock_one_wr_common(struct ocf_alock *alock,
const ocf_cache_line_t entry)
{
bool locked = false;
bool exchanged = true;

bool locked = false, no_waiters = true;
int rw;
uint32_t idx = _WAITERS_LIST_ITEM(entry);
struct ocf_alock_waiters_list *lst = &alock->waiters_lsts[idx];
struct ocf_alock_waiter *waiter;

struct list_head *iter, *next;
struct ocf_alock_waiter *waiter, *waiter_tmp;

/*
* Lock exchange scenario
* 1. WR -> IDLE
* 2. WR -> RD
* 3. WR -> WR
* 2. WR -> WR
* 3. WR -> RD
* 4. WR -> Multiple RD
*/

/* Check is requested page is on the list */
list_for_each_safe(iter, next, &lst->head) {
waiter = list_entry(iter, struct ocf_alock_waiter, item);
/* Check if any request is waiting for the cache line */
list_for_each_entry_safe(waiter, waiter_tmp, &lst->head, item) {
if (entry == waiter->entry) {
no_waiters = false;
break;
}
}

/* WR -> IDLE */
if (no_waiters) {
ocf_alock_unlock_entry_wr(alock, entry);
return;
}

ENV_BUG_ON(waiter->rw != OCF_READ && waiter->rw != OCF_WRITE);

/* WR -> WR/RD */
if (waiter->rw == OCF_WRITE)
ocf_alock_lock_entry_wr2wr(alock, entry);
else if (waiter->rw == OCF_READ)
ocf_alock_lock_entry_wr2rd(alock, entry);

rw = waiter->rw;

list_del(&waiter->item);

ocf_alock_mark_index_locked(alock, waiter->req, waiter->idx, true);
ocf_alock_entry_locked(alock, waiter->req, waiter->cmpl);

env_allocator_del(alock->allocator, waiter);

/* If we passed the write lock, the read requests won't be able to lock
* the cache line anyway
*/
if (rw == OCF_WRITE)
return;

waiter = waiter_tmp;

/* WR -> Multiple RD */
list_for_each_entry_safe_from(waiter, waiter_tmp, &lst->head, item) {
if (entry != waiter->entry)
continue;

if (exchanged) {
if (waiter->rw == OCF_WRITE)
locked = ocf_alock_trylock_entry_wr2wr(alock, entry);
else if (waiter->rw == OCF_READ)
locked = ocf_alock_trylock_entry_wr2rd(alock, entry);
else
ENV_BUG();
} else {
if (waiter->rw == OCF_WRITE)
locked = ocf_alock_trylock_entry_wr(alock, entry);
else if (waiter->rw == OCF_READ)
locked = ocf_alock_trylock_entry_rd(alock, entry);
else
ENV_BUG();
}
ENV_BUG_ON(waiter->rw != OCF_READ && waiter->rw != OCF_WRITE);

if (locked) {
exchanged = false;
list_del(iter);
if (waiter->rw == OCF_WRITE)
return;

ocf_alock_mark_index_locked(alock, waiter->req, waiter->idx, true);
ocf_alock_entry_locked(alock, waiter->req, waiter->cmpl);
locked = ocf_alock_trylock_entry_rd(alock, entry);
/* There is no limit on the number of readers */
ENV_BUG_ON(!locked);

env_allocator_del(alock->allocator, waiter);
} else {
break;
}
}
list_del(&waiter->item);

if (exchanged) {
/* No exchange, no waiters on the list, unlock and return
* WR -> IDLE
*/
ocf_alock_unlock_entry_wr(alock, entry);
ocf_alock_mark_index_locked(alock, waiter->req, waiter->idx, true);
ocf_alock_entry_locked(alock, waiter->req, waiter->cmpl);

env_allocator_del(alock->allocator, waiter);
}
}
