Skip to content

Commit 77616da

Browse files
committed
util: Add mmio helpers functions for riscv
Fix a SEGFAULT on RISC-V: the compiler intrinsics previously used to generate atomic load/store instructions fault when they target MMIO regions. Add a RISC-V-specific implementation that uses plain volatile accesses with explicit fences instead. Signed-off-by: Zheng Zhang <[email protected]>
1 parent 7a04b9e commit 77616da

File tree

1 file changed

+95
-2
lines changed

1 file changed

+95
-2
lines changed

util/mmio.h

Lines changed: 95 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -124,7 +124,53 @@ static inline uint8_t mmio_read8(const void *addr)
124124
return res;
125125
}
126126

127-
#else /* __s390x__ */
127+
#elif defined(__riscv)
128+
129+
/*
 * Generate mmio_writeNN_be()/mmio_writeNN_le() for RISC-V.
 *
 * Plain volatile stores are used instead of atomic intrinsics, since
 * atomic instructions can fault when they target MMIO regions on
 * RISC-V.  The leading release fence orders all prior memory accesses
 * before the device write; the trailing seq_cst fence keeps the write
 * ordered against everything that follows.
 */
#define MAKE_WRITE(_NAME_, _SZ_)                                               \
	static inline void _NAME_##_be(void *addr, __be##_SZ_ val)             \
	{                                                                      \
		__atomic_thread_fence(__ATOMIC_RELEASE);                       \
		*(volatile uint##_SZ_##_t *)addr = (uint##_SZ_##_t)val;        \
		__atomic_thread_fence(__ATOMIC_SEQ_CST);                       \
	}                                                                      \
	static inline void _NAME_##_le(void *addr, __le##_SZ_ val)             \
	{                                                                      \
		__atomic_thread_fence(__ATOMIC_RELEASE);                       \
		*(volatile uint##_SZ_##_t *)addr = (uint##_SZ_##_t)val;        \
		__atomic_thread_fence(__ATOMIC_SEQ_CST);                       \
	}
142+
143+
/*
 * Generate mmio_readNN_be()/mmio_readNN_le() for RISC-V.
 *
 * The load is performed through a volatile-qualified pointer so the
 * compiler can neither elide nor merge the MMIO read (the original
 * cast lacked volatile, which permits exactly those optimizations on
 * an ordinary object access).  Fences on both sides keep the device
 * read ordered against the surrounding memory accesses, mirroring the
 * write-side helpers.
 */
#define MAKE_READ(_NAME_, _SZ_)                                                \
	static inline __be##_SZ_ _NAME_##_be(const void *addr)                 \
	{                                                                      \
		__atomic_thread_fence(__ATOMIC_RELEASE);                       \
		__be##_SZ_ val = *(const volatile uint##_SZ_##_t *)addr;       \
		__atomic_thread_fence(__ATOMIC_SEQ_CST);                       \
		return val;                                                    \
	}                                                                      \
	static inline __le##_SZ_ _NAME_##_le(const void *addr)                 \
	{                                                                      \
		__atomic_thread_fence(__ATOMIC_RELEASE);                       \
		__le##_SZ_ val = *(const volatile uint##_SZ_##_t *)addr;       \
		__atomic_thread_fence(__ATOMIC_SEQ_CST);                       \
		return val;                                                    \
	}
158+
159+
/*
 * Write a single byte to an MMIO address.
 *
 * The store goes through a volatile-qualified pointer so it cannot be
 * elided or reordered by the compiler (the original used a plain
 * uint8_t *, unlike the volatile stores in MAKE_WRITE).  Fences on
 * both sides order the device write against surrounding accesses.
 */
static inline void mmio_write8(void *addr, uint8_t val)
{
	__atomic_thread_fence(__ATOMIC_RELEASE);
	*(volatile uint8_t *)addr = val;
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
}
165+
/*
 * Read a single byte from an MMIO address.
 *
 * The load goes through a const-volatile-qualified pointer so the
 * compiler cannot elide or merge the MMIO read (the original cast to
 * plain uint8_t *, dropping both const and volatile).  Fences on both
 * sides order the device read against surrounding accesses.
 */
static inline uint8_t mmio_read8(const void *addr)
{
	__atomic_thread_fence(__ATOMIC_RELEASE);
	uint8_t val = *(const volatile uint8_t *)addr;
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
	return val;
}
172+
173+
#else
128174

129175
#define MAKE_WRITE(_NAME_, _SZ_) \
130176
static inline void _NAME_##_be(void *addr, __be##_SZ_ value) \
@@ -161,7 +207,7 @@ static inline uint8_t mmio_read8(const void *addr)
161207
return atomic_load_explicit((_Atomic(uint8_t) *)addr,
162208
memory_order_relaxed);
163209
}
164-
#endif /* __s390x__ */
210+
#endif
165211

166212
MAKE_WRITE(mmio_write16, 16)
167213
MAKE_WRITE(mmio_write32, 32)
@@ -170,8 +216,10 @@ MAKE_READ(mmio_read16, 16)
170216
MAKE_READ(mmio_read32, 32)
171217

172218
#if SIZEOF_LONG == 8
219+
173220
MAKE_WRITE(mmio_write64, 64)
174221
MAKE_READ(mmio_read64, 64)
222+
175223
#else
176224
void mmio_write64_be(void *addr, __be64 val);
177225
static inline void mmio_write64_le(void *addr, __le64 val)
@@ -234,6 +282,51 @@ static inline void _mmio_memcpy_x64(void *dest, const void *src, size_t bytecnt)
234282
})
235283
#elif defined(__s390x__)
236284
void mmio_memcpy_x64(void *dst, const void *src, size_t bytecnt);
285+
#elif defined(__riscv)
286+
/*
 * Copy exactly 64 bytes to (write-combining) MMIO memory.
 *
 * NOTE(review): the __riscv_vector branch uses the pre-ratification
 * RVV intrinsic names (vsetvl_e64m1 et al.); current toolchains spell
 * these __riscv_vsetvl_e64m1 and require <riscv_vector.h>.  It also
 * passes a volatile-qualified pointer to vse64_v_u64m1, which the
 * intrinsic prototype may reject -- confirm against the minimum
 * supported compiler.
 */
static inline void _mmio_memcpy_x64_64b(void *dest, const void *src)
{
#if defined(__riscv_vector)
	const uint64_t *s = (const uint64_t *)src;
	volatile uint64_t *d = (uint64_t *)dest;
	size_t n = 8;

	while (n) {
		size_t vl = vsetvl_e64m1(n);
		vuint64m1_t v = vle64_v_u64m1(s, vl);
		vse64_v_u64m1(d, v, vl);
		s += vl;
		d += vl;
		n -= vl;
	}
#else
	/* Scalar fallback: eight fenced 64-bit volatile stores. */
	const uint64_t *s = (const uint64_t *)src;
	volatile uint64_t *d = (uint64_t *)dest;

	__atomic_thread_fence(__ATOMIC_RELEASE);
	for (int i = 0; i < 8; i++)
		d[i] = s[i];
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
#endif
}

/*
 * Copy bytecnt bytes in 64-byte chunks.  bytecnt must be a non-zero
 * multiple of 64 (the do/while underflows on 0, matching the other
 * architecture variants' contract).
 */
static inline void _mmio_memcpy_x64(void *dest, const void *src, size_t bytecnt)
{
	const char *s = (const char *)src;
	char *d = (char *)dest;

	do {
		_mmio_memcpy_x64_64b(d, s);
		bytecnt -= 64;
		s += 64;
		d += 64;
	} while (bytecnt > 0);
}

/*
 * Dispatch to the single-block fast path only when bytecount is a
 * compile-time constant equal to 64.  The original condition,
 * __builtin_constant_p((bytecount) == 64), is true for ANY constant
 * bytecount -- the comparison itself folds to a constant -- so a
 * constant bytecount of 128 would silently copy only 64 bytes.
 */
#define mmio_memcpy_x64(dest, src, bytecount)                                  \
	({                                                                     \
		if (__builtin_constant_p(bytecount) && (bytecount) == 64)      \
			_mmio_memcpy_x64_64b((dest), (src));                   \
		else                                                           \
			_mmio_memcpy_x64((dest), (src), (bytecount));          \
	})
237330
#else
238331
/* Transfer is some multiple of 64 bytes */
239332
static inline void mmio_memcpy_x64(void *dest, const void *src, size_t bytecnt)

0 commit comments

Comments
 (0)