Commit 7afc475

Merge branch 'trunk' into web_dependencies
2 parents: 0c8a3e5 + 5573c57

7 files changed: +94 -23 lines

naga/Cargo.toml (+1 -7)

@@ -58,13 +58,7 @@ arbitrary = [
 ]
 spv-in = ["dep:petgraph", "dep:spirv"]
 spv-out = ["dep:spirv"]
-wgsl-in = [
-    "dep:hexf-parse",
-    "dep:strum",
-    "dep:unicode-ident",
-    "indexmap/std",
-    "compact",
-]
+wgsl-in = ["dep:hexf-parse", "dep:strum", "dep:unicode-ident", "compact"]
 wgsl-out = []

 ## Enables outputting to HLSL (Microsoft's High-Level Shader Language).
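The `indexmap/std` feature can be dropped because the one `wgsl-in` consumer of `IndexMap` (see `diagnostic_filter.rs` below) now goes through naga's `FastIndexMap` alias, which supplies its own hasher instead of `std`'s `RandomState`. A minimal sketch of such an alias, assuming the usual `indexmap` + `rustc-hash` pairing (naga's actual definition may differ in detail):

use core::hash::BuildHasherDefault;

use indexmap::IndexMap;
use rustc_hash::FxHasher;

// With an explicit hasher, `IndexMap` no longer needs the default
// `RandomState`, which is what `indexmap`'s `std` feature provides,
// so the alias also works in no_std builds.
pub type FastIndexMap<K, V> = IndexMap<K, V, BuildHasherDefault<FxHasher>>;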

naga/src/diagnostic_filter.rs (+3 -3)

@@ -4,12 +4,12 @@ use alloc::boxed::Box;

 use crate::{Arena, Handle};

+#[cfg(feature = "wgsl-in")]
+use crate::FastIndexMap;
 #[cfg(feature = "wgsl-in")]
 use crate::Span;
 #[cfg(feature = "arbitrary")]
 use arbitrary::Arbitrary;
-#[cfg(feature = "wgsl-in")]
-use indexmap::IndexMap;
 #[cfg(feature = "deserialize")]
 use serde::Deserialize;
 #[cfg(feature = "serialize")]
@@ -133,7 +133,7 @@ pub(crate) enum ShouldConflictOnFullDuplicate {
 /// [`add`]: DiagnosticFilterMap::add
 #[derive(Clone, Debug, Default)]
 #[cfg(feature = "wgsl-in")]
-pub(crate) struct DiagnosticFilterMap(IndexMap<FilterableTriggeringRule, (Severity, Span)>);
+pub(crate) struct DiagnosticFilterMap(FastIndexMap<FilterableTriggeringRule, (Severity, Span)>);

 #[cfg(feature = "wgsl-in")]
 impl DiagnosticFilterMap {
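Functionally this is a drop-in swap: `FastIndexMap` is still an insertion-ordered map, so `DiagnosticFilterMap`'s duplicate detection and span reporting behave as before. A hypothetical usage sketch using the alias from above, not naga's internal API:

fn demo() {
    // `default()` works because `BuildHasherDefault` implements `Default`.
    let mut map: FastIndexMap<&str, u32> = FastIndexMap::default();
    map.insert("derivative_uniformity", 1);
    map.insert("some_other_rule", 2); // hypothetical rule name
    // Iteration follows insertion order, exactly as with plain IndexMap.
    assert_eq!(
        map.keys().copied().collect::<Vec<_>>(),
        vec!["derivative_uniformity", "some_other_rule"]
    );
}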

naga/src/front/wgsl/lower/mod.rs (+12 -10)

@@ -2504,15 +2504,16 @@ impl<'source, 'temp> Lowerer<'source, 'temp> {
             }
             "atomicLoad" => {
                 let mut args = ctx.prepare_args(arguments, 1, span);
-                let pointer = self.atomic_pointer(args.next()?, ctx)?;
+                let (pointer, _scalar) = self.atomic_pointer(args.next()?, ctx)?;
                 args.finish()?;

                 ir::Expression::Load { pointer }
             }
             "atomicStore" => {
                 let mut args = ctx.prepare_args(arguments, 2, span);
-                let pointer = self.atomic_pointer(args.next()?, ctx)?;
-                let value = self.expression(args.next()?, ctx)?;
+                let (pointer, scalar) = self.atomic_pointer(args.next()?, ctx)?;
+                let value =
+                    self.expression_with_leaf_scalar(args.next()?, scalar, ctx)?;
                 args.finish()?;

                 let rctx = ctx.runtime_expression_ctx(span)?;
@@ -2526,13 +2527,14 @@ impl<'source, 'temp> Lowerer<'source, 'temp> {
             "atomicCompareExchangeWeak" => {
                 let mut args = ctx.prepare_args(arguments, 3, span);

-                let pointer = self.atomic_pointer(args.next()?, ctx)?;
+                let (pointer, scalar) = self.atomic_pointer(args.next()?, ctx)?;

-                let compare = self.expression(args.next()?, ctx)?;
+                let compare =
+                    self.expression_with_leaf_scalar(args.next()?, scalar, ctx)?;

                 let value = args.next()?;
                 let value_span = ctx.ast_expressions.get_span(value);
-                let value = self.expression(value, ctx)?;
+                let value = self.expression_with_leaf_scalar(value, scalar, ctx)?;

                 args.finish()?;

@@ -3200,13 +3202,13 @@ impl<'source, 'temp> Lowerer<'source, 'temp> {
         &mut self,
         expr: Handle<ast::Expression<'source>>,
         ctx: &mut ExpressionContext<'source, '_, '_>,
-    ) -> Result<'source, Handle<ir::Expression>> {
+    ) -> Result<'source, (Handle<ir::Expression>, ir::Scalar)> {
         let span = ctx.ast_expressions.get_span(expr);
         let pointer = self.expression(expr, ctx)?;

         match *resolve_inner!(ctx, pointer) {
             ir::TypeInner::Pointer { base, .. } => match ctx.module.types[base].inner {
-                ir::TypeInner::Atomic { .. } => Ok(pointer),
+                ir::TypeInner::Atomic(scalar) => Ok((pointer, scalar)),
                 ref other => {
                     log::error!("Pointer type to {:?} passed to atomic op", other);
                     Err(Box::new(Error::InvalidAtomicPointer(span)))
@@ -3229,8 +3231,8 @@ impl<'source, 'temp> Lowerer<'source, 'temp> {
     ) -> Result<'source, Option<Handle<ir::Expression>>> {
         let mut args = ctx.prepare_args(args, 2, span);

-        let pointer = self.atomic_pointer(args.next()?, ctx)?;
-        let value = self.expression(args.next()?, ctx)?;
+        let (pointer, scalar) = self.atomic_pointer(args.next()?, ctx)?;
+        let value = self.expression_with_leaf_scalar(args.next()?, scalar, ctx)?;
         let value_inner = resolve_inner!(ctx, value);
         args.finish()?;
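The substance of this change: `atomic_pointer` now returns the atomic's `ir::Scalar` alongside the pointer, and each value operand is lowered with `expression_with_leaf_scalar`, so an abstract-int argument such as the bare `1` in `atomicAdd(&atomic_u32, 1)` concretizes to the pointee's scalar type instead of defaulting. A simplified sketch of that concretization step, with invented names (naga's real lowering is more involved):

#[derive(Clone, Copy, PartialEq)]
enum ScalarKind { Sint, Uint }

#[derive(Clone, Copy)]
struct Scalar { kind: ScalarKind, width: u8 }

#[derive(Debug, PartialEq)]
enum Literal { I32(i32), U32(u32) }

// Hypothetical helper: pick the literal variant that matches the
// atomic's scalar, rather than always treating an abstract-int as i32.
fn concretize(abstract_int: i64, target: Scalar) -> Option<Literal> {
    match (target.kind, target.width) {
        (ScalarKind::Sint, 4) => i32::try_from(abstract_int).ok().map(Literal::I32),
        (ScalarKind::Uint, 4) => u32::try_from(abstract_int).ok().map(Literal::U32),
        _ => None, // other widths/kinds not handled in this sketch
    }
}

With the scalar threaded through, a call like `atomicCompareExchangeWeak(&atomic_i32, 1, 1i)` type-checks: the bare `1` takes its type from the pointer, not from its suffixed neighbor. The new tests below exercise exactly this.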

New test config (+1)

@@ -0,0 +1 @@
+targets = "WGSL"
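In naga's snapshot-test configs, `targets` evidently selects which backends to emit for the test input; `"WGSL"` here matches the single WGSL snapshot further down.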
New WGSL test input (+34)

@@ -0,0 +1,34 @@
+@group(0) @binding(0)
+var<storage,read_write> atomic_i32: atomic<i32>;
+@group(0) @binding(1)
+var<storage,read_write> atomic_u32: atomic<u32>;
+
+fn test_atomic_i32() {
+    atomicStore(&atomic_i32, 1);
+    _ = atomicCompareExchangeWeak(&atomic_i32, 1, 1i);
+    _ = atomicCompareExchangeWeak(&atomic_i32, 1i, 1);
+
+    _ = atomicAdd(&atomic_i32, 1);
+    _ = atomicSub(&atomic_i32, 1);
+    _ = atomicAnd(&atomic_i32, 1);
+    _ = atomicXor(&atomic_i32, 1);
+    _ = atomicOr(&atomic_i32, 1);
+    _ = atomicMin(&atomic_i32, 1);
+    _ = atomicMax(&atomic_i32, 1);
+    _ = atomicExchange(&atomic_i32, 1);
+}
+
+fn test_atomic_u32() {
+    atomicStore(&atomic_u32, 1);
+    _ = atomicCompareExchangeWeak(&atomic_u32, 1, 1u);
+    _ = atomicCompareExchangeWeak(&atomic_u32, 1u, 1);
+
+    _ = atomicAdd(&atomic_u32, 1);
+    _ = atomicSub(&atomic_u32, 1);
+    _ = atomicAnd(&atomic_u32, 1);
+    _ = atomicXor(&atomic_u32, 1);
+    _ = atomicOr(&atomic_u32, 1);
+    _ = atomicMin(&atomic_u32, 1);
+    _ = atomicMax(&atomic_u32, 1);
+    _ = atomicExchange(&atomic_u32, 1);
+}
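Note the deliberately un-suffixed `1` arguments: each is an abstract-int literal whose concrete type must come from the atomic it is paired with, including the mixed `atomicCompareExchangeWeak` calls where only the other operand carries an `i` or `u` suffix. The snapshot below shows every one of them concretized.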
New WGSL snapshot output (+35)

@@ -0,0 +1,35 @@
+@group(0) @binding(0)
+var<storage, read_write> atomic_i32_: atomic<i32>;
+@group(0) @binding(1)
+var<storage, read_write> atomic_u32_: atomic<u32>;
+
+fn test_atomic_i32_() {
+    atomicStore((&atomic_i32_), 1i);
+    let _e5 = atomicCompareExchangeWeak((&atomic_i32_), 1i, 1i);
+    let _e9 = atomicCompareExchangeWeak((&atomic_i32_), 1i, 1i);
+    let _e12 = atomicAdd((&atomic_i32_), 1i);
+    let _e15 = atomicSub((&atomic_i32_), 1i);
+    let _e18 = atomicAnd((&atomic_i32_), 1i);
+    let _e21 = atomicXor((&atomic_i32_), 1i);
+    let _e24 = atomicOr((&atomic_i32_), 1i);
+    let _e27 = atomicMin((&atomic_i32_), 1i);
+    let _e30 = atomicMax((&atomic_i32_), 1i);
+    let _e33 = atomicExchange((&atomic_i32_), 1i);
+    return;
+}
+
+fn test_atomic_u32_() {
+    atomicStore((&atomic_u32_), 1u);
+    let _e5 = atomicCompareExchangeWeak((&atomic_u32_), 1u, 1u);
+    let _e9 = atomicCompareExchangeWeak((&atomic_u32_), 1u, 1u);
+    let _e12 = atomicAdd((&atomic_u32_), 1u);
+    let _e15 = atomicSub((&atomic_u32_), 1u);
+    let _e18 = atomicAnd((&atomic_u32_), 1u);
+    let _e21 = atomicXor((&atomic_u32_), 1u);
+    let _e24 = atomicOr((&atomic_u32_), 1u);
+    let _e27 = atomicMin((&atomic_u32_), 1u);
+    let _e30 = atomicMax((&atomic_u32_), 1u);
+    let _e33 = atomicExchange((&atomic_u32_), 1u);
+    return;
+}
+

wgpu-core/src/device/queue.rs (+8 -3)

@@ -476,6 +476,8 @@ impl Queue {
             return Ok(());
         };

+        let snatch_guard = self.device.snatchable_lock.read();
+
         // Platform validation requires that the staging buffer always be
         // freed, even if an error occurs. All paths from here must call
         // `device.pending_writes.consume`.
@@ -489,6 +491,7 @@
         };

         let result = self.write_staging_buffer_impl(
+            &snatch_guard,
             &mut pending_writes,
             &staging_buffer,
             buffer,
@@ -522,6 +525,7 @@

         let buffer = buffer.get()?;

+        let snatch_guard = self.device.snatchable_lock.read();
         let mut pending_writes = self.pending_writes.lock();

         // At this point, we have taken ownership of the staging_buffer from the
@@ -531,6 +535,7 @@
         let staging_buffer = staging_buffer.flush();

         let result = self.write_staging_buffer_impl(
+            &snatch_guard,
             &mut pending_writes,
             &staging_buffer,
             buffer,
@@ -583,6 +588,7 @@

     fn write_staging_buffer_impl(
         &self,
+        snatch_guard: &SnatchGuard,
         pending_writes: &mut PendingWrites,
         staging_buffer: &FlushedStagingBuffer,
         buffer: Arc<Buffer>,
@@ -595,8 +601,7 @@
                 .set_single(&buffer, wgt::BufferUses::COPY_DST)
         };

-        let snatch_guard = self.device.snatchable_lock.read();
-        let dst_raw = buffer.try_raw(&snatch_guard)?;
+        let dst_raw = buffer.try_raw(snatch_guard)?;

         self.same_device_as(buffer.as_ref())?;

@@ -614,7 +619,7 @@
                     to: wgt::BufferUses::COPY_SRC,
                 },
             })
-            .chain(transition.map(|pending| pending.into_hal(&buffer, &snatch_guard)))
+            .chain(transition.map(|pending| pending.into_hal(&buffer, snatch_guard)))
             .collect::<Vec<_>>();
         let encoder = pending_writes.activate();
         unsafe {
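The `queue.rs` change is a lock-scope refactor: the snatchable lock's read guard is now acquired once by each caller and passed into `write_staging_buffer_impl` by reference, instead of being taken inside the helper mid-write, presumably so the guard is held for the whole write path and is acquired in a consistent order relative to `pending_writes`. A simplified sketch of the new call shape, with stand-in std types (the real `SnatchGuard` and `PendingWrites` are wgpu internals):

use std::sync::{Mutex, RwLock, RwLockReadGuard};

// Stand-ins for wgpu's snatchable lock and pending-writes queue;
// these are assumptions for illustration, not the real types.
struct Device {
    snatchable_lock: RwLock<()>,
    pending_writes: Mutex<Vec<u8>>,
}

impl Device {
    fn write_buffer(&self) {
        // The caller now takes the read guard up front...
        let snatch_guard = self.snatchable_lock.read().unwrap();
        let mut pending_writes = self.pending_writes.lock().unwrap();
        // ...and threads it through, so the helper borrows it instead
        // of acquiring its own guard partway through the write.
        self.write_staging_buffer_impl(&snatch_guard, &mut pending_writes);
    }

    fn write_staging_buffer_impl(
        &self,
        _snatch_guard: &RwLockReadGuard<'_, ()>,
        pending_writes: &mut Vec<u8>,
    ) {
        pending_writes.push(0); // placeholder for the actual staging copy
    }
}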
