@@ -175,7 +175,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                     | MemorySemantics::SEQUENTIALLY_CONSISTENT
             }
         };
-        let semantics = self.constant_bit32(self.span(), semantics.bits());
+        let semantics = self.constant_u32(self.span(), semantics.bits());
         if invalid_seq_cst {
             self.zombie(
                 semantics.def(self),
@@ -196,10 +196,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                     .constant_u16(self.span(), memset_fill_u16(fill_byte))
                     .def(self),
                 32 => self
-                    .constant_bit32(self.span(), memset_fill_u32(fill_byte))
+                    .constant_u32(self.span(), memset_fill_u32(fill_byte))
                     .def(self),
                 64 => self
-                    .constant_bit64(self.span(), memset_fill_u64(fill_byte))
+                    .constant_u64(self.span(), memset_fill_u64(fill_byte))
                     .def(self),
                 _ => self.fatal(format!(
                     "memset on integer width {width} not implemented yet"
@@ -314,7 +314,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             self.store(pat, ptr, Align::from_bytes(0).unwrap());
         } else {
             for index in 0..count {
-                let const_index = self.constant_bit32(self.span(), index as u32);
+                let const_index = self.constant_u32(self.span(), index as u32);
                 let gep_ptr = self.gep(pat.ty, ptr, &[const_index]);
                 self.store(pat, gep_ptr, Align::from_bytes(0).unwrap());
             }
@@ -431,7 +431,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         } else {
             let indices = indices
                 .into_iter()
-                .map(|idx| self.constant_bit32(self.span(), idx).def(self))
+                .map(|idx| self.constant_u32(self.span(), idx).def(self))
                 .collect::<Vec<_>>();
             self.emit()
                 .in_bounds_access_chain(leaf_ptr_ty, None, ptr.def(self), indices)
@@ -614,7 +614,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         };
         let indices = base_indices
             .into_iter()
-            .map(|idx| self.constant_bit32(self.span(), idx).def(self))
+            .map(|idx| self.constant_u32(self.span(), idx).def(self))
             .chain(indices)
             .collect();
         return self.emit_access_chain(
@@ -1478,7 +1478,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
         let (ptr, access_ty) = self.adjust_pointer_for_typed_access(ptr, ty);

         // TODO: Default to device scope
-        let memory = self.constant_bit32(self.span(), Scope::Device as u32);
+        let memory = self.constant_u32(self.span(), Scope::Device as u32);
         let semantics = self.ordering_to_semantics_def(order);
         let result = self
             .emit()
@@ -1611,7 +1611,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
         let val = self.bitcast(val, access_ty);

         // TODO: Default to device scope
-        let memory = self.constant_bit32(self.span(), Scope::Device as u32);
+        let memory = self.constant_u32(self.span(), Scope::Device as u32);
         let semantics = self.ordering_to_semantics_def(order);
         self.validate_atomic(val.ty, ptr.def(self));
         self.emit()
@@ -1944,7 +1944,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
     ) {
         let indices = indices
             .into_iter()
-            .map(|idx| self.constant_bit32(self.span(), idx).def(self))
+            .map(|idx| self.constant_u32(self.span(), idx).def(self))
             .collect::<Vec<_>>();
         self.emit()
             .in_bounds_access_chain(dest_ty, None, ptr.def(self), indices)
@@ -2495,7 +2495,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {

         self.validate_atomic(access_ty, dst.def(self));
         // TODO: Default to device scope
-        let memory = self.constant_bit32(self.span(), Scope::Device as u32);
+        let memory = self.constant_u32(self.span(), Scope::Device as u32);
         let semantics_equal = self.ordering_to_semantics_def(order);
         let semantics_unequal = self.ordering_to_semantics_def(failure_order);
         // Note: OpAtomicCompareExchangeWeak is deprecated, and has the same semantics
@@ -2535,7 +2535,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
         self.validate_atomic(access_ty, dst.def(self));
         // TODO: Default to device scope
         let memory = self
-            .constant_bit32(self.span(), Scope::Device as u32)
+            .constant_u32(self.span(), Scope::Device as u32)
             .def(self);
         let semantics = self.ordering_to_semantics_def(order).def(self);
         use AtomicRmwBinOp::*;
@@ -2631,7 +2631,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
         // Ignore sync scope (it only has "single thread" and "cross thread")
         // TODO: Default to device scope
         let memory = self
-            .constant_bit32(self.span(), Scope::Device as u32)
+            .constant_u32(self.span(), Scope::Device as u32)
             .def(self);
         let semantics = self.ordering_to_semantics_def(order).def(self);
         self.emit().memory_barrier(memory, semantics).unwrap();