@@ -78,7 +78,7 @@ use std::{
 };

 use rustc_const_eval::interpret::{
-    alloc_range, AllocRange, InterpResult, MPlaceTy, ScalarMaybeUninit,
+    alloc_range, AllocRange, InterpResult, MPlaceTy, Scalar,
 };
 use rustc_data_structures::fx::FxHashMap;

@@ -129,10 +129,10 @@ struct StoreElement {
     /// The timestamp of the storing thread when it performed the store
     timestamp: VTimestamp,
     /// The value of this store
-    // FIXME: this means the store is either fully initialized or fully uninitialized;
+    // FIXME: this means the store must be fully initialized;
     // we will have to change this if we want to support atomics on
-    // partially initialized data.
-    val: ScalarMaybeUninit<Provenance>,
+    // (partially) uninitialized data.
+    val: Scalar<Provenance>,

     /// Timestamp of first loads from this store element by each thread
     /// Behind a RefCell to keep load op take &self
@@ -179,7 +179,7 @@ impl StoreBufferAlloc {
     fn get_or_create_store_buffer<'tcx>(
         &self,
         range: AllocRange,
-        init: ScalarMaybeUninit<Provenance>,
+        init: Scalar<Provenance>,
     ) -> InterpResult<'tcx, Ref<'_, StoreBuffer>> {
         let access_type = self.store_buffers.borrow().access_type(range);
         let pos = match access_type {
@@ -204,7 +204,7 @@ impl StoreBufferAlloc {
     fn get_or_create_store_buffer_mut<'tcx>(
         &mut self,
         range: AllocRange,
-        init: ScalarMaybeUninit<Provenance>,
+        init: Scalar<Provenance>,
     ) -> InterpResult<'tcx, &mut StoreBuffer> {
         let buffers = self.store_buffers.get_mut();
         let access_type = buffers.access_type(range);
@@ -225,7 +225,7 @@ impl StoreBufferAlloc {
 }

 impl<'mir, 'tcx: 'mir> StoreBuffer {
-    fn new(init: ScalarMaybeUninit<Provenance>) -> Self {
+    fn new(init: Scalar<Provenance>) -> Self {
         let mut buffer = VecDeque::new();
         buffer.reserve(STORE_BUFFER_LIMIT);
         let mut ret = Self { buffer };
@@ -258,7 +258,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
         is_seqcst: bool,
         rng: &mut (impl rand::Rng + ?Sized),
         validate: impl FnOnce() -> InterpResult<'tcx>,
-    ) -> InterpResult<'tcx, (ScalarMaybeUninit<Provenance>, LoadRecency)> {
+    ) -> InterpResult<'tcx, (Scalar<Provenance>, LoadRecency)> {
         // Having a live borrow to store_buffer while calling validate_atomic_load is fine
         // because the race detector doesn't touch store_buffer
@@ -283,7 +283,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {

     fn buffered_write(
         &mut self,
-        val: ScalarMaybeUninit<Provenance>,
+        val: Scalar<Provenance>,
         global: &DataRaceState,
         thread_mgr: &ThreadManager<'_, '_>,
         is_seqcst: bool,
@@ -374,7 +374,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
     /// ATOMIC STORE IMPL in the paper (except we don't need the location's vector clock)
     fn store_impl(
         &mut self,
-        val: ScalarMaybeUninit<Provenance>,
+        val: Scalar<Provenance>,
         index: VectorIdx,
         thread_clock: &VClock,
         is_seqcst: bool,
@@ -420,7 +420,7 @@ impl StoreElement {
         &self,
         index: VectorIdx,
         clocks: &ThreadClockSet,
-    ) -> ScalarMaybeUninit<Provenance> {
+    ) -> Scalar<Provenance> {
         let _ = self.loads.borrow_mut().try_insert(index, clocks.clock[index]);
         self.val
     }
@@ -463,10 +463,10 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:

     fn buffered_atomic_rmw(
         &mut self,
-        new_val: ScalarMaybeUninit<Provenance>,
+        new_val: Scalar<Provenance>,
         place: &MPlaceTy<'tcx, Provenance>,
         atomic: AtomicRwOrd,
-        init: ScalarMaybeUninit<Provenance>,
+        init: Scalar<Provenance>,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
@@ -491,9 +491,9 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
         &self,
         place: &MPlaceTy<'tcx, Provenance>,
         atomic: AtomicReadOrd,
-        latest_in_mo: ScalarMaybeUninit<Provenance>,
+        latest_in_mo: Scalar<Provenance>,
         validate: impl FnOnce() -> InterpResult<'tcx>,
-    ) -> InterpResult<'tcx, ScalarMaybeUninit<Provenance>> {
+    ) -> InterpResult<'tcx, Scalar<Provenance>> {
         let this = self.eval_context_ref();
         if let Some(global) = &this.machine.data_race {
             let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
@@ -528,10 +528,10 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:

     fn buffered_atomic_write(
         &mut self,
-        val: ScalarMaybeUninit<Provenance>,
+        val: Scalar<Provenance>,
         dest: &MPlaceTy<'tcx, Provenance>,
         atomic: AtomicWriteOrd,
-        init: ScalarMaybeUninit<Provenance>,
+        init: Scalar<Provenance>,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(dest.ptr)?;
@@ -575,7 +575,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
         &self,
         place: &MPlaceTy<'tcx, Provenance>,
         atomic: AtomicReadOrd,
-        init: ScalarMaybeUninit<Provenance>,
+        init: Scalar<Provenance>,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_ref();

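Note: every hunk above makes the same substitution, ScalarMaybeUninit<Provenance> -> Scalar<Provenance>, so a value now has to be fully initialized before it can enter the weak-memory store buffer (see the FIXME on StoreElement::val). The standalone sketch below illustrates that shift; MockScalar, MockScalarMaybeUninit, StoreElem, and require_init are hypothetical stand-ins for illustration, not the real interpreter types.

// Illustrative sketch only: hypothetical stand-ins, not the rustc/Miri definitions.

/// Stand-in for `Scalar<Provenance>`: always a fully initialized value.
#[derive(Clone, Copy, Debug)]
struct MockScalar(u64);

/// Stand-in for the removed `ScalarMaybeUninit<Provenance>`: the value was
/// either a scalar or entirely uninitialized.
#[derive(Clone, Copy, Debug)]
enum MockScalarMaybeUninit {
    Scalar(MockScalar),
    Uninit,
}

impl MockScalarMaybeUninit {
    /// With the new representation, "uninitialized" has to be rejected before
    /// the value is buffered, instead of being carried through the buffer.
    fn require_init(self) -> Result<MockScalar, &'static str> {
        match self {
            MockScalarMaybeUninit::Scalar(s) => Ok(s),
            MockScalarMaybeUninit::Uninit => Err("uninitialized data in an atomic operation"),
        }
    }
}

/// Stand-in for `StoreElement`: its value field can only hold a fully
/// initialized scalar after this change.
struct StoreElem {
    val: MockScalar,
}

fn main() {
    let loaded = MockScalarMaybeUninit::Scalar(MockScalar(42));
    // The initialization check happens up front; only initialized values
    // ever reach the store buffer.
    let elem = StoreElem { val: loaded.require_init().unwrap() };
    println!("buffered value: {:?}", elem.val);
}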