Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 23 additions & 9 deletions chain/sync.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ import (
"github.com/decred/dcrd/blockchain/stake/v5"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/crypto/blake256"
"github.com/decred/dcrd/gcs/v4"
"github.com/decred/dcrd/mixing/mixpool"
"github.com/decred/dcrd/wire"
"github.com/jrick/wsrpc/v2"
Expand Down Expand Up @@ -223,6 +224,11 @@ func (s *Syncer) getHeaders(ctx context.Context) error {
return err
}

birthday, err := s.wallet.BirthState(ctx)
if err != nil {
return err
}

startedSynced := s.walletSynced.Load()

cnet := s.wallet.ChainParams().Net
Expand Down Expand Up @@ -253,17 +259,25 @@ func (s *Syncer) getHeaders(ctx context.Context) error {
var g errgroup.Group
for i := range headers {
g.Go(func() error {
var err error
header := headers[i]
hash := header.BlockHash()
filter, proofIndex, proof, err := s.rpc.CFilterV2(ctx, &hash)
if err != nil {
return err
}

err = validate.CFilterV2HeaderCommitment(cnet, header,
filter, proofIndex, proof)
if err != nil {
return err
var filter *gcs.FilterV2
if birthday == nil || birthday.AfterBirthday(header) {
var (
proofIndex uint32
proof []chainhash.Hash
)
filter, proofIndex, proof, err = s.rpc.CFilterV2(ctx, &hash)
if err != nil {
return err
}

err = validate.CFilterV2HeaderCommitment(cnet, header,
filter, proofIndex, proof)
if err != nil {
return err
}
}

nodes[i] = wallet.NewBlockNode(header, &hash, filter, nil)
Expand Down
15 changes: 12 additions & 3 deletions rpc/documentation/api.md
Original file line number Diff line number Diff line change
Expand Up @@ -1109,6 +1109,8 @@ ___
The `ImportPrivateKey` method imports a private key in Wallet Import Format
(WIF) encoding to a wallet account. A rescan may optionally be started to
search for transactions involving the private key's associated payment address.
If the private key is involved in transactions from before the wallet birthday
(if one is set), a rescan must be performed to download the missing cfilters.

**Request:** `ImportPrivateKeyRequest`

Expand Down Expand Up @@ -1144,7 +1146,9 @@ ___

The `ImportScript` method imports a script into the wallet. A rescan may
optionally be started to search for transactions involving the script, either
as an output or in a P2SH input.
as an output or in a P2SH input. If the script is involved in transactions
from before the wallet birthday (if one is set), a rescan must be performed
to download the missing cfilters.

**Request:** `ImportScriptRequest`

Expand Down Expand Up @@ -1191,7 +1195,9 @@ seed for a hierarchical deterministic private key that is imported into the
wallet with the supplied name and locked with the supplied password. Addresses
derived from this account MUST NOT be sent any funds. They are solely for the
use of creating stake submission scripts. A rescan may optionally be started to
search for tickets using submission scripts derived from this account.
search for tickets using submission scripts derived from this account. If
tickets would exist from before the wallet birthday (if one is set), a rescan
must be performed to download the missing cfilters.

**Request:** `ImportVotingAccountFromSeedRequest`

Expand Down Expand Up @@ -2690,7 +2696,10 @@ or account must be unlocked.
#### `BirthBlock`

The `BirthBlock` method returns the wallet's birthday block, if set. Rescans
should generally be started from after this block.
should generally be started from after this block. If a birthday is set,
cfilters from before the birthday may not be downloaded. A rescan from a
given height will move the birthday to the rescan height and download all
missing cfilters from that height.

**Request:** `BirthBlockRequest`

Expand Down
13 changes: 10 additions & 3 deletions spv/sync.go
Original file line number Diff line number Diff line change
Expand Up @@ -1662,6 +1662,11 @@ func (s *Syncer) initialSyncHeaders(ctx context.Context) error {
return res
}

birthday, err := s.wallet.BirthState(ctx)
if err != nil {
return err
}

// Stage 1: fetch headers.
headersChan := make(chan *headersBatch)
g.Go(func() error {
Expand Down Expand Up @@ -1737,9 +1742,11 @@ func (s *Syncer) initialSyncHeaders(ctx context.Context) error {
s.sidechainMu.Lock()
var missingCfilter []*wallet.BlockNode
for i := range batch.bestChain {
if batch.bestChain[i].FilterV2 == nil {
missingCfilter = batch.bestChain[i:]
break
if birthday == nil || birthday.AfterBirthday(batch.bestChain[i].Header) {
if batch.bestChain[i].FilterV2 == nil {
missingCfilter = batch.bestChain[i:]
break
}
}
}
s.sidechainMu.Unlock()
Expand Down
65 changes: 64 additions & 1 deletion wallet/rescan.go
Original file line number Diff line number Diff line change
Expand Up @@ -386,8 +386,71 @@ func (w *Wallet) Rescan(ctx context.Context, n NetworkBackend, startHash *chainh
func (w *Wallet) RescanFromHeight(ctx context.Context, n NetworkBackend, startHeight int32) error {
const op errors.Op = "wallet.RescanFromHeight"

bs, err := w.BirthState(ctx)
if err != nil {
return errors.E(op, err)
}
// Determine if the rescan start height is before the birthday.
// For time-based birthdays that have not been resolved to a
// height, look up the block header and compare timestamps.
beforeBirthday := false
if bs != nil {
if bs.SetFromTime {
var header *wire.BlockHeader
err = walletdb.View(ctx, w.db, func(tx walletdb.ReadTx) error {
ns := tx.ReadBucket(wtxmgrNamespaceKey)
hash, err := w.txStore.GetMainChainBlockHashForHeight(ns, startHeight)
if err != nil {
return err
}
header, err = w.txStore.GetBlockHeader(tx, &hash)
return err
})
if err != nil {
return errors.E(op, err)
}
beforeBirthday = !bs.AfterBirthday(header)
} else {
beforeBirthday = int32(bs.Height) > startHeight
}
}
if beforeBirthday {
// If our birthday is after the rescan height, we may
// not have the cfilters needed. Set birthday to the rescan
// height and download the filters. This may take some time
// depending on network conditions and amount of filters missing.
newBS := &udb.BirthdayState{
SetFromHeight: true,
Height: uint32(startHeight),
}
if err := w.SetBirthStateAndScan(ctx, newBS); err != nil {
return errors.E(op, err)
}
fetchMissing := true
if err := walletdb.Update(ctx, w.db, func(dbtx walletdb.ReadWriteTx) error {
if _, err := udb.MissingCFiltersHeight(dbtx, startHeight); err != nil {
// errors.NotExist is returned if no missing filters
// exist from start height. If we have them there is
// no need to fetch them again.
if errors.Is(err, errors.NotExist) {
fetchMissing = false
return nil
}
return err
}
return w.txStore.SetHaveMainChainCFilters(dbtx, false)
}); err != nil {
return errors.E(op, err)
}
if fetchMissing {
if err := w.FetchMissingCFilters(ctx, n); err != nil {
return errors.E(op, err)
}
}
}

var startHash chainhash.Hash
err := walletdb.View(ctx, w.db, func(tx walletdb.ReadTx) error {
err = walletdb.View(ctx, w.db, func(tx walletdb.ReadTx) error {
txmgrNs := tx.ReadBucket(wtxmgrNamespaceKey)
var err error
startHash, err = w.txStore.GetMainChainBlockHashForHeight(
Expand Down
96 changes: 63 additions & 33 deletions wallet/udb/txmined.go
Original file line number Diff line number Diff line change
Expand Up @@ -198,8 +198,8 @@ func (s *Store) MainChainTip(dbtx walletdb.ReadTx) (chainhash.Hash, int32) {
// If the block is already inserted and part of the main chain, an errors.Exist
// error is returned.
//
// The main chain tip may not be extended unless compact filters have been saved
// for all existing main chain blocks.
// The main chain may be extended without cfilters if this block is before the
// wallet birthday. If the filter is nil it will not be saved to the database.
func (s *Store) ExtendMainChain(ns walletdb.ReadWriteBucket, header *wire.BlockHeader, blockHash *chainhash.Hash, f *gcs2.FilterV2) error {
height := int32(header.Height)
if height < 1 {
Expand Down Expand Up @@ -266,9 +266,12 @@ func (s *Store) ExtendMainChain(ns walletdb.ReadWriteBucket, header *wire.BlockH
return err
}

// Save the compact filter.
bcf2Key := blockcf2.Key(&header.MerkleRoot)
return putRawCFilter(ns, blockHash[:], valueRawCFilter2(bcf2Key, f.Bytes()))
// Save the compact filter if we have it.
if f != nil {
bcf2Key := blockcf2.Key(&header.MerkleRoot)
return putRawCFilter(ns, blockHash[:], valueRawCFilter2(bcf2Key, f.Bytes()))
}
return nil
}

// ProcessedTxsBlockMarker returns the hash of the block which records the last
Expand Down Expand Up @@ -331,6 +334,17 @@ type BirthdayState struct {
SetFromHeight, SetFromTime bool
}

// AfterBirthday reports whether block header h is at or after the wallet
// birthday.  When the birthday was recorded from a time (SetFromTime), the
// header's timestamp is compared against the birthday time; in every other
// case the header's height is compared against the birthday height,
// regardless of the SetFromHeight flag.
func (bs *BirthdayState) AfterBirthday(h *wire.BlockHeader) bool {
	if !bs.SetFromTime {
		return h.Height >= bs.Height
	}
	// Timestamp >= bs.Time, expressed via the time package's Before.
	return !h.Timestamp.Before(bs.Time)
}

// SetBirthState sets the birthday state in the database. *BirthdayState must
// not be nil.
//
Expand Down Expand Up @@ -402,19 +416,37 @@ func (s *Store) IsMissingMainChainCFilters(dbtx walletdb.ReadTx) bool {
return len(v) != 1 || v[0] == 0
}

// SetHaveMainChainCFilters records whether cfilters are stored for every
// main chain block.  Callers should record false when the wallet birthday
// is moved back in time, since cfilters for the newly-relevant earlier
// blocks may be missing.
func (s *Store) SetHaveMainChainCFilters(dbtx walletdb.ReadWriteTx, have bool) error {
	// Persist the flag as a single byte: 1 when all main chain cfilters
	// are present, 0 otherwise.
	value := []byte{0}
	if have {
		value[0] = 1
	}
	if err := dbtx.ReadWriteBucket(wtxmgrBucketKey).Put(rootHaveCFilters, value); err != nil {
		return errors.E(errors.IO, err)
	}
	return nil
}

// MissingCFiltersHeight returns the first main chain block height at or
// after fromHeight with a missing cfilter. Errors with NotExist when all
// such main chain blocks record cfilters.
func (s *Store) MissingCFiltersHeight(dbtx walletdb.ReadTx) (int32, error) {
func MissingCFiltersHeight(dbtx walletdb.ReadTx, fromHeight int32) (int32, error) {
ns := dbtx.ReadBucket(wtxmgrBucketKey)
c := ns.NestedReadBucket(bucketBlocks).ReadCursor()
defer c.Close()
for k, v := c.First(); k != nil; k, v = c.Next() {
for k, v := c.Seek(keyBlockRecord(fromHeight)); k != nil; k, v = c.Next() {
hash := extractRawBlockRecordHash(v)
_, _, err := fetchRawCFilter2(ns, hash)
if errors.Is(err, errors.NotExist) {
height := int32(byteOrder.Uint32(k))
return height, nil
if err != nil {
if errors.Is(err, errors.NotExist) {
height := int32(byteOrder.Uint32(k))
return height, nil
}
return 0, errors.E(errors.IO, err)
}
}
return 0, errors.E(errors.NotExist)
Expand Down Expand Up @@ -442,42 +474,40 @@ func (s *Store) InsertMissingCFilters(dbtx walletdb.ReadWriteTx, blockHashes []*
}

for i, blockHash := range blockHashes {
// Ensure that blockHashes are ordered and that all previous cfilters in the
// main chain are known.
// Ensure that blockHashes are ordered. The first block in
// the batch is not required to have its parent's cfilter
// already present, as pre-birthday blocks intentionally
// have no cfilters.
header := existsBlockHeader(ns, blockHash[:])
if header == nil {
return errors.E(errors.NotExist, errors.Errorf("missing header for block %v", blockHash))
}
ok := i == 0 && *blockHash == s.chainParams.GenesisHash
var bcf2Key [gcs2.KeySize]byte
if !ok {
header := existsBlockHeader(ns, blockHash[:])
if header == nil {
return errors.E(errors.NotExist, errors.Errorf("missing header for block %v", blockHash))
}
parentHash := extractBlockHeaderParentHash(header)
merkleRoot := extractBlockHeaderMerkleRoot(header)
merkleRootHash, err := chainhash.NewHash(merkleRoot)
if err != nil {
return errors.E(errors.Invalid, errors.Errorf("invalid stored header %v", blockHash))
}
bcf2Key = blockcf2.Key(merkleRootHash)
if i == 0 {
_, _, err := fetchRawCFilter2(ns, parentHash)
ok = err == nil
} else {
ok = bytes.Equal(parentHash, blockHashes[i-1][:])
if i != 0 {
if !bytes.Equal(parentHash, blockHashes[i-1][:]) {
return errors.E(errors.Invalid, "block hashes are not ordered")
}
}
}
if !ok {
return errors.E(errors.Invalid, "block hashes are not ordered or previous cfilters are missing")
}

// Record cfilter for this block
err := putRawCFilter(ns, blockHash[:], valueRawCFilter2(bcf2Key, filters[i].Bytes()))
merkleRoot := extractBlockHeaderMerkleRoot(header)
merkleRootHash, err := chainhash.NewHash(merkleRoot)
if err != nil {
return errors.E(errors.Invalid, errors.Errorf("invalid stored header %v", blockHash))
}
bcf2Key := blockcf2.Key(merkleRootHash)
err = putRawCFilter(ns, blockHash[:], valueRawCFilter2(bcf2Key, filters[i].Bytes()))
if err != nil {
return err
}
}

// Mark all main chain cfilters as saved if the last block hash is the main
// chain tip.
// chain tip. Even if this is not the head block, all cfilters may be saved
// at this point. The caller may need to check and set rootHaveCFilters.
tip, _ := s.MainChainTip(dbtx)
if bytes.Equal(tip[:], blockHashes[len(blockHashes)-1][:]) {
err := ns.Put(rootHaveCFilters, []byte{1})
Expand Down
Loading
Loading