From 65173de0da234ad9d8ac0e41717d2ea90800aa1a Mon Sep 17 00:00:00 2001 From: ziggie Date: Sat, 10 Jan 2026 23:38:25 +0100 Subject: [PATCH 01/10] paymentsdb+sqldb: add migration related query Add a migration specific query which allows to set the failure reason when inserting a payment into the db. --- payments/db/sql_store.go | 12 +++++ sqldb/sqlc/payments.sql.go | 94 +++++++++++++++++++++++++++++++++ sqldb/sqlc/querier.go | 5 ++ sqldb/sqlc/queries/payments.sql | 26 +++++++++ 4 files changed, 137 insertions(+) diff --git a/payments/db/sql_store.go b/payments/db/sql_store.go index 1c6e3042a93..caad4013408 100644 --- a/payments/db/sql_store.go +++ b/payments/db/sql_store.go @@ -86,6 +86,18 @@ type SQLQueries interface { // DeleteFailedAttempts removes all failed HTLCs from the db for a // given payment. DeleteFailedAttempts(ctx context.Context, paymentID int64) error + + /* + Migration specific queries. + + These queries are used ONLY for the one-time migration from KV + to SQL. + */ + + // InsertPaymentMig is a migration-only variant of InsertPayment that + // allows setting fail_reason when inserting historical payments. 
+ InsertPaymentMig(ctx context.Context, arg sqlc.InsertPaymentMigParams) (int64, error) + } // BatchedSQLQueries is a version of the SQLQueries that's capable diff --git a/sqldb/sqlc/payments.sql.go b/sqldb/sqlc/payments.sql.go index b9ec3149932..764e96fe254 100644 --- a/sqldb/sqlc/payments.sql.go +++ b/sqldb/sqlc/payments.sql.go @@ -843,6 +843,55 @@ func (q *Queries) InsertPaymentAttemptFirstHopCustomRecord(ctx context.Context, return err } +const insertPaymentDuplicateMig = `-- name: InsertPaymentDuplicateMig :one +INSERT INTO payment_duplicates ( + payment_id, + payment_identifier, + amount_msat, + created_at, + fail_reason, + settle_preimage, + settle_time +) +VALUES ( + $1, + $2, + $3, + $4, + $5, + $6, + $7 +) +RETURNING id +` + +type InsertPaymentDuplicateMigParams struct { + PaymentID int64 + PaymentIdentifier []byte + AmountMsat int64 + CreatedAt time.Time + FailReason sql.NullInt32 + SettlePreimage []byte + SettleTime sql.NullTime +} + +// Insert a duplicate payment record into the payment_duplicates table and +// return its ID. +func (q *Queries) InsertPaymentDuplicateMig(ctx context.Context, arg InsertPaymentDuplicateMigParams) (int64, error) { + row := q.db.QueryRowContext(ctx, insertPaymentDuplicateMig, + arg.PaymentID, + arg.PaymentIdentifier, + arg.AmountMsat, + arg.CreatedAt, + arg.FailReason, + arg.SettlePreimage, + arg.SettleTime, + ) + var id int64 + err := row.Scan(&id) + return id, err +} + const insertPaymentFirstHopCustomRecord = `-- name: InsertPaymentFirstHopCustomRecord :exec INSERT INTO payment_first_hop_custom_records ( payment_id, @@ -918,6 +967,51 @@ func (q *Queries) InsertPaymentIntent(ctx context.Context, arg InsertPaymentInte return id, err } +const insertPaymentMig = `-- name: InsertPaymentMig :one +/* ───────────────────────────────────────────── + Migration-specific queries + + These queries are used ONLY for the one-time migration from KV to SQL. 
+ ───────────────────────────────────────────── +*/ + +INSERT INTO payments ( + amount_msat, + created_at, + payment_identifier, + fail_reason) +VALUES ( + $1, + $2, + $3, + $4 +) +RETURNING id +` + +type InsertPaymentMigParams struct { + AmountMsat int64 + CreatedAt time.Time + PaymentIdentifier []byte + FailReason sql.NullInt32 +} + +// Migration-specific payment insert that allows setting fail_reason. +// Normal InsertPayment forces fail_reason to NULL since new payments +// aren't failed yet. During migration, we're inserting historical data +// that may already be failed. +func (q *Queries) InsertPaymentMig(ctx context.Context, arg InsertPaymentMigParams) (int64, error) { + row := q.db.QueryRowContext(ctx, insertPaymentMig, + arg.AmountMsat, + arg.CreatedAt, + arg.PaymentIdentifier, + arg.FailReason, + ) + var id int64 + err := row.Scan(&id) + return id, err +} + const insertRouteHop = `-- name: InsertRouteHop :one INSERT INTO payment_route_hops ( htlc_attempt_index, diff --git a/sqldb/sqlc/querier.go b/sqldb/sqlc/querier.go index 4ea29ed58e1..08d1515f49f 100644 --- a/sqldb/sqlc/querier.go +++ b/sqldb/sqlc/querier.go @@ -149,6 +149,11 @@ type Querier interface { InsertPaymentHopCustomRecord(ctx context.Context, arg InsertPaymentHopCustomRecordParams) error // Insert a payment intent for a given payment and return its ID. InsertPaymentIntent(ctx context.Context, arg InsertPaymentIntentParams) (int64, error) + // Migration-specific payment insert that allows setting fail_reason. + // Normal InsertPayment forces fail_reason to NULL since new payments + // aren't failed yet. During migration, we're inserting historical data + // that may already be failed. 
+ InsertPaymentMig(ctx context.Context, arg InsertPaymentMigParams) (int64, error) InsertRouteHop(ctx context.Context, arg InsertRouteHopParams) (int64, error) InsertRouteHopAmp(ctx context.Context, arg InsertRouteHopAmpParams) error InsertRouteHopBlinded(ctx context.Context, arg InsertRouteHopBlindedParams) error diff --git a/sqldb/sqlc/queries/payments.sql b/sqldb/sqlc/queries/payments.sql index 419f7bf1aca..55345edb419 100644 --- a/sqldb/sqlc/queries/payments.sql +++ b/sqldb/sqlc/queries/payments.sql @@ -368,3 +368,29 @@ VALUES ( -- name: FailPayment :execresult UPDATE payments SET fail_reason = $1 WHERE payment_identifier = $2; + +/* ───────────────────────────────────────────── + Migration-specific queries + + These queries are used ONLY for the one-time migration from KV to SQL. + ───────────────────────────────────────────── +*/ + +-- name: InsertPaymentMig :one +-- Migration-specific payment insert that allows setting fail_reason. +-- Normal InsertPayment forces fail_reason to NULL since new payments +-- aren't failed yet. During migration, we're inserting historical data +-- that may already be failed. +INSERT INTO payments ( + amount_msat, + created_at, + payment_identifier, + fail_reason) +VALUES ( + @amount_msat, + @created_at, + @payment_identifier, + @fail_reason +) +RETURNING id; + From 0a6f10c24997be2d46e7b3baa2a0ef73ae91086a Mon Sep 17 00:00:00 2001 From: ziggie Date: Sat, 10 Jan 2026 23:46:17 +0100 Subject: [PATCH 02/10] sqldb+payments: add payment_duplicates for legacy duplicate payments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Older LND versions could create multiple payments for the same hash. We need to preserve those historical records during KV→SQL migration, but they don’t fit the normal payment schema because we enforce a unique payment hash constraint. Introduce a lean payment_duplicates table to store only the essential fields (identifier, amount, timestamps, settle/fail data). 
This keeps the primary payment records stable and makes the migration deterministic even when duplicate records lack attempt info. The table is intentionally minimal and can be dropped after migration if no duplicate payments exist. For now there is no logic in place which allows the noderunner to fetch duplicate payments after the migration. --- payments/db/sql_store.go | 5 ++ .../000010_payment_duplicates.down.sql | 2 + .../000010_payment_duplicates.up.sql | 47 ++++++++++++++++++ sqldb/sqlc/models.go | 11 +++++ sqldb/sqlc/payments.sql.go | 49 +++++++++++++++++++ sqldb/sqlc/querier.go | 6 +++ sqldb/sqlc/queries/payments.sql | 38 ++++++++++++++ 7 files changed, 158 insertions(+) create mode 100644 sqldb/sqlc/migrations/000010_payment_duplicates.down.sql create mode 100644 sqldb/sqlc/migrations/000010_payment_duplicates.up.sql diff --git a/payments/db/sql_store.go b/payments/db/sql_store.go index caad4013408..27e9d3568bf 100644 --- a/payments/db/sql_store.go +++ b/payments/db/sql_store.go @@ -56,6 +56,8 @@ type SQLQueries interface { FetchAllInflightAttempts(ctx context.Context, arg sqlc.FetchAllInflightAttemptsParams) ([]sqlc.PaymentHtlcAttempt, error) FetchHopsForAttempts(ctx context.Context, htlcAttemptIndices []int64) ([]sqlc.FetchHopsForAttemptsRow, error) + FetchPaymentDuplicates(ctx context.Context, paymentID int64) ([]sqlc.PaymentDuplicate, error) + FetchPaymentLevelFirstHopCustomRecords(ctx context.Context, paymentIDs []int64) ([]sqlc.PaymentFirstHopCustomRecord, error) FetchRouteLevelFirstHopCustomRecords(ctx context.Context, htlcAttemptIndices []int64) ([]sqlc.PaymentAttemptFirstHopCustomRecord, error) FetchHopLevelCustomRecords(ctx context.Context, hopIDs []int64) ([]sqlc.PaymentHopCustomRecord, error) @@ -98,6 +100,9 @@ type SQLQueries interface { // allows setting fail_reason when inserting historical payments. 
InsertPaymentMig(ctx context.Context, arg sqlc.InsertPaymentMigParams) (int64, error) + // InsertPaymentDuplicateMig inserts a duplicate payment record during + // migration. + InsertPaymentDuplicateMig(ctx context.Context, arg sqlc.InsertPaymentDuplicateMigParams) (int64, error) } // BatchedSQLQueries is a version of the SQLQueries that's capable diff --git a/sqldb/sqlc/migrations/000010_payment_duplicates.down.sql b/sqldb/sqlc/migrations/000010_payment_duplicates.down.sql new file mode 100644 index 00000000000..39c4a313a46 --- /dev/null +++ b/sqldb/sqlc/migrations/000010_payment_duplicates.down.sql @@ -0,0 +1,2 @@ +DROP INDEX IF EXISTS idx_payment_duplicates_payment_id; +DROP TABLE IF EXISTS payment_duplicates; diff --git a/sqldb/sqlc/migrations/000010_payment_duplicates.up.sql b/sqldb/sqlc/migrations/000010_payment_duplicates.up.sql new file mode 100644 index 00000000000..c661f5344b3 --- /dev/null +++ b/sqldb/sqlc/migrations/000010_payment_duplicates.up.sql @@ -0,0 +1,47 @@ +-- ───────────────────────────────────────────── +-- Payment Duplicate Records Table +-- ───────────────────────────────────────────── +-- Stores duplicate payment records that were created in older versions +-- of lnd. This table is intentionally minimal and is expected to be dropped +-- in the future especially if no duplicates were migrated. +-- ───────────────────────────────────────────── + +CREATE TABLE IF NOT EXISTS payment_duplicates ( + -- Primary key for the duplicate record. + id INTEGER PRIMARY KEY, + + -- Reference to the primary payment this duplicate belongs to. + payment_id BIGINT NOT NULL REFERENCES payments (id) ON DELETE CASCADE, + + -- Logical identifier for the duplicate payment. This is the payment hash + -- of the duplicate payment. + payment_identifier BLOB NOT NULL, + + -- Amount of the duplicate payment in millisatoshis. + amount_msat BIGINT NOT NULL, + + -- Timestamp when the duplicate payment was created. 
+ created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + + -- Failure reason for failed payments (if known). + fail_reason INTEGER, + + -- Settlement payload for succeeded payments (if known). + settle_preimage BLOB, + + -- Settlement time for succeeded payments (if known). + settle_time TIMESTAMP, + + -- Ensure we record either a failure reason or settlement data. + -- During the migration if we encounter a duplicate payment that has no + -- failure reason or settlement data, we will mark it as failed. Duplicate + -- payments were a bug in older versions of LND, so we can be sure if a + -- duplicate payment has no failure reason or settlement data, the + -- corresponding HTLC has been failed. + CONSTRAINT chk_payment_duplicates_outcome + CHECK (fail_reason IS NOT NULL OR settle_preimage IS NOT NULL) +); + +-- Index for efficient lookup by primary payment. +CREATE INDEX IF NOT EXISTS idx_payment_duplicates_payment_id +ON payment_duplicates(payment_id); diff --git a/sqldb/sqlc/models.go b/sqldb/sqlc/models.go index 97df2d6f6c5..6fa20d4a4ae 100644 --- a/sqldb/sqlc/models.go +++ b/sqldb/sqlc/models.go @@ -218,6 +218,17 @@ type PaymentAttemptFirstHopCustomRecord struct { Value []byte } +type PaymentDuplicate struct { + ID int64 + PaymentID int64 + PaymentIdentifier []byte + AmountMsat int64 + CreatedAt time.Time + FailReason sql.NullInt32 + SettlePreimage []byte + SettleTime sql.NullTime +} + type PaymentFirstHopCustomRecord struct { ID int64 PaymentID int64 diff --git a/sqldb/sqlc/payments.sql.go b/sqldb/sqlc/payments.sql.go index 764e96fe254..f658654ae93 100644 --- a/sqldb/sqlc/payments.sql.go +++ b/sqldb/sqlc/payments.sql.go @@ -479,6 +479,55 @@ func (q *Queries) FetchPayment(ctx context.Context, paymentIdentifier []byte) (F return i, err } +const fetchPaymentDuplicates = `-- name: FetchPaymentDuplicates :many +SELECT + id, + payment_id, + payment_identifier, + amount_msat, + created_at, + fail_reason, + settle_preimage, + settle_time +FROM payment_duplicates 
+WHERE payment_id = $1 +ORDER BY id ASC +` + +// Fetch all duplicate payment records from the payment_duplicates table for +// a given payment ID. +func (q *Queries) FetchPaymentDuplicates(ctx context.Context, paymentID int64) ([]PaymentDuplicate, error) { + rows, err := q.db.QueryContext(ctx, fetchPaymentDuplicates, paymentID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []PaymentDuplicate + for rows.Next() { + var i PaymentDuplicate + if err := rows.Scan( + &i.ID, + &i.PaymentID, + &i.PaymentIdentifier, + &i.AmountMsat, + &i.CreatedAt, + &i.FailReason, + &i.SettlePreimage, + &i.SettleTime, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const fetchPaymentLevelFirstHopCustomRecords = `-- name: FetchPaymentLevelFirstHopCustomRecords :many SELECT l.id, diff --git a/sqldb/sqlc/querier.go b/sqldb/sqlc/querier.go index 08d1515f49f..cbcec79d57e 100644 --- a/sqldb/sqlc/querier.go +++ b/sqldb/sqlc/querier.go @@ -48,6 +48,9 @@ type Querier interface { FetchHtlcAttemptResolutionsForPayments(ctx context.Context, paymentIds []int64) ([]FetchHtlcAttemptResolutionsForPaymentsRow, error) FetchHtlcAttemptsForPayments(ctx context.Context, paymentIds []int64) ([]FetchHtlcAttemptsForPaymentsRow, error) FetchPayment(ctx context.Context, paymentIdentifier []byte) (FetchPaymentRow, error) + // Fetch all duplicate payment records from the payment_duplicates table for + // a given payment ID. + FetchPaymentDuplicates(ctx context.Context, paymentID int64) ([]PaymentDuplicate, error) FetchPaymentLevelFirstHopCustomRecords(ctx context.Context, paymentIds []int64) ([]PaymentFirstHopCustomRecord, error) // Batch fetch payment and intent data for a set of payment IDs. 
// Used to avoid fetching redundant payment data when processing multiple @@ -145,6 +148,9 @@ type Querier interface { // payment process. InsertPayment(ctx context.Context, arg InsertPaymentParams) (int64, error) InsertPaymentAttemptFirstHopCustomRecord(ctx context.Context, arg InsertPaymentAttemptFirstHopCustomRecordParams) error + // Insert a duplicate payment record into the payment_duplicates table and + // return its ID. + InsertPaymentDuplicateMig(ctx context.Context, arg InsertPaymentDuplicateMigParams) (int64, error) InsertPaymentFirstHopCustomRecord(ctx context.Context, arg InsertPaymentFirstHopCustomRecordParams) error InsertPaymentHopCustomRecord(ctx context.Context, arg InsertPaymentHopCustomRecordParams) error // Insert a payment intent for a given payment and return its ID. diff --git a/sqldb/sqlc/queries/payments.sql b/sqldb/sqlc/queries/payments.sql index 55345edb419..ee3266522aa 100644 --- a/sqldb/sqlc/queries/payments.sql +++ b/sqldb/sqlc/queries/payments.sql @@ -40,6 +40,22 @@ FROM payments p LEFT JOIN payment_intents i ON i.payment_id = p.id WHERE p.payment_identifier = $1; +-- name: FetchPaymentDuplicates :many +-- Fetch all duplicate payment records from the payment_duplicates table for +-- a given payment ID. +SELECT + id, + payment_id, + payment_identifier, + amount_msat, + created_at, + fail_reason, + settle_preimage, + settle_time +FROM payment_duplicates +WHERE payment_id = $1 +ORDER BY id ASC; + -- name: CountPayments :one SELECT COUNT(*) FROM payments; @@ -394,3 +410,25 @@ VALUES ( ) RETURNING id; +-- name: InsertPaymentDuplicateMig :one +-- Insert a duplicate payment record into the payment_duplicates table and +-- return its ID. 
+INSERT INTO payment_duplicates ( + payment_id, + payment_identifier, + amount_msat, + created_at, + fail_reason, + settle_preimage, + settle_time +) +VALUES ( + @payment_id, + @payment_identifier, + @amount_msat, + @created_at, + @fail_reason, + @settle_preimage, + @settle_time +) +RETURNING id; From 7fcf8f32813cfa7db2f64af730e2ccca6ce34732 Mon Sep 17 00:00:00 2001 From: ziggie Date: Sat, 10 Jan 2026 23:58:49 +0100 Subject: [PATCH 03/10] payments/migration1: freeze core payment code Copy the core payments/db code into payments/db/migration1 and add the required sqlc-generated types/queries from sqldb/sqlc. This effectively freezes the migration code so it stays robust against future query or schema changes in the main payments package. --- payments/db/migration1/codec.go | 141 ++ payments/db/migration1/errors.go | 149 ++ payments/db/migration1/interface.go | 140 ++ .../db/migration1/kv_duplicate_payments.go | 250 ++ payments/db/migration1/kv_store.go | 2121 +++++++++++++++++ payments/db/migration1/log.go | 32 + payments/db/migration1/options.go | 26 + payments/db/migration1/payment.go | 836 +++++++ payments/db/migration1/payment_status.go | 257 ++ payments/db/migration1/query.go | 75 + payments/db/migration1/sql_converters.go | 275 +++ payments/db/migration1/sql_store.go | 1972 +++++++++++++++ payments/db/migration1/sqlc/db.go | 31 + payments/db/migration1/sqlc/db_custom.go | 123 + payments/db/migration1/sqlc/models.go | 111 + payments/db/migration1/sqlc/payments.sql.go | 1229 ++++++++++ 16 files changed, 7768 insertions(+) create mode 100644 payments/db/migration1/codec.go create mode 100644 payments/db/migration1/errors.go create mode 100644 payments/db/migration1/interface.go create mode 100644 payments/db/migration1/kv_duplicate_payments.go create mode 100644 payments/db/migration1/kv_store.go create mode 100644 payments/db/migration1/log.go create mode 100644 payments/db/migration1/options.go create mode 100644 payments/db/migration1/payment.go create mode 
100644 payments/db/migration1/payment_status.go create mode 100644 payments/db/migration1/query.go create mode 100644 payments/db/migration1/sql_converters.go create mode 100644 payments/db/migration1/sql_store.go create mode 100644 payments/db/migration1/sqlc/db.go create mode 100644 payments/db/migration1/sqlc/db_custom.go create mode 100644 payments/db/migration1/sqlc/models.go create mode 100644 payments/db/migration1/sqlc/payments.sql.go diff --git a/payments/db/migration1/codec.go b/payments/db/migration1/codec.go new file mode 100644 index 00000000000..926fb66d181 --- /dev/null +++ b/payments/db/migration1/codec.go @@ -0,0 +1,141 @@ +package migration1 + +import ( + "encoding/binary" + "errors" + "io" + "time" + + "github.com/lightningnetwork/lnd/channeldb" +) + +// Big endian is the preferred byte order, due to cursor scans over +// integer keys iterating in order. +var byteOrder = binary.BigEndian + +// UnknownElementType is an alias for channeldb.UnknownElementType. +type UnknownElementType = channeldb.UnknownElementType + +// ReadElement deserializes a single element from the provided io.Reader. +func ReadElement(r io.Reader, element interface{}) error { + err := channeldb.ReadElement(r, element) + switch { + // Known to channeldb codec. + case err == nil: + return nil + + // Fail if error is not UnknownElementType. + default: + var unknownElementType UnknownElementType + if !errors.As(err, &unknownElementType) { + return err + } + } + + // Process any paymentsdb-specific extensions to the codec. + switch e := element.(type) { + case *paymentIndexType: + if err := binary.Read(r, byteOrder, e); err != nil { + return err + } + + // Type is still unknown to paymentsdb extensions, fail. + default: + return channeldb.NewUnknownElementType( + "ReadElement", element, + ) + } + + return nil +} + +// WriteElement serializes a single element into the provided io.Writer. 
+func WriteElement(w io.Writer, element interface{}) error { + err := channeldb.WriteElement(w, element) + switch { + // Known to channeldb codec. + case err == nil: + return nil + + // Fail if error is not UnknownElementType. + default: + var unknownElementType UnknownElementType + if !errors.As(err, &unknownElementType) { + return err + } + } + + // Process any paymentsdb-specific extensions to the codec. + switch e := element.(type) { + case paymentIndexType: + if err := binary.Write(w, byteOrder, e); err != nil { + return err + } + + // Type is still unknown to paymentsdb extensions, fail. + default: + return channeldb.NewUnknownElementType( + "WriteElement", element, + ) + } + + return nil +} + +// WriteElements serializes a variadic list of elements into the given +// io.Writer. +func WriteElements(w io.Writer, elements ...interface{}) error { + for _, element := range elements { + if err := WriteElement(w, element); err != nil { + return err + } + } + + return nil +} + +// ReadElements deserializes the provided io.Reader into a variadic list of +// target elements. +func ReadElements(r io.Reader, elements ...interface{}) error { + for _, element := range elements { + if err := ReadElement(r, element); err != nil { + return err + } + } + + return nil +} + +// deserializeTime deserializes time as unix nanoseconds. +func deserializeTime(r io.Reader) (time.Time, error) { + var scratch [8]byte + if _, err := io.ReadFull(r, scratch[:]); err != nil { + return time.Time{}, err + } + + // Convert to time.Time. Interpret unix nano time zero as a zero + // time.Time value. + unixNano := byteOrder.Uint64(scratch[:]) + if unixNano == 0 { + return time.Time{}, nil + } + + return time.Unix(0, int64(unixNano)), nil +} + +// serializeTime serializes time as unix nanoseconds. +func serializeTime(w io.Writer, t time.Time) error { + var scratch [8]byte + + // Convert to unix nano seconds, but only if time is non-zero. 
Calling + // UnixNano() on a zero time yields an undefined result. + var unixNano int64 + if !t.IsZero() { + unixNano = t.UnixNano() + } + + byteOrder.PutUint64(scratch[:], uint64(unixNano)) + _, err := w.Write(scratch[:]) + + return err +} diff --git a/payments/db/migration1/errors.go b/payments/db/migration1/errors.go new file mode 100644 index 00000000000..44c3981e84b --- /dev/null +++ b/payments/db/migration1/errors.go @@ -0,0 +1,149 @@ +package migration1 + +import "errors" + +var ( + // ErrAlreadyPaid signals we have already paid this payment hash. + ErrAlreadyPaid = errors.New("invoice is already paid") + + // ErrPaymentInFlight signals that payment for this payment hash is + // already "in flight" on the network. + ErrPaymentInFlight = errors.New("payment is in transition") + + // ErrPaymentExists is returned when we try to initialize an already + // existing payment that is not failed. + ErrPaymentExists = errors.New("payment already exists") + + // ErrPaymentInternal is returned when performing the payment has a + // conflicting state, such as, + // - payment has StatusSucceeded but remaining amount is not zero. + // - payment has StatusInitiated but remaining amount is zero. + // - payment has StatusFailed but remaining amount is zero. + ErrPaymentInternal = errors.New("internal error") + + // ErrPaymentNotInitiated is returned if the payment wasn't initiated. + ErrPaymentNotInitiated = errors.New("payment isn't initiated") + + // ErrPaymentAlreadySucceeded is returned in the event we attempt to + // change the status of a payment already succeeded. + ErrPaymentAlreadySucceeded = errors.New("payment is already succeeded") + + // ErrPaymentAlreadyFailed is returned in the event we attempt to alter + // a failed payment. + ErrPaymentAlreadyFailed = errors.New("payment has already failed") + + // ErrUnknownPaymentStatus is returned when we do not recognize the + // existing state of a payment. 
+ ErrUnknownPaymentStatus = errors.New("unknown payment status") + + // ErrPaymentTerminal is returned if we attempt to alter a payment that + // already has reached a terminal condition. + ErrPaymentTerminal = errors.New("payment has reached terminal " + + "condition") + + // ErrAttemptAlreadySettled is returned if we try to alter an already + // settled HTLC attempt. + ErrAttemptAlreadySettled = errors.New("attempt already settled") + + // ErrAttemptAlreadyFailed is returned if we try to alter an already + // failed HTLC attempt. + ErrAttemptAlreadyFailed = errors.New("attempt already failed") + + // ErrValueMismatch is returned if we try to register a non-MPP attempt + // with an amount that doesn't match the payment amount. + ErrValueMismatch = errors.New("attempted value doesn't match payment " + + "amount") + + // ErrValueExceedsAmt is returned if we try to register an attempt that + // would take the total sent amount above the payment amount. + ErrValueExceedsAmt = errors.New("attempted value exceeds payment " + + "amount") + + // ErrNonMPPayment is returned if we try to register an MPP attempt for + // a payment that already has a non-MPP attempt registered. + ErrNonMPPayment = errors.New("payment has non-MPP attempts") + + // ErrMPPayment is returned if we try to register a non-MPP attempt for + // a payment that already has an MPP attempt registered. + ErrMPPayment = errors.New("payment has MPP attempts") + + // ErrMPPRecordInBlindedPayment is returned if we try to register an + // attempt with an MPP record for a payment to a blinded path. + ErrMPPRecordInBlindedPayment = errors.New("blinded payment cannot " + + "contain MPP records") + + // ErrBlindedPaymentTotalAmountMismatch is returned if we try to + // register an HTLC shard to a blinded route where the total amount + // doesn't match existing shards. 
+ ErrBlindedPaymentTotalAmountMismatch = errors.New("blinded path " + + "total amount mismatch") + + // ErrMixedBlindedAndNonBlindedPayments is returned if we try to + // register a non-blinded attempt to a payment which uses a blinded + // paths or vice versa. + ErrMixedBlindedAndNonBlindedPayments = errors.New("mixed blinded and " + + "non-blinded payments") + + // ErrBlindedPaymentMissingTotalAmount is returned if we try to + // register a blinded payment attempt where the final hop doesn't set + // the total amount. + ErrBlindedPaymentMissingTotalAmount = errors.New("blinded payment " + + "final hop must set total amount") + + // ErrMPPPaymentAddrMismatch is returned if we try to register an MPP + // shard where the payment address doesn't match existing shards. + ErrMPPPaymentAddrMismatch = errors.New("payment address mismatch") + + // ErrMPPTotalAmountMismatch is returned if we try to register an MPP + // shard where the total amount doesn't match existing shards. + ErrMPPTotalAmountMismatch = errors.New("mp payment total amount " + + "mismatch") + + // ErrPaymentPendingSettled is returned when we try to add a new + // attempt to a payment that has at least one of its HTLCs settled. + ErrPaymentPendingSettled = errors.New("payment has settled htlcs") + + // ErrPaymentPendingFailed is returned when we try to add a new attempt + // to a payment that already has a failure reason. + ErrPaymentPendingFailed = errors.New("payment has failure reason") + + // ErrSentExceedsTotal is returned if the payment's current total sent + // amount exceed the total amount. + ErrSentExceedsTotal = errors.New("total sent exceeds total amount") + + // ErrNoAttemptInfo is returned when no attempt info is stored yet. + ErrNoAttemptInfo = errors.New("unable to find attempt info for " + + "inflight payment") +) + +// KV backend specific errors. +var ( + // ErrNoSequenceNumber is returned if we look up a payment which does + // not have a sequence number. 
+ ErrNoSequenceNumber = errors.New("sequence number not found") + + // ErrDuplicateNotFound is returned when we lookup a payment by its + // index and cannot find a payment with a matching sequence number. + ErrDuplicateNotFound = errors.New("duplicate payment not found") + + // ErrNoDuplicateBucket is returned when we expect to find duplicates + // when looking up a payment from its index, but the payment does not + // have any. + ErrNoDuplicateBucket = errors.New("expected duplicate bucket") + + // ErrNoDuplicateNestedBucket is returned if we do not find duplicate + // payments in their own sub-bucket. + ErrNoDuplicateNestedBucket = errors.New("nested duplicate bucket not " + + "found") + + // ErrNoSequenceNrIndex is returned when an attempt to lookup a payment + // index is made for a sequence number that is not indexed. + // + // NOTE: Only used for the kv backend. + ErrNoSequenceNrIndex = errors.New("payment sequence number index " + + "does not exist") + + // errMaxPaymentsReached is used internally to signal that the maximum + // number of payments has been reached during a paginated query. + errMaxPaymentsReached = errors.New("max payments reached") +) diff --git a/payments/db/migration1/interface.go b/payments/db/migration1/interface.go new file mode 100644 index 00000000000..7d47118e928 --- /dev/null +++ b/payments/db/migration1/interface.go @@ -0,0 +1,140 @@ +package migration1 + +import ( + "context" + + "github.com/lightningnetwork/lnd/lntypes" +) + +// DB represents the interface to the underlying payments database. +type DB interface { + PaymentReader + PaymentWriter +} + +// PaymentReader represents the interface to read operations from the payments +// database. +type PaymentReader interface { + // QueryPayments queries the payments database and should support + // pagination. + QueryPayments(ctx context.Context, query Query) (Response, error) + + // FetchPayment fetches the payment corresponding to the given payment + // hash. 
+ FetchPayment(ctx context.Context, + paymentHash lntypes.Hash) (*MPPayment, error) + + // FetchInFlightPayments returns all payments with status InFlight. + FetchInFlightPayments(ctx context.Context) ([]*MPPayment, error) +} + +// PaymentWriter represents the interface to write operations to the payments +// database. +type PaymentWriter interface { + // DeletePayment deletes a payment from the DB given its payment hash. + DeletePayment(ctx context.Context, paymentHash lntypes.Hash, + failedAttemptsOnly bool) error + + // DeletePayments deletes all payments from the DB given the specified + // flags. + DeletePayments(ctx context.Context, failedOnly, + failedAttemptsOnly bool) (int, error) + + PaymentControl +} + +// PaymentControl represents the interface to control the payment lifecycle and +// its database operations. This interface represents the control flow of how +// a payment should be handled in the database. They are not just writing +// operations but they inherently represent the flow of a payment. The methods +// are called in the following order. +// +// 1. InitPayment. +// 2. RegisterAttempt (a payment can have multiple attempts). +// 3. SettleAttempt or FailAttempt (attempts can also fail as long as the +// sending amount will be eventually settled). +// 4. Payment succeeds or "Fail" is called. +// 5. DeleteFailedAttempts is called which will delete all failed attempts +// for a payment to clean up the database. +type PaymentControl interface { + // InitPayment checks that no other payment with the same payment hash + // exists in the database before creating a new payment. However, it + // should allow the user making a subsequent payment if the payment is + // in a Failed state. + InitPayment(context.Context, lntypes.Hash, *PaymentCreationInfo) error + + // RegisterAttempt atomically records the provided HTLCAttemptInfo. + // + // IMPORTANT: Callers MUST serialize calls to RegisterAttempt for the + // same payment hash. 
Concurrent calls will result in race conditions + // where both calls read the same initial payment state, validate + // against stale data, and could cause overpayment. For example: + // - Both goroutines fetch payment with 400 sats sent + // - Both validate sending 650 sats won't overpay (within limit) + // - Both commit successfully + // - Result: 1700 sats sent, exceeding the payment amount + // The payment router/controller layer is responsible for ensuring + // serialized access per payment hash. + RegisterAttempt(context.Context, lntypes.Hash, + *HTLCAttemptInfo) (*MPPayment, error) + + // SettleAttempt marks the given attempt settled with the preimage. If + // this is a multi shard payment, this might implicitly mean the + // full payment succeeded. + // + // After invoking this method, InitPayment should always return an + // error to prevent us from making duplicate payments to the same + // payment hash. The provided preimage is atomically saved to the DB + // for record keeping. + SettleAttempt(context.Context, lntypes.Hash, uint64, + *HTLCSettleInfo) (*MPPayment, error) + + // FailAttempt marks the given payment attempt failed. + FailAttempt(context.Context, lntypes.Hash, uint64, + *HTLCFailInfo) (*MPPayment, error) + + // Fail transitions a payment into the Failed state, and records + // the ultimate reason the payment failed. Note that this should only + // be called when all active attempts are already failed. After + // invoking this method, InitPayment should return nil on its next call + // for this payment hash, allowing the user to make a subsequent + // payment. + Fail(context.Context, lntypes.Hash, FailureReason) (*MPPayment, error) + + // DeleteFailedAttempts removes all failed HTLCs from the db. It should + // be called for a given payment whenever all inflight htlcs are + // completed, and the payment has reached a final terminal state. 
+ DeleteFailedAttempts(context.Context, lntypes.Hash) error +} + +// DBMPPayment is an interface that represents the payment state during a +// payment lifecycle. +type DBMPPayment interface { + // GetState returns the current state of the payment. + GetState() *MPPaymentState + + // Terminated returns true if the payment is in a final state. + Terminated() bool + + // GetStatus returns the current status of the payment. + GetStatus() PaymentStatus + + // NeedWaitAttempts specifies whether the payment needs to wait for the + // outcome of an attempt. + NeedWaitAttempts() (bool, error) + + // GetHTLCs returns all HTLCs of this payment. + GetHTLCs() []HTLCAttempt + + // InFlightHTLCs returns all HTLCs that are in flight. + InFlightHTLCs() []HTLCAttempt + + // AllowMoreAttempts is used to decide whether we can safely attempt + // more HTLCs for a given payment state. Return an error if the payment + // is in an unexpected state. + AllowMoreAttempts() (bool, error) + + // TerminalInfo returns the settled HTLC attempt or the payment's + // failure reason. + TerminalInfo() (*HTLCAttempt, *FailureReason) +} diff --git a/payments/db/migration1/kv_duplicate_payments.go b/payments/db/migration1/kv_duplicate_payments.go new file mode 100644 index 00000000000..2c7026766fc --- /dev/null +++ b/payments/db/migration1/kv_duplicate_payments.go @@ -0,0 +1,250 @@ +package migration1 + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "time" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/lightningnetwork/lnd/kvdb" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing/route" +) + +var ( + // duplicatePaymentsBucket is the name of a optional sub-bucket within + // the payment hash bucket, that is used to hold duplicate payments to a + // payment hash. This is needed to support information from earlier + // versions of lnd, where it was possible to pay to a payment hash more + // than once. 
+ duplicatePaymentsBucket = []byte("payment-duplicate-bucket") + + // duplicatePaymentSettleInfoKey is a key used in the payment's + // sub-bucket to store the settle info of the payment. + duplicatePaymentSettleInfoKey = []byte("payment-settle-info") + + // duplicatePaymentAttemptInfoKey is a key used in the payment's + // sub-bucket to store the info about the latest attempt that was done + // for the payment in question. + duplicatePaymentAttemptInfoKey = []byte("payment-attempt-info") + + // duplicatePaymentCreationInfoKey is a key used in the payment's + // sub-bucket to store the creation info of the payment. + duplicatePaymentCreationInfoKey = []byte("payment-creation-info") + + // duplicatePaymentFailInfoKey is a key used in the payment's sub-bucket + // to store information about the reason a payment failed. + duplicatePaymentFailInfoKey = []byte("payment-fail-info") + + // duplicatePaymentSequenceKey is a key used in the payment's sub-bucket + // to store the sequence number of the payment. + duplicatePaymentSequenceKey = []byte("payment-sequence-key") +) + +// duplicateHTLCAttemptInfo contains static information about a specific HTLC +// attempt for a payment. This information is used by the router to handle any +// errors coming back after an attempt is made, and to query the switch about +// the status of the attempt. +type duplicateHTLCAttemptInfo struct { + // attemptID is the unique ID used for this attempt. + attemptID uint64 + + // sessionKey is the ephemeral key used for this attempt. + sessionKey [btcec.PrivKeyBytesLen]byte + + // route is the route attempted to send the HTLC. + route route.Route +} + +// fetchDuplicatePaymentStatus fetches the payment status of the payment. If +// the payment isn't found, it will return error `ErrPaymentNotInitiated`. 
+func fetchDuplicatePaymentStatus(bucket kvdb.RBucket) (PaymentStatus, error) { + if bucket.Get(duplicatePaymentSettleInfoKey) != nil { + return StatusSucceeded, nil + } + + if bucket.Get(duplicatePaymentFailInfoKey) != nil { + return StatusFailed, nil + } + + if bucket.Get(duplicatePaymentCreationInfoKey) != nil { + return StatusInFlight, nil + } + + return 0, ErrPaymentNotInitiated +} + +func deserializeDuplicateHTLCAttemptInfo(r io.Reader) ( + *duplicateHTLCAttemptInfo, error) { + + a := &duplicateHTLCAttemptInfo{} + err := ReadElements(r, &a.attemptID, &a.sessionKey) + if err != nil { + return nil, err + } + a.route, err = DeserializeRoute(r) + if err != nil { + return nil, err + } + + return a, nil +} + +func deserializeDuplicatePaymentCreationInfo(r io.Reader) ( + *PaymentCreationInfo, error) { + + var scratch [8]byte + + c := &PaymentCreationInfo{} + + if _, err := io.ReadFull(r, c.PaymentIdentifier[:]); err != nil { + return nil, err + } + + if _, err := io.ReadFull(r, scratch[:]); err != nil { + return nil, err + } + c.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:])) + + if _, err := io.ReadFull(r, scratch[:]); err != nil { + return nil, err + } + c.CreationTime = time.Unix(int64(byteOrder.Uint64(scratch[:])), 0) + + if _, err := io.ReadFull(r, scratch[:4]); err != nil { + return nil, err + } + + reqLen := byteOrder.Uint32(scratch[:4]) + payReq := make([]byte, reqLen) + if reqLen > 0 { + if _, err := io.ReadFull(r, payReq); err != nil { + return nil, err + } + } + c.PaymentRequest = payReq + + return c, nil +} + +func fetchDuplicatePayment(bucket kvdb.RBucket) (*MPPayment, error) { + seqBytes := bucket.Get(duplicatePaymentSequenceKey) + if seqBytes == nil { + return nil, fmt.Errorf("sequence number not found") + } + + sequenceNum := binary.BigEndian.Uint64(seqBytes) + + // Get the payment status. + paymentStatus, err := fetchDuplicatePaymentStatus(bucket) + if err != nil { + return nil, err + } + + // Get the PaymentCreationInfo. 
+ b := bucket.Get(duplicatePaymentCreationInfoKey) + if b == nil { + return nil, fmt.Errorf("creation info not found") + } + + r := bytes.NewReader(b) + creationInfo, err := deserializeDuplicatePaymentCreationInfo(r) + if err != nil { + return nil, err + } + + // Get failure reason if available. + var failureReason *FailureReason + b = bucket.Get(duplicatePaymentFailInfoKey) + if b != nil { + reason := FailureReason(b[0]) + failureReason = &reason + } + + payment := &MPPayment{ + SequenceNum: sequenceNum, + Info: creationInfo, + FailureReason: failureReason, + Status: paymentStatus, + } + + // Get the HTLCAttemptInfo. It can be absent. + b = bucket.Get(duplicatePaymentAttemptInfoKey) + if b != nil { + r = bytes.NewReader(b) + attempt, err := deserializeDuplicateHTLCAttemptInfo(r) + if err != nil { + return nil, err + } + + htlc := HTLCAttempt{ + HTLCAttemptInfo: HTLCAttemptInfo{ + AttemptID: attempt.attemptID, + Route: attempt.route, + sessionKey: attempt.sessionKey, + }, + } + + // Get the payment preimage. This is only found for + // successful payments. + b = bucket.Get(duplicatePaymentSettleInfoKey) + if b != nil { + var preimg lntypes.Preimage + copy(preimg[:], b) + + htlc.Settle = &HTLCSettleInfo{ + Preimage: preimg, + SettleTime: time.Time{}, + } + } else { + // Otherwise the payment must have failed. + htlc.Failure = &HTLCFailInfo{ + FailTime: time.Time{}, + } + } + + payment.HTLCs = []HTLCAttempt{htlc} + } + + return payment, nil +} + +func fetchDuplicatePayments(paymentHashBucket kvdb.RBucket) ([]*MPPayment, + error) { + + var payments []*MPPayment + + // For older versions of lnd, duplicate payments to a payment has was + // possible. These will be found in a sub-bucket indexed by their + // sequence number if available. 
+ dup := paymentHashBucket.NestedReadBucket(duplicatePaymentsBucket)
+ if dup == nil {
+ return nil, nil
+ }
+
+ err := dup.ForEach(func(k, v []byte) error {
+ subBucket := dup.NestedReadBucket(k)
+ if subBucket == nil {
+ // We expect one bucket for each duplicate to be found.
+ return fmt.Errorf("non bucket element " +
+ "in duplicate bucket")
+ }
+
+ p, err := fetchDuplicatePayment(subBucket)
+ if err != nil {
+ return err
+ }
+
+ payments = append(payments, p)
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return payments, nil
+}
diff --git a/payments/db/migration1/kv_store.go b/payments/db/migration1/kv_store.go
new file mode 100644
index 00000000000..1ec54b7fbcb
--- /dev/null
+++ b/payments/db/migration1/kv_store.go
@@ -0,0 +1,2121 @@
+package migration1
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/btcsuite/btcd/btcec/v2"
+ "github.com/btcsuite/btcd/wire"
+ "github.com/lightningnetwork/lnd/channeldb"
+ "github.com/lightningnetwork/lnd/kvdb"
+ "github.com/lightningnetwork/lnd/lntypes"
+ "github.com/lightningnetwork/lnd/lnwire"
+ "github.com/lightningnetwork/lnd/record"
+ "github.com/lightningnetwork/lnd/routing/route"
+ "github.com/lightningnetwork/lnd/tlv"
+)
+
+const (
+ // paymentSeqBlockSize is the block size used when we batch allocate
+ // payment sequences for future payments.
+ paymentSeqBlockSize = 1000
+
+ // paymentProgressLogInterval is the interval we use for limiting the
+ // logging output of payment processing.
+ paymentProgressLogInterval = 30 * time.Second
+)
+
+//nolint:ll
+var (
+ // paymentsRootBucket is the name of the top-level bucket within the
+ // database that stores all data related to payments. Within this
+ // bucket, each payment hash has its own sub-bucket keyed by its payment
+ // hash. 
+ // + // Bucket hierarchy: + // + // root-bucket + // | + // |-- + // | |--sequence-key: + // | |--creation-info-key: + // | |--fail-info-key: <(optional) fail info> + // | | + // | |--payment-htlcs-bucket (shard-bucket) + // | | | + // | | |-- ai: + // | | |-- si: <(optional) settle info> + // | | |-- fi: <(optional) fail info> + // | | | + // | | ... + // | | + // | | + // | |--duplicate-bucket (only for old, completed payments) + // | | + // | |-- + // | | |--sequence-key: + // | | |--creation-info-key: + // | | |--ai: + // | | |--si: + // | | |--fi: + // | | + // | |-- + // | | | + // | ... ... + // | + // |-- + // | | + // | ... + // ... + // + paymentsRootBucket = []byte("payments-root-bucket") + + // paymentSequenceKey is a key used in the payment's sub-bucket to + // store the sequence number of the payment. + paymentSequenceKey = []byte("payment-sequence-key") + + // paymentCreationInfoKey is a key used in the payment's sub-bucket to + // store the creation info of the payment. + paymentCreationInfoKey = []byte("payment-creation-info") + + // paymentHtlcsBucket is a bucket where we'll store the information + // about the HTLCs that were attempted for a payment. + paymentHtlcsBucket = []byte("payment-htlcs-bucket") + + // htlcAttemptInfoKey is the key used as the prefix of an HTLC attempt + // to store the info about the attempt that was done for the HTLC in + // question. The HTLC attempt ID is concatenated at the end. + htlcAttemptInfoKey = []byte("ai") + + // htlcSettleInfoKey is the key used as the prefix of an HTLC attempt + // settle info, if any. The HTLC attempt ID is concatenated at the end. + htlcSettleInfoKey = []byte("si") + + // htlcFailInfoKey is the key used as the prefix of an HTLC attempt + // failure information, if any.The HTLC attempt ID is concatenated at + // the end. + htlcFailInfoKey = []byte("fi") + + // paymentFailInfoKey is a key used in the payment's sub-bucket to + // store information about the reason a payment failed. 
+ paymentFailInfoKey = []byte("payment-fail-info") + + // paymentsIndexBucket is the name of the top-level bucket within the + // database that stores an index of payment sequence numbers to its + // payment hash. + // payments-sequence-index-bucket + // |--: + // |--... + // |--: + paymentsIndexBucket = []byte("payments-index-bucket") +) + +// KVStore implements persistence for payments and payment attempts. +type KVStore struct { + // Sequence management for the kv store. + seqMu sync.Mutex + currSeq uint64 + storedSeq uint64 + + // db is the underlying database implementation. + db kvdb.Backend +} + +// A compile-time constraint to ensure KVStore implements DB. +var _ DB = (*KVStore)(nil) + +// NewKVStore creates a new KVStore for payments. +func NewKVStore(db kvdb.Backend, + options ...OptionModifier) (*KVStore, error) { + + opts := DefaultOptions() + for _, applyOption := range options { + applyOption(opts) + } + + if !opts.NoMigration { + if err := initKVStore(db); err != nil { + return nil, err + } + } + + return &KVStore{ + db: db, + }, nil +} + +// paymentsTopLevelBuckets is a list of top-level buckets that are used for +// the payments database when using the kv store. +var paymentsTopLevelBuckets = [][]byte{ + paymentsRootBucket, + paymentsIndexBucket, +} + +// initKVStore creates and initializes the top-level buckets for the payment db. +func initKVStore(db kvdb.Backend) error { + err := kvdb.Update(db, func(tx kvdb.RwTx) error { + for _, tlb := range paymentsTopLevelBuckets { + if _, err := tx.CreateTopLevelBucket(tlb); err != nil { + return err + } + } + + return nil + }, func() {}) + if err != nil { + return fmt.Errorf("unable to create new payments db: %w", err) + } + + return nil +} + +// InitPayment checks or records the given PaymentCreationInfo with the DB, +// making sure it does not already exist as an in-flight payment. When this +// method returns successfully, the payment is guaranteed to be in the InFlight +// state. 
+func (p *KVStore) InitPayment(_ context.Context, paymentHash lntypes.Hash, + info *PaymentCreationInfo) error { + + // Obtain a new sequence number for this payment. This is used + // to sort the payments in order of creation, and also acts as + // a unique identifier for each payment. + sequenceNum, err := p.nextPaymentSequence() + if err != nil { + return err + } + + var b bytes.Buffer + if err := serializePaymentCreationInfo(&b, info); err != nil { + return err + } + infoBytes := b.Bytes() + + var updateErr error + err = kvdb.Batch(p.db, func(tx kvdb.RwTx) error { + // Reset the update error, to avoid carrying over an error + // from a previous execution of the batched db transaction. + updateErr = nil + + prefetchPayment(tx, paymentHash) + bucket, err := createPaymentBucket(tx, paymentHash) + if err != nil { + return err + } + + // Get the existing status of this payment, if any. + paymentStatus, err := fetchPaymentStatus(bucket) + + switch { + // If no error is returned, it means we already have this + // payment. We'll check the status to decide whether we allow + // retrying the payment or return a specific error. + case err == nil: + if err := paymentStatus.initializable(); err != nil { + updateErr = err + return nil + } + + // Otherwise, if the error is not `ErrPaymentNotInitiated`, + // we'll return the error. + case !errors.Is(err, ErrPaymentNotInitiated): + return err + } + + // Before we set our new sequence number, we check whether this + // payment has a previously set sequence number and remove its + // index entry if it exists. This happens in the case where we + // have a previously attempted payment which was left in a state + // where we can retry. 
+ seqBytes := bucket.Get(paymentSequenceKey)
+ if seqBytes != nil {
+ indexBucket := tx.ReadWriteBucket(paymentsIndexBucket)
+ if err := indexBucket.Delete(seqBytes); err != nil {
+ return err
+ }
+ }
+
+ // Once we have obtained a sequence number, we add an entry
+ // to our index bucket which will map the sequence number to
+ // our payment identifier.
+ err = createPaymentIndexEntry(
+ tx, sequenceNum, info.PaymentIdentifier,
+ )
+ if err != nil {
+ return err
+ }
+
+ err = bucket.Put(paymentSequenceKey, sequenceNum)
+ if err != nil {
+ return err
+ }
+
+ // Add the payment info to the bucket, which contains the
+ // static information for this payment.
+ err = bucket.Put(paymentCreationInfoKey, infoBytes)
+ if err != nil {
+ return err
+ }
+
+ // We'll delete any lingering HTLCs to start with, in case we
+ // are initializing a payment that was attempted earlier, but
+ // left in a state where we could retry.
+ err = bucket.DeleteNestedBucket(paymentHtlcsBucket)
+ if err != nil && !errors.Is(err, kvdb.ErrBucketNotFound) {
+ return err
+ }
+
+ // Also delete any lingering failure info now that we are
+ // re-attempting.
+ return bucket.Delete(paymentFailInfoKey)
+ })
+ if err != nil {
+ return fmt.Errorf("unable to init payment: %w", err)
+ }
+
+ return updateErr
+}
+
+// DeleteFailedAttempts deletes all failed htlcs for a payment.
+func (p *KVStore) DeleteFailedAttempts(ctx context.Context,
+ hash lntypes.Hash) error {
+
+ const failedHtlcsOnly = true
+ err := p.DeletePayment(ctx, hash, failedHtlcsOnly)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// paymentIndexType denotes the type of an entry in the payment index,
+// describing what kind of identifier a payment sequence number maps to.
+type paymentIndexType uint8
+
+// paymentIndexTypeHash is a payment index type which indicates that we have
+// created an index of payment sequence number to payment hash. 
+const paymentIndexTypeHash paymentIndexType = 0
+
+// createPaymentIndexEntry creates a payment hash typed index for a payment. The
+// index produced contains a payment index type (which can be used in future to
+// signal different payment index types) and the payment identifier.
+func createPaymentIndexEntry(tx kvdb.RwTx, sequenceNumber []byte,
+ id lntypes.Hash) error {
+
+ var b bytes.Buffer
+ if err := WriteElements(&b, paymentIndexTypeHash, id[:]); err != nil {
+ return err
+ }
+
+ indexes := tx.ReadWriteBucket(paymentsIndexBucket)
+
+ return indexes.Put(sequenceNumber, b.Bytes())
+}
+
+// deserializePaymentIndex deserializes a payment index entry. This function
+// currently only supports deserialization of payment hash indexes, and will
+// fail for other types.
+func deserializePaymentIndex(r io.Reader) (lntypes.Hash, error) {
+ var (
+ indexType paymentIndexType
+ paymentHash []byte
+ )
+
+ if err := ReadElements(r, &indexType, &paymentHash); err != nil {
+ return lntypes.Hash{}, err
+ }
+
+ // While we only have one payment index type, we do not need to use our
+ // index type to deserialize the index. However, we sanity check that
+ // this type is as expected, since we had to read it out anyway.
+ if indexType != paymentIndexTypeHash {
+ return lntypes.Hash{}, fmt.Errorf("unknown payment index "+
+ "type: %v", indexType)
+ }
+
+ hash, err := lntypes.MakeHash(paymentHash)
+ if err != nil {
+ return lntypes.Hash{}, err
+ }
+
+ return hash, nil
+}
+
+// RegisterAttempt atomically records the provided HTLCAttemptInfo to the
+// DB.
+func (p *KVStore) RegisterAttempt(_ context.Context, paymentHash lntypes.Hash,
+ attempt *HTLCAttemptInfo) (*MPPayment, error) {
+
+ // Serialize the information before opening the db transaction. 
+ var a bytes.Buffer + err := serializeHTLCAttemptInfo(&a, attempt) + if err != nil { + return nil, err + } + htlcInfoBytes := a.Bytes() + + htlcIDBytes := make([]byte, 8) + binary.BigEndian.PutUint64(htlcIDBytes, attempt.AttemptID) + + var payment *MPPayment + err = kvdb.Batch(p.db, func(tx kvdb.RwTx) error { + prefetchPayment(tx, paymentHash) + bucket, err := fetchPaymentBucketUpdate(tx, paymentHash) + if err != nil { + return err + } + + payment, err = fetchPayment(bucket) + if err != nil { + return err + } + + // Check if registering a new attempt is allowed. + if err := payment.Registrable(); err != nil { + return err + } + + // Verify the attempt is compatible with the existing payment. + if err := verifyAttempt(payment, attempt); err != nil { + return err + } + + htlcsBucket, err := bucket.CreateBucketIfNotExists( + paymentHtlcsBucket, + ) + if err != nil { + return err + } + + err = htlcsBucket.Put( + htlcBucketKey(htlcAttemptInfoKey, htlcIDBytes), + htlcInfoBytes, + ) + if err != nil { + return err + } + + // Retrieve attempt info for the notification. + payment, err = fetchPayment(bucket) + + return err + }) + if err != nil { + return nil, err + } + + return payment, err +} + +// SettleAttempt marks the given attempt settled with the preimage. If this is +// a multi shard payment, this might implicitly mean that the full payment +// succeeded. +// +// After invoking this method, InitPayment should always return an error to +// prevent us from making duplicate payments to the same payment hash. The +// provided preimage is atomically saved to the DB for record keeping. 
+func (p *KVStore) SettleAttempt(_ context.Context, hash lntypes.Hash, + attemptID uint64, settleInfo *HTLCSettleInfo) (*MPPayment, error) { + + var b bytes.Buffer + if err := serializeHTLCSettleInfo(&b, settleInfo); err != nil { + return nil, err + } + settleBytes := b.Bytes() + + return p.updateHtlcKey(hash, attemptID, htlcSettleInfoKey, settleBytes) +} + +// FailAttempt marks the given payment attempt failed. +func (p *KVStore) FailAttempt(_ context.Context, hash lntypes.Hash, + attemptID uint64, failInfo *HTLCFailInfo) (*MPPayment, error) { + + var b bytes.Buffer + if err := serializeHTLCFailInfo(&b, failInfo); err != nil { + return nil, err + } + failBytes := b.Bytes() + + return p.updateHtlcKey(hash, attemptID, htlcFailInfoKey, failBytes) +} + +// updateHtlcKey updates a database key for the specified htlc. +func (p *KVStore) updateHtlcKey(paymentHash lntypes.Hash, + attemptID uint64, key, value []byte) (*MPPayment, error) { + + aid := make([]byte, 8) + binary.BigEndian.PutUint64(aid, attemptID) + + var payment *MPPayment + err := kvdb.Batch(p.db, func(tx kvdb.RwTx) error { + payment = nil + + prefetchPayment(tx, paymentHash) + bucket, err := fetchPaymentBucketUpdate(tx, paymentHash) + if err != nil { + return err + } + + p, err := fetchPayment(bucket) + if err != nil { + return err + } + + // We can only update keys of in-flight payments. We allow + // updating keys even if the payment has reached a terminal + // condition, since the HTLC outcomes must still be updated. + if err := p.Status.updatable(); err != nil { + return err + } + + htlcsBucket := bucket.NestedReadWriteBucket(paymentHtlcsBucket) + if htlcsBucket == nil { + return fmt.Errorf("htlcs bucket not found") + } + + attemptKey := htlcBucketKey(htlcAttemptInfoKey, aid) + if htlcsBucket.Get(attemptKey) == nil { + return fmt.Errorf("HTLC with ID %v not registered", + attemptID) + } + + // Make sure the shard is not already failed or settled. 
+ failKey := htlcBucketKey(htlcFailInfoKey, aid) + if htlcsBucket.Get(failKey) != nil { + return ErrAttemptAlreadyFailed + } + + settleKey := htlcBucketKey(htlcSettleInfoKey, aid) + if htlcsBucket.Get(settleKey) != nil { + return ErrAttemptAlreadySettled + } + + // Add or update the key for this htlc. + err = htlcsBucket.Put(htlcBucketKey(key, aid), value) + if err != nil { + return err + } + + // Retrieve attempt info for the notification. + payment, err = fetchPayment(bucket) + + return err + }) + if err != nil { + return nil, err + } + + return payment, err +} + +// Fail transitions a payment into the Failed state, and records the reason the +// payment failed. After invoking this method, InitPayment should return nil on +// its next call for this payment hash, allowing the switch to make a +// subsequent payment. +func (p *KVStore) Fail(_ context.Context, paymentHash lntypes.Hash, + reason FailureReason) (*MPPayment, error) { + + var ( + updateErr error + payment *MPPayment + ) + err := kvdb.Batch(p.db, func(tx kvdb.RwTx) error { + // Reset the update error, to avoid carrying over an error + // from a previous execution of the batched db transaction. + updateErr = nil + payment = nil + + prefetchPayment(tx, paymentHash) + bucket, err := fetchPaymentBucketUpdate(tx, paymentHash) + if errors.Is(err, ErrPaymentNotInitiated) { + updateErr = ErrPaymentNotInitiated + return nil + } else if err != nil { + return err + } + + // We mark the payment as failed as long as it is known. This + // lets the last attempt to fail with a terminal write its + // failure to the KVStore without synchronizing with + // other attempts. + _, err = fetchPaymentStatus(bucket) + if errors.Is(err, ErrPaymentNotInitiated) { + updateErr = ErrPaymentNotInitiated + return nil + } else if err != nil { + return err + } + + // Put the failure reason in the bucket for record keeping. 
+ v := []byte{byte(reason)} + err = bucket.Put(paymentFailInfoKey, v) + if err != nil { + return err + } + + // Retrieve attempt info for the notification, if available. + payment, err = fetchPayment(bucket) + if err != nil { + return err + } + + return nil + }) + if err != nil { + return nil, err + } + + return payment, updateErr +} + +// FetchPayment returns information about a payment from the database. +func (p *KVStore) FetchPayment(_ context.Context, + paymentHash lntypes.Hash) (*MPPayment, error) { + + var payment *MPPayment + err := kvdb.View(p.db, func(tx kvdb.RTx) error { + prefetchPayment(tx, paymentHash) + bucket, err := fetchPaymentBucket(tx, paymentHash) + if err != nil { + return err + } + + payment, err = fetchPayment(bucket) + + return err + }, func() { + payment = nil + }) + if err != nil { + return nil, err + } + + return payment, nil +} + +// prefetchPayment attempts to prefetch as much of the payment as possible to +// reduce DB roundtrips. +func prefetchPayment(tx kvdb.RTx, paymentHash lntypes.Hash) { + rb := kvdb.RootBucket(tx) + kvdb.Prefetch( + rb, + []string{ + // Prefetch all keys in the payment's bucket. + string(paymentsRootBucket), + string(paymentHash[:]), + }, + []string{ + // Prefetch all keys in the payment's htlc bucket. + string(paymentsRootBucket), + string(paymentHash[:]), + string(paymentHtlcsBucket), + }, + ) +} + +// createPaymentBucket creates or fetches the sub-bucket assigned to this +// payment hash. +func createPaymentBucket(tx kvdb.RwTx, paymentHash lntypes.Hash) ( + kvdb.RwBucket, error) { + + payments, err := tx.CreateTopLevelBucket(paymentsRootBucket) + if err != nil { + return nil, err + } + + return payments.CreateBucketIfNotExists(paymentHash[:]) +} + +// fetchPaymentBucket fetches the sub-bucket assigned to this payment hash. If +// the bucket does not exist, it returns ErrPaymentNotInitiated. 
+func fetchPaymentBucket(tx kvdb.RTx, paymentHash lntypes.Hash) ( + kvdb.RBucket, error) { + + payments := tx.ReadBucket(paymentsRootBucket) + if payments == nil { + return nil, ErrPaymentNotInitiated + } + + bucket := payments.NestedReadBucket(paymentHash[:]) + if bucket == nil { + return nil, ErrPaymentNotInitiated + } + + return bucket, nil +} + +// fetchPaymentBucketUpdate is identical to fetchPaymentBucket, but it returns a +// bucket that can be written to. +func fetchPaymentBucketUpdate(tx kvdb.RwTx, paymentHash lntypes.Hash) ( + kvdb.RwBucket, error) { + + payments := tx.ReadWriteBucket(paymentsRootBucket) + if payments == nil { + return nil, ErrPaymentNotInitiated + } + + bucket := payments.NestedReadWriteBucket(paymentHash[:]) + if bucket == nil { + return nil, ErrPaymentNotInitiated + } + + return bucket, nil +} + +// nextPaymentSequence returns the next sequence number to store for a new +// payment. +func (p *KVStore) nextPaymentSequence() ([]byte, error) { + p.seqMu.Lock() + defer p.seqMu.Unlock() + + // Set a new upper bound in the DB every 1000 payments to avoid + // conflicts on the sequence when using etcd. + if p.currSeq == p.storedSeq { + var currPaymentSeq, newUpperBound uint64 + if err := kvdb.Update(p.db, func(tx kvdb.RwTx) error { + paymentsBucket, err := tx.CreateTopLevelBucket( + paymentsRootBucket, + ) + if err != nil { + return err + } + + currPaymentSeq = paymentsBucket.Sequence() + newUpperBound = currPaymentSeq + paymentSeqBlockSize + + return paymentsBucket.SetSequence(newUpperBound) + }, func() {}); err != nil { + return nil, err + } + + // We lazy initialize the cached currPaymentSeq here using the + // first nextPaymentSequence() call. This if statement will auto + // initialize our stored currPaymentSeq, since by default both + // this variable and storedPaymentSeq are zero which in turn + // will have us fetch the current values from the DB. 
+ if p.currSeq == 0 { + p.currSeq = currPaymentSeq + } + + p.storedSeq = newUpperBound + } + + p.currSeq++ + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, p.currSeq) + + return b, nil +} + +// fetchPaymentStatus fetches the payment status of the payment. If the payment +// isn't found, it will return error `ErrPaymentNotInitiated`. +func fetchPaymentStatus(bucket kvdb.RBucket) (PaymentStatus, error) { + // Creation info should be set for all payments, regardless of state. + // If not, it is unknown. + if bucket.Get(paymentCreationInfoKey) == nil { + return 0, ErrPaymentNotInitiated + } + + payment, err := fetchPayment(bucket) + if err != nil { + return 0, err + } + + return payment.Status, nil +} + +// FetchInFlightPayments returns all payments with status InFlight. +func (p *KVStore) FetchInFlightPayments(_ context.Context) ([]*MPPayment, + error) { + + var ( + inFlights []*MPPayment + start = time.Now() + lastLogTime = time.Now() + processedCount int + ) + + err := kvdb.View(p.db, func(tx kvdb.RTx) error { + payments := tx.ReadBucket(paymentsRootBucket) + if payments == nil { + return nil + } + + return payments.ForEach(func(k, _ []byte) error { + bucket := payments.NestedReadBucket(k) + if bucket == nil { + return fmt.Errorf("non bucket element") + } + + p, err := fetchPayment(bucket) + if err != nil { + return err + } + + processedCount++ + if time.Since(lastLogTime) >= + paymentProgressLogInterval { + + log.Debugf("Scanning inflight payments "+ + "(in progress), processed %d, last "+ + "processed payment: %v", processedCount, + p.Info) + + lastLogTime = time.Now() + } + + // Skip the payment if it's terminated. 
+ if p.Terminated() {
+ return nil
+ }
+
+ inFlights = append(inFlights, p)
+
+ return nil
+ })
+ }, func() {
+ inFlights = nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ elapsed := time.Since(start)
+ log.Debugf("Completed scanning for inflight payments: "+
+ "total_processed=%d, found_inflight=%d, elapsed=%v",
+ processedCount, len(inFlights),
+ elapsed.Round(time.Millisecond))
+
+ return inFlights, nil
+}
+
+// htlcBucketKey creates a composite key from prefix and id where the result is
+// simply the two concatenated.
+func htlcBucketKey(prefix, id []byte) []byte {
+ key := make([]byte, len(prefix)+len(id))
+ copy(key, prefix)
+ copy(key[len(prefix):], id)
+
+ return key
+}
+
+// FetchPayments returns all sent payments found in the DB.
+func (p *KVStore) FetchPayments() ([]*MPPayment, error) {
+ var payments []*MPPayment
+
+ err := kvdb.View(p.db, func(tx kvdb.RTx) error {
+ paymentsBucket := tx.ReadBucket(paymentsRootBucket)
+ if paymentsBucket == nil {
+ return nil
+ }
+
+ return paymentsBucket.ForEach(func(k, v []byte) error {
+ bucket := paymentsBucket.NestedReadBucket(k)
+ if bucket == nil {
+ // We only expect sub-buckets to be found in
+ // this top-level bucket.
+ return fmt.Errorf("non bucket element in " +
+ "payments bucket")
+ }
+
+ p, err := fetchPayment(bucket)
+ if err != nil {
+ return err
+ }
+
+ payments = append(payments, p)
+
+ // For older versions of lnd, duplicate payments to a
+ // payment hash was possible. These will be found in a
+ // sub-bucket indexed by their sequence number if
+ // available.
+ duplicatePayments, err := fetchDuplicatePayments(bucket)
+ if err != nil {
+ return err
+ }
+
+ payments = append(payments, duplicatePayments...)
+
+ return nil
+ })
+ }, func() {
+ payments = nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Before returning, sort the payments by their sequence number. 
+ sort.Slice(payments, func(i, j int) bool { + return payments[i].SequenceNum < payments[j].SequenceNum + }) + + return payments, nil +} + +func fetchCreationInfo(bucket kvdb.RBucket) (*PaymentCreationInfo, error) { + b := bucket.Get(paymentCreationInfoKey) + if b == nil { + return nil, fmt.Errorf("creation info not found") + } + + r := bytes.NewReader(b) + + return deserializePaymentCreationInfo(r) +} + +func fetchPayment(bucket kvdb.RBucket) (*MPPayment, error) { + seqBytes := bucket.Get(paymentSequenceKey) + if seqBytes == nil { + return nil, fmt.Errorf("sequence number not found") + } + + sequenceNum := binary.BigEndian.Uint64(seqBytes) + + // Get the PaymentCreationInfo. + creationInfo, err := fetchCreationInfo(bucket) + if err != nil { + return nil, err + } + + var htlcs []HTLCAttempt + htlcsBucket := bucket.NestedReadBucket(paymentHtlcsBucket) + if htlcsBucket != nil { + // Get the payment attempts. This can be empty. + htlcs, err = fetchHtlcAttempts(htlcsBucket) + if err != nil { + return nil, err + } + } + + // Get failure reason if available. + var failureReason *FailureReason + b := bucket.Get(paymentFailInfoKey) + if b != nil { + reason := FailureReason(b[0]) + failureReason = &reason + } + + // Create a new payment. + payment := &MPPayment{ + SequenceNum: sequenceNum, + Info: creationInfo, + HTLCs: htlcs, + FailureReason: failureReason, + } + + // Set its state and status. + if err := payment.setState(); err != nil { + return nil, err + } + + return payment, nil +} + +// fetchHtlcAttempts retrieves all htlc attempts made for the payment found in +// the given bucket. 
+func fetchHtlcAttempts(bucket kvdb.RBucket) ([]HTLCAttempt, error) { + htlcsMap := make(map[uint64]*HTLCAttempt) + + attemptInfoCount := 0 + err := bucket.ForEach(func(k, v []byte) error { + aid := byteOrder.Uint64(k[len(k)-8:]) + + if _, ok := htlcsMap[aid]; !ok { + htlcsMap[aid] = &HTLCAttempt{} + } + + var err error + switch { + case bytes.HasPrefix(k, htlcAttemptInfoKey): + attemptInfo, err := readHtlcAttemptInfo(v) + if err != nil { + return err + } + + attemptInfo.AttemptID = aid + htlcsMap[aid].HTLCAttemptInfo = *attemptInfo + attemptInfoCount++ + + case bytes.HasPrefix(k, htlcSettleInfoKey): + htlcsMap[aid].Settle, err = readHtlcSettleInfo(v) + if err != nil { + return err + } + + case bytes.HasPrefix(k, htlcFailInfoKey): + htlcsMap[aid].Failure, err = readHtlcFailInfo(v) + if err != nil { + return err + } + + default: + return fmt.Errorf("unknown htlc attempt key") + } + + return nil + }) + if err != nil { + return nil, err + } + + // Sanity check that all htlcs have an attempt info. + if attemptInfoCount != len(htlcsMap) { + return nil, ErrNoAttemptInfo + } + + keys := make([]uint64, len(htlcsMap)) + i := 0 + for k := range htlcsMap { + keys[i] = k + i++ + } + + // Sort HTLC attempts by their attempt ID. This is needed because in the + // DB we store the attempts with keys prefixed by their status which + // changes order (groups them together by status). + sort.Slice(keys, func(i, j int) bool { + return keys[i] < keys[j] + }) + + htlcs := make([]HTLCAttempt, len(htlcsMap)) + for i, key := range keys { + htlcs[i] = *htlcsMap[key] + } + + return htlcs, nil +} + +// readHtlcAttemptInfo reads the payment attempt info for this htlc. +func readHtlcAttemptInfo(b []byte) (*HTLCAttemptInfo, error) { + r := bytes.NewReader(b) + return deserializeHTLCAttemptInfo(r) +} + +// readHtlcSettleInfo reads the settle info for the htlc. If the htlc isn't +// settled, nil is returned. 
+func readHtlcSettleInfo(b []byte) (*HTLCSettleInfo, error) { + r := bytes.NewReader(b) + return deserializeHTLCSettleInfo(r) +} + +// readHtlcFailInfo reads the failure info for the htlc. If the htlc hasn't +// failed, nil is returned. +func readHtlcFailInfo(b []byte) (*HTLCFailInfo, error) { + r := bytes.NewReader(b) + return deserializeHTLCFailInfo(r) +} + +// fetchFailedHtlcKeys retrieves the bucket keys of all failed HTLCs of a +// payment bucket. +func fetchFailedHtlcKeys(bucket kvdb.RBucket) ([][]byte, error) { + htlcsBucket := bucket.NestedReadBucket(paymentHtlcsBucket) + + var htlcs []HTLCAttempt + var err error + if htlcsBucket != nil { + htlcs, err = fetchHtlcAttempts(htlcsBucket) + if err != nil { + return nil, err + } + } + + // Now iterate though them and save the bucket keys for the failed + // HTLCs. + var htlcKeys [][]byte + for _, h := range htlcs { + if h.Failure == nil { + continue + } + + htlcKeyBytes := make([]byte, 8) + binary.BigEndian.PutUint64(htlcKeyBytes, h.AttemptID) + + htlcKeys = append(htlcKeys, htlcKeyBytes) + } + + return htlcKeys, nil +} + +// QueryPayments is a query to the payments database which is restricted +// to a subset of payments by the payments query, containing an offset +// index and a maximum number of returned payments. +func (p *KVStore) QueryPayments(_ context.Context, + query Query) (Response, error) { + + var resp Response + + if err := kvdb.View(p.db, func(tx kvdb.RTx) error { + // Get the root payments bucket. + paymentsBucket := tx.ReadBucket(paymentsRootBucket) + if paymentsBucket == nil { + return nil + } + + // Get the index bucket which maps sequence number -> payment + // hash and duplicate bool. If we have a payments bucket, we + // should have an indexes bucket as well. 
+ indexes := tx.ReadBucket(paymentsIndexBucket) + if indexes == nil { + return fmt.Errorf("index bucket does not exist") + } + + // accumulatePayments gets payments with the sequence number + // and hash provided and adds them to our list of payments if + // they meet the criteria of our query. It returns the number + // of payments that were added. + accumulatePayments := func(sequenceKey, hash []byte) (bool, + error) { + + r := bytes.NewReader(hash) + paymentHash, err := deserializePaymentIndex(r) + if err != nil { + return false, err + } + + payment, err := fetchPaymentWithSequenceNumber( + tx, paymentHash, sequenceKey, + ) + if err != nil { + return false, err + } + + // To keep compatibility with the old API, we only + // return non-succeeded payments if requested. + if payment.Status != StatusSucceeded && + !query.IncludeIncomplete { + + return false, err + } + + // Get the creation time in Unix seconds, this always + // rounds down the nanoseconds to full seconds. + createTime := payment.Info.CreationTime.Unix() + + // Skip any payments that were created before the + // specified time. + if createTime < query.CreationDateStart { + return false, nil + } + + // Skip any payments that were created after the + // specified time. + if query.CreationDateEnd != 0 && + createTime > query.CreationDateEnd { + + return false, nil + } + + // At this point, we've exhausted the offset, so we'll + // begin collecting invoices found within the range. + resp.Payments = append(resp.Payments, payment) + + return true, nil + } + + // Create a paginator which reads from our sequence index bucket + // with the parameters provided by the payments query. + paginator := channeldb.NewPaginator( + indexes.ReadCursor(), query.Reversed, query.IndexOffset, + query.MaxPayments, + ) + + // Run a paginated query, adding payments to our response. 
+ if err := paginator.Query(accumulatePayments); err != nil { + return err + } + + // Counting the total number of payments is expensive, since we + // literally have to traverse the cursor linearly, which can + // take quite a while. So it's an optional query parameter. + if query.CountTotal { + var ( + totalPayments uint64 + err error + ) + countFn := func(_, _ []byte) error { + totalPayments++ + + return nil + } + + // In non-boltdb database backends, there's a faster + // ForAll query that allows for batch fetching items. + fastBucket, ok := indexes.(kvdb.ExtendedRBucket) + if ok { + err = fastBucket.ForAll(countFn) + } else { + err = indexes.ForEach(countFn) + } + if err != nil { + return fmt.Errorf("error counting payments: %w", + err) + } + + resp.TotalCount = totalPayments + } + + return nil + }, func() { + resp = Response{} + }); err != nil { + return resp, err + } + + // Need to swap the payments slice order if reversed order. + if query.Reversed { + for l, r := 0, len(resp.Payments)-1; l < r; l, r = l+1, r-1 { + resp.Payments[l], resp.Payments[r] = + resp.Payments[r], resp.Payments[l] + } + } + + // Set the first and last index of the returned payments so that the + // caller can resume from this point later on. + if len(resp.Payments) > 0 { + resp.FirstIndexOffset = resp.Payments[0].SequenceNum + resp.LastIndexOffset = + resp.Payments[len(resp.Payments)-1].SequenceNum + } + + return resp, nil +} + +// fetchPaymentWithSequenceNumber get the payment which matches the payment hash +// *and* sequence number provided from the database. This is required because +// we previously had more than one payment per hash, so we have multiple indexes +// pointing to a single payment; we want to retrieve the correct one. +func fetchPaymentWithSequenceNumber(tx kvdb.RTx, paymentHash lntypes.Hash, + sequenceNumber []byte) (*MPPayment, error) { + + // We can now lookup the payment keyed by its hash in + // the payments root bucket. 
+ bucket, err := fetchPaymentBucket(tx, paymentHash)
+ if err != nil {
+ return nil, err
+ }
+
+ // A single payment hash can have multiple payments associated with it.
+ // We lookup our sequence number first, to determine whether this is
+ // the payment we are actually looking for.
+ seqBytes := bucket.Get(paymentSequenceKey)
+ if seqBytes == nil {
+ return nil, ErrNoSequenceNumber
+ }
+
+ // If this top level payment has the sequence number we are looking for,
+ // return it.
+ if bytes.Equal(seqBytes, sequenceNumber) {
+ return fetchPayment(bucket)
+ }
+
+ // If we were not looking for the top level payment, we are looking for
+ // one of our duplicate payments. We need to iterate through the seq
+ // numbers in this bucket to find the correct payments. If we do not
+ // find a duplicate payments bucket here, something is wrong.
+ dup := bucket.NestedReadBucket(duplicatePaymentsBucket)
+ if dup == nil {
+ return nil, ErrNoDuplicateBucket
+ }
+
+ var duplicatePayment *MPPayment
+ err = dup.ForEach(func(k, v []byte) error {
+ subBucket := dup.NestedReadBucket(k)
+ if subBucket == nil {
+ // We expect one bucket for each duplicate to be found.
+ return ErrNoDuplicateNestedBucket
+ }
+
+ seqBytes := subBucket.Get(duplicatePaymentSequenceKey)
+ if seqBytes == nil {
+ return err
+ }
+
+ // If this duplicate payment is not the sequence number we are
+ // looking for, we can continue.
+ if !bytes.Equal(seqBytes, sequenceNumber) {
+ return nil
+ }
+
+ duplicatePayment, err = fetchDuplicatePayment(subBucket)
+ if err != nil {
+ return err
+ }
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // If none of the duplicate payments matched our sequence number, we
+ // failed to find the payment with this sequence number; something is
+ // wrong.
+ if duplicatePayment == nil {
+ return nil, ErrDuplicateNotFound
+ }
+
+ return duplicatePayment, nil
+}
+
+// DeletePayment deletes a payment from the DB given its payment hash.
If +// failedHtlcsOnly is set, only failed HTLC attempts of the payment will be +// deleted. +func (p *KVStore) DeletePayment(_ context.Context, paymentHash lntypes.Hash, + failedHtlcsOnly bool) error { + + return kvdb.Update(p.db, func(tx kvdb.RwTx) error { + payments := tx.ReadWriteBucket(paymentsRootBucket) + if payments == nil { + return nil + } + + bucket := payments.NestedReadWriteBucket(paymentHash[:]) + if bucket == nil { + return fmt.Errorf("non bucket element in payments " + + "bucket") + } + + // If the status is InFlight, we cannot safely delete + // the payment information, so we return early. + paymentStatus, err := fetchPaymentStatus(bucket) + if err != nil { + return err + } + + // If the payment has inflight HTLCs, we cannot safely delete + // the payment information, so we return an error. + if err := paymentStatus.removable(); err != nil { + return fmt.Errorf("payment '%v' has inflight HTLCs"+ + "and therefore cannot be deleted: %w", + paymentHash.String(), err) + } + + // Delete the failed HTLC attempts we found. 
+ if failedHtlcsOnly { + toDelete, err := fetchFailedHtlcKeys(bucket) + if err != nil { + return err + } + + htlcsBucket := bucket.NestedReadWriteBucket( + paymentHtlcsBucket, + ) + + for _, htlcID := range toDelete { + err = htlcsBucket.Delete( + htlcBucketKey( + htlcAttemptInfoKey, htlcID, + ), + ) + if err != nil { + return err + } + + err = htlcsBucket.Delete( + htlcBucketKey(htlcFailInfoKey, htlcID), + ) + if err != nil { + return err + } + + err = htlcsBucket.Delete( + htlcBucketKey( + htlcSettleInfoKey, htlcID, + ), + ) + if err != nil { + return err + } + } + + return nil + } + + seqNrs, err := fetchSequenceNumbers(bucket) + if err != nil { + return err + } + + err = payments.DeleteNestedBucket(paymentHash[:]) + if err != nil { + return err + } + + indexBucket := tx.ReadWriteBucket(paymentsIndexBucket) + for _, k := range seqNrs { + if err := indexBucket.Delete(k); err != nil { + return err + } + } + + return nil + }, func() {}) +} + +// DeletePayments deletes all completed and failed payments from the DB. If +// failedOnly is set, only failed payments will be considered for deletion. If +// failedHtlcsOnly is set, the payment itself won't be deleted, only failed HTLC +// attempts. The method returns the number of deleted payments, which is always +// 0 if failedHtlcsOnly is set. +func (p *KVStore) DeletePayments(_ context.Context, failedOnly, + failedHtlcsOnly bool) (int, error) { + + var numPayments int + err := kvdb.Update(p.db, func(tx kvdb.RwTx) error { + payments := tx.ReadWriteBucket(paymentsRootBucket) + if payments == nil { + return nil + } + + var ( + // deleteBuckets is the set of payment buckets we need + // to delete. + deleteBuckets [][]byte + + // deleteIndexes is the set of indexes pointing to these + // payments that need to be deleted. + deleteIndexes [][]byte + + // deleteHtlcs maps a payment hash to the HTLC IDs we + // want to delete for that payment. 
+ deleteHtlcs = make(map[lntypes.Hash][][]byte)
+ )
+ err := payments.ForEach(func(k, _ []byte) error {
+ bucket := payments.NestedReadBucket(k)
+ if bucket == nil {
+ // We only expect sub-buckets to be found in
+ // this top-level bucket.
+ return fmt.Errorf("non bucket element in " +
+ "payments bucket")
+ }
+
+ // If the status is InFlight, we cannot safely delete
+ // the payment information, so we return early.
+ paymentStatus, err := fetchPaymentStatus(bucket)
+ if err != nil {
+ return err
+ }
+
+ // If the payment has inflight HTLCs, we cannot safely
+ // delete the payment information, so we return nil
+ // to skip it.
+ if err := paymentStatus.removable(); err != nil {
+ return nil
+ }
+
+ // If we requested to only delete failed payments, we
+ // can return if this one is not.
+ if failedOnly && paymentStatus != StatusFailed {
+ return nil
+ }
+
+ // If we are only deleting failed HTLCs, fetch them.
+ if failedHtlcsOnly {
+ toDelete, err := fetchFailedHtlcKeys(bucket)
+ if err != nil {
+ return err
+ }
+
+ hash, err := lntypes.MakeHash(k)
+ if err != nil {
+ return err
+ }
+
+ deleteHtlcs[hash] = toDelete
+
+ // We return, we are only deleting attempts.
+ return nil
+ }
+
+ // Add the bucket to the set of buckets we can delete.
+ deleteBuckets = append(deleteBuckets, k)
+
+ // Get all the sequence numbers associated with the
+ // payment, including duplicates.
+ seqNrs, err := fetchSequenceNumbers(bucket)
+ if err != nil {
+ return err
+ }
+
+ deleteIndexes = append(deleteIndexes, seqNrs...)
+ numPayments++
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ // Delete the failed HTLC attempts we found.
+ for hash, htlcIDs := range deleteHtlcs {
+ bucket := payments.NestedReadWriteBucket(hash[:])
+ htlcsBucket := bucket.NestedReadWriteBucket(
+ paymentHtlcsBucket,
+ )
+
+ for _, aid := range htlcIDs {
+ if err := htlcsBucket.Delete(
+ htlcBucketKey(htlcAttemptInfoKey, aid),
+ ); err != nil {
+ return err
+ }
+
+ if err := htlcsBucket.Delete(
+ htlcBucketKey(htlcFailInfoKey, aid),
+ ); err != nil {
+ return err
+ }
+
+ if err := htlcsBucket.Delete(
+ htlcBucketKey(htlcSettleInfoKey, aid),
+ ); err != nil {
+ return err
+ }
+ }
+ }
+
+ for _, k := range deleteBuckets {
+ if err := payments.DeleteNestedBucket(k); err != nil {
+ return err
+ }
+ }
+
+ // Get our index bucket and delete all indexes pointing to the
+ // payments we are deleting.
+ indexBucket := tx.ReadWriteBucket(paymentsIndexBucket)
+ for _, k := range deleteIndexes {
+ if err := indexBucket.Delete(k); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }, func() {
+ numPayments = 0
+ })
+ if err != nil {
+ return 0, err
+ }
+
+ return numPayments, nil
+}
+
+// fetchSequenceNumbers fetches all the sequence numbers associated with a
+// payment, including those belonging to any duplicate payments.
+func fetchSequenceNumbers(paymentBucket kvdb.RBucket) ([][]byte, error) {
+ seqNum := paymentBucket.Get(paymentSequenceKey)
+ if seqNum == nil {
+ return nil, errors.New("expected sequence number")
+ }
+
+ sequenceNumbers := [][]byte{seqNum}
+
+ // Get the duplicate payments bucket, if it has no duplicates, just
+ // return early with the payment sequence number.
+ duplicates := paymentBucket.NestedReadBucket(duplicatePaymentsBucket)
+ if duplicates == nil {
+ return sequenceNumbers, nil
+ }
+
+ // If we do have duplicates, they are keyed by sequence number, so we
+ // iterate through the duplicates bucket and add them to our set of
+ // sequence numbers.
+ if err := duplicates.ForEach(func(k, v []byte) error { + sequenceNumbers = append(sequenceNumbers, k) + return nil + }); err != nil { + return nil, err + } + + return sequenceNumbers, nil +} + +func serializePaymentCreationInfo(w io.Writer, c *PaymentCreationInfo) error { + var scratch [8]byte + + if _, err := w.Write(c.PaymentIdentifier[:]); err != nil { + return err + } + + byteOrder.PutUint64(scratch[:], uint64(c.Value)) + if _, err := w.Write(scratch[:]); err != nil { + return err + } + + if err := serializeTime(w, c.CreationTime); err != nil { + return err + } + + byteOrder.PutUint32(scratch[:4], uint32(len(c.PaymentRequest))) + if _, err := w.Write(scratch[:4]); err != nil { + return err + } + + if _, err := w.Write(c.PaymentRequest); err != nil { + return err + } + + // Any remaining bytes are TLV encoded records. Currently, these are + // only the custom records provided by the user to be sent to the first + // hop. But this can easily be extended with further records by merging + // the records into a single TLV stream. + err := c.FirstHopCustomRecords.SerializeTo(w) + if err != nil { + return err + } + + return nil +} + +func deserializePaymentCreationInfo(r io.Reader) (*PaymentCreationInfo, + error) { + + var scratch [8]byte + + c := &PaymentCreationInfo{} + + if _, err := io.ReadFull(r, c.PaymentIdentifier[:]); err != nil { + return nil, err + } + + if _, err := io.ReadFull(r, scratch[:]); err != nil { + return nil, err + } + c.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:])) + + creationTime, err := deserializeTime(r) + if err != nil { + return nil, err + } + c.CreationTime = creationTime + + if _, err := io.ReadFull(r, scratch[:4]); err != nil { + return nil, err + } + + reqLen := byteOrder.Uint32(scratch[:4]) + payReq := make([]byte, reqLen) + if reqLen > 0 { + if _, err := io.ReadFull(r, payReq); err != nil { + return nil, err + } + } + c.PaymentRequest = payReq + + // Any remaining bytes are TLV encoded records. 
Currently, these are + // only the custom records provided by the user to be sent to the first + // hop. But this can easily be extended with further records by merging + // the records into a single TLV stream. + c.FirstHopCustomRecords, err = lnwire.ParseCustomRecordsFrom(r) + if err != nil { + return nil, err + } + + return c, nil +} + +func serializeHTLCAttemptInfo(w io.Writer, a *HTLCAttemptInfo) error { + if err := WriteElements(w, a.sessionKey); err != nil { + return err + } + + if err := SerializeRoute(w, a.Route); err != nil { + return err + } + + if err := serializeTime(w, a.AttemptTime); err != nil { + return err + } + + // If the hash is nil we can just return. + if a.Hash == nil { + return nil + } + + if _, err := w.Write(a.Hash[:]); err != nil { + return err + } + + // Merge the fixed/known records together with the custom records to + // serialize them as a single blob. We can't do this in SerializeRoute + // because we're in the middle of the byte stream there. We can only do + // TLV serialization at the end of the stream, since EOF is allowed for + // a stream if no more data is expected. + producers := []tlv.RecordProducer{ + &a.Route.FirstHopAmount, + } + tlvData, err := lnwire.MergeAndEncode( + producers, nil, a.Route.FirstHopWireCustomRecords, + ) + if err != nil { + return err + } + + if _, err := w.Write(tlvData); err != nil { + return err + } + + return nil +} + +func deserializeHTLCAttemptInfo(r io.Reader) (*HTLCAttemptInfo, error) { + a := &HTLCAttemptInfo{} + err := ReadElements(r, &a.sessionKey) + if err != nil { + return nil, err + } + + a.Route, err = DeserializeRoute(r) + if err != nil { + return nil, err + } + + a.AttemptTime, err = deserializeTime(r) + if err != nil { + return nil, err + } + + hash := lntypes.Hash{} + _, err = io.ReadFull(r, hash[:]) + + switch { + // Older payment attempts wouldn't have the hash set, in which case we + // can just return. 
+ case errors.Is(err, io.EOF), errors.Is(err, io.ErrUnexpectedEOF): + return a, nil + + case err != nil: + return nil, err + + default: + } + + a.Hash = &hash + + // Read any remaining data (if any) and parse it into the known records + // and custom records. + extraData, err := io.ReadAll(r) + if err != nil { + return nil, err + } + + customRecords, _, _, err := lnwire.ParseAndExtractCustomRecords( + extraData, &a.Route.FirstHopAmount, + ) + if err != nil { + return nil, err + } + + a.Route.FirstHopWireCustomRecords = customRecords + + return a, nil +} + +func serializeHop(w io.Writer, h *route.Hop) error { + if err := WriteElements(w, + h.PubKeyBytes[:], + h.ChannelID, + h.OutgoingTimeLock, + h.AmtToForward, + ); err != nil { + return err + } + + if err := binary.Write(w, byteOrder, h.LegacyPayload); err != nil { + return err + } + + // For legacy payloads, we don't need to write any TLV records, so + // we'll write a zero indicating the our serialized TLV map has no + // records. + if h.LegacyPayload { + return WriteElements(w, uint32(0)) + } + + // Gather all non-primitive TLV records so that they can be serialized + // as a single blob. + // + // TODO(conner): add migration to unify all fields in a single TLV + // blobs. The split approach will cause headaches down the road as more + // fields are added, which we can avoid by having a single TLV stream + // for all payload fields. + var records []tlv.Record + if h.MPP != nil { + records = append(records, h.MPP.Record()) + } + + // Add blinding point and encrypted data if present. 
+ if h.EncryptedData != nil { + records = append(records, record.NewEncryptedDataRecord( + &h.EncryptedData, + )) + } + + if h.BlindingPoint != nil { + records = append(records, record.NewBlindingPointRecord( + &h.BlindingPoint, + )) + } + + if h.AMP != nil { + records = append(records, h.AMP.Record()) + } + + if h.Metadata != nil { + records = append(records, record.NewMetadataRecord(&h.Metadata)) + } + + if h.TotalAmtMsat != 0 { + totalMsatInt := uint64(h.TotalAmtMsat) + records = append( + records, record.NewTotalAmtMsatBlinded(&totalMsatInt), + ) + } + + // Final sanity check to absolutely rule out custom records that are not + // custom and write into the standard range. + if err := h.CustomRecords.Validate(); err != nil { + return err + } + + // Convert custom records to tlv and add to the record list. + // MapToRecords sorts the list, so adding it here will keep the list + // canonical. + tlvRecords := tlv.MapToRecords(h.CustomRecords) + records = append(records, tlvRecords...) + + // Otherwise, we'll transform our slice of records into a map of the + // raw bytes, then serialize them in-line with a length (number of + // elements) prefix. + mapRecords, err := tlv.RecordsToMap(records) + if err != nil { + return err + } + + numRecords := uint32(len(mapRecords)) + if err := WriteElements(w, numRecords); err != nil { + return err + } + + for recordType, rawBytes := range mapRecords { + if err := WriteElements(w, recordType); err != nil { + return err + } + + if err := wire.WriteVarBytes(w, 0, rawBytes); err != nil { + return err + } + } + + return nil +} + +// maxOnionPayloadSize is the largest Sphinx payload possible, so we don't need +// to read/write a TLV stream larger than this. 
+const maxOnionPayloadSize = 1300 + +func deserializeHop(r io.Reader) (*route.Hop, error) { + h := &route.Hop{} + + var pub []byte + if err := ReadElements(r, &pub); err != nil { + return nil, err + } + copy(h.PubKeyBytes[:], pub) + + if err := ReadElements(r, + &h.ChannelID, &h.OutgoingTimeLock, &h.AmtToForward, + ); err != nil { + return nil, err + } + + // TODO(roasbeef): change field to allow LegacyPayload false to be the + // legacy default? + err := binary.Read(r, byteOrder, &h.LegacyPayload) + if err != nil { + return nil, err + } + + var numElements uint32 + if err := ReadElements(r, &numElements); err != nil { + return nil, err + } + + // If there're no elements, then we can return early. + if numElements == 0 { + return h, nil + } + + tlvMap := make(map[uint64][]byte) + for i := uint32(0); i < numElements; i++ { + var tlvType uint64 + if err := ReadElements(r, &tlvType); err != nil { + return nil, err + } + + rawRecordBytes, err := wire.ReadVarBytes( + r, 0, maxOnionPayloadSize, "tlv", + ) + if err != nil { + return nil, err + } + + tlvMap[tlvType] = rawRecordBytes + } + + // If the MPP type is present, remove it from the generic TLV map and + // parse it back into a proper MPP struct. + // + // TODO(conner): add migration to unify all fields in a single TLV + // blobs. The split approach will cause headaches down the road as more + // fields are added, which we can avoid by having a single TLV stream + // for all payload fields. + mppType := uint64(record.MPPOnionType) + if mppBytes, ok := tlvMap[mppType]; ok { + delete(tlvMap, mppType) + + var ( + mpp = &record.MPP{} + mppRec = mpp.Record() + r = bytes.NewReader(mppBytes) + ) + err := mppRec.Decode(r, uint64(len(mppBytes))) + if err != nil { + return nil, err + } + h.MPP = mpp + } + + // If encrypted data or blinding key are present, remove them from + // the TLV map and parse into proper types. 
+ encryptedDataType := uint64(record.EncryptedDataOnionType) + if data, ok := tlvMap[encryptedDataType]; ok { + delete(tlvMap, encryptedDataType) + h.EncryptedData = data + } + + blindingType := uint64(record.BlindingPointOnionType) + if blindingPoint, ok := tlvMap[blindingType]; ok { + delete(tlvMap, blindingType) + + h.BlindingPoint, err = btcec.ParsePubKey(blindingPoint) + if err != nil { + return nil, fmt.Errorf("invalid blinding point: %w", + err) + } + } + + ampType := uint64(record.AMPOnionType) + if ampBytes, ok := tlvMap[ampType]; ok { + delete(tlvMap, ampType) + + var ( + amp = &record.AMP{} + ampRec = amp.Record() + r = bytes.NewReader(ampBytes) + ) + err := ampRec.Decode(r, uint64(len(ampBytes))) + if err != nil { + return nil, err + } + h.AMP = amp + } + + // If the metadata type is present, remove it from the tlv map and + // populate directly on the hop. + metadataType := uint64(record.MetadataOnionType) + if metadata, ok := tlvMap[metadataType]; ok { + delete(tlvMap, metadataType) + + h.Metadata = metadata + } + + totalAmtMsatType := uint64(record.TotalAmtMsatBlindedType) + if totalAmtMsat, ok := tlvMap[totalAmtMsatType]; ok { + delete(tlvMap, totalAmtMsatType) + + var ( + totalAmtMsatInt uint64 + buf [8]byte + ) + if err := tlv.DTUint64( + bytes.NewReader(totalAmtMsat), + &totalAmtMsatInt, + &buf, + uint64(len(totalAmtMsat)), + ); err != nil { + return nil, err + } + + h.TotalAmtMsat = lnwire.MilliSatoshi(totalAmtMsatInt) + } + + h.CustomRecords = tlvMap + + return h, nil +} + +// SerializeRoute serializes a route. +func SerializeRoute(w io.Writer, r route.Route) error { + if err := WriteElements(w, + r.TotalTimeLock, r.TotalAmount, r.SourcePubKey[:], + ); err != nil { + return err + } + + if err := WriteElements(w, uint32(len(r.Hops))); err != nil { + return err + } + + for _, h := range r.Hops { + if err := serializeHop(w, h); err != nil { + return err + } + } + + // Any new/extra TLV data is encoded in serializeHTLCAttemptInfo! 
+ + return nil +} + +// DeserializeRoute deserializes a route. +func DeserializeRoute(r io.Reader) (route.Route, error) { + rt := route.Route{} + if err := ReadElements(r, + &rt.TotalTimeLock, &rt.TotalAmount, + ); err != nil { + return rt, err + } + + var pub []byte + if err := ReadElements(r, &pub); err != nil { + return rt, err + } + copy(rt.SourcePubKey[:], pub) + + var numHops uint32 + if err := ReadElements(r, &numHops); err != nil { + return rt, err + } + + var hops []*route.Hop + for i := uint32(0); i < numHops; i++ { + hop, err := deserializeHop(r) + if err != nil { + return rt, err + } + hops = append(hops, hop) + } + rt.Hops = hops + + // Any new/extra TLV data is decoded in deserializeHTLCAttemptInfo! + + return rt, nil +} + +// serializeHTLCSettleInfo serializes the details of a settled htlc. +func serializeHTLCSettleInfo(w io.Writer, s *HTLCSettleInfo) error { + if _, err := w.Write(s.Preimage[:]); err != nil { + return err + } + + if err := serializeTime(w, s.SettleTime); err != nil { + return err + } + + return nil +} + +// deserializeHTLCSettleInfo deserializes the details of a settled htlc. +func deserializeHTLCSettleInfo(r io.Reader) (*HTLCSettleInfo, error) { + s := &HTLCSettleInfo{} + if _, err := io.ReadFull(r, s.Preimage[:]); err != nil { + return nil, err + } + + var err error + s.SettleTime, err = deserializeTime(r) + if err != nil { + return nil, err + } + + return s, nil +} + +// serializeHTLCFailInfo serializes the details of a failed htlc including the +// wire failure. +func serializeHTLCFailInfo(w io.Writer, f *HTLCFailInfo) error { + if err := serializeTime(w, f.FailTime); err != nil { + return err + } + + // Write failure. If there is no failure message, write an empty + // byte slice. 
+ var messageBytes bytes.Buffer + if f.Message != nil { + err := lnwire.EncodeFailureMessage(&messageBytes, f.Message, 0) + if err != nil { + return err + } + } + if err := wire.WriteVarBytes(w, 0, messageBytes.Bytes()); err != nil { + return err + } + + return WriteElements(w, byte(f.Reason), f.FailureSourceIndex) +} + +// deserializeHTLCFailInfo deserializes the details of a failed htlc including +// the wire failure. +func deserializeHTLCFailInfo(r io.Reader) (*HTLCFailInfo, error) { + f := &HTLCFailInfo{} + var err error + f.FailTime, err = deserializeTime(r) + if err != nil { + return nil, err + } + + // Read failure. + failureBytes, err := wire.ReadVarBytes( + r, 0, math.MaxUint16, "failure", + ) + if err != nil { + return nil, err + } + if len(failureBytes) > 0 { + f.Message, err = lnwire.DecodeFailureMessage( + bytes.NewReader(failureBytes), 0, + ) + if err != nil && + !errors.Is(err, lnwire.ErrParsingExtraTLVBytes) { + + return nil, err + } + + // In case we have an invalid TLV stream regarding the extra + // tlv data we still continue with the decoding of the + // HTLCFailInfo. + if errors.Is(err, lnwire.ErrParsingExtraTLVBytes) { + log.Warnf("Failed to decode extra TLV bytes for "+ + "failure message: %v", err) + } + } + + var reason byte + err = ReadElements(r, &reason, &f.FailureSourceIndex) + if err != nil { + return nil, err + } + f.Reason = HTLCFailReason(reason) + + return f, nil +} diff --git a/payments/db/migration1/log.go b/payments/db/migration1/log.go new file mode 100644 index 00000000000..52f1f750589 --- /dev/null +++ b/payments/db/migration1/log.go @@ -0,0 +1,32 @@ +package migration1 + +import ( + "github.com/btcsuite/btclog/v2" + "github.com/lightningnetwork/lnd/build" +) + +// log is a logger that is initialized with no output filters. This +// means the package will not perform any logging by default until the caller +// requests it. +var log btclog.Logger + +// Subsystem defines the logging identifier for this subsystem. 
+const Subsystem = "PYDB"
+
+// The default amount of logging is none.
+func init() {
+ UseLogger(build.NewSubLogger(Subsystem, nil))
+}
+
+// DisableLog disables all library log output. Logging output is disabled
+// by default until UseLogger is called.
+func DisableLog() {
+ UseLogger(btclog.Disabled)
+}
+
+// UseLogger uses a specified Logger to output package logging info.
+// This should be used in preference to SetLogWriter if the caller is also
+// using btclog.
+func UseLogger(logger btclog.Logger) {
+ log = logger
+}
diff --git a/payments/db/migration1/options.go b/payments/db/migration1/options.go
new file mode 100644
index 00000000000..382afb26c02
--- /dev/null
+++ b/payments/db/migration1/options.go
@@ -0,0 +1,26 @@
+package migration1
+
+// StoreOptions holds parameters for the KVStore.
+type StoreOptions struct {
+ // NoMigration allows opening the database in read-only mode.
+ NoMigration bool
+}
+
+// DefaultOptions returns a StoreOptions populated with default values.
+func DefaultOptions() *StoreOptions {
+ return &StoreOptions{
+ NoMigration: false,
+ }
+}
+
+// OptionModifier is a function signature for modifying the default
+// StoreOptions.
+type OptionModifier func(*StoreOptions)
+
+// WithNoMigration allows the database to be opened in read only mode by
+// disabling migrations.
+func WithNoMigration(b bool) OptionModifier { + return func(o *StoreOptions) { + o.NoMigration = b + } +} diff --git a/payments/db/migration1/payment.go b/payments/db/migration1/payment.go new file mode 100644 index 00000000000..76d22154131 --- /dev/null +++ b/payments/db/migration1/payment.go @@ -0,0 +1,836 @@ +package migration1 + +import ( + "bytes" + "errors" + "fmt" + "time" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/davecgh/go-spew/spew" + sphinx "github.com/lightningnetwork/lightning-onion" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnutils" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing/route" +) + +// FailureReason encodes the reason a payment ultimately failed. +type FailureReason byte + +const ( + // FailureReasonTimeout indicates that the payment did timeout before a + // successful payment attempt was made. + FailureReasonTimeout FailureReason = 0 + + // FailureReasonNoRoute indicates no successful route to the + // destination was found during path finding. + FailureReasonNoRoute FailureReason = 1 + + // FailureReasonError indicates that an unexpected error happened during + // payment. + FailureReasonError FailureReason = 2 + + // FailureReasonPaymentDetails indicates that either the hash is unknown + // or the final cltv delta or amount is incorrect. + FailureReasonPaymentDetails FailureReason = 3 + + // FailureReasonInsufficientBalance indicates that we didn't have enough + // balance to complete the payment. + FailureReasonInsufficientBalance FailureReason = 4 + + // FailureReasonCanceled indicates that the payment was canceled by the + // user. + FailureReasonCanceled FailureReason = 5 + + // TODO(joostjager): Add failure reasons for: + // LocalLiquidityInsufficient, RemoteCapacityInsufficient. +) + +// Error returns a human-readable error string for the FailureReason. 
+func (r FailureReason) Error() string { + return r.String() +} + +// String returns a human-readable FailureReason. +func (r FailureReason) String() string { + switch r { + case FailureReasonTimeout: + return "timeout" + case FailureReasonNoRoute: + return "no_route" + case FailureReasonError: + return "error" + case FailureReasonPaymentDetails: + return "incorrect_payment_details" + case FailureReasonInsufficientBalance: + return "insufficient_balance" + case FailureReasonCanceled: + return "canceled" + } + + return "unknown" +} + +// PaymentCreationInfo is the information necessary to have ready when +// initiating a payment, moving it into state InFlight. +type PaymentCreationInfo struct { + // PaymentIdentifier is the hash this payment is paying to in case of + // non-AMP payments, and the SetID for AMP payments. + PaymentIdentifier lntypes.Hash + + // Value is the amount we are paying. + Value lnwire.MilliSatoshi + + // CreationTime is the time when this payment was initiated. + CreationTime time.Time + + // PaymentRequest is the full payment request, if any. + PaymentRequest []byte + + // FirstHopCustomRecords are the TLV records that are to be sent to the + // first hop of this payment. These records will be transmitted via the + // wire message (UpdateAddHTLC) only and therefore do not affect the + // onion payload size. + FirstHopCustomRecords lnwire.CustomRecords +} + +// String returns a human-readable description of the payment creation info. +func (p *PaymentCreationInfo) String() string { + return fmt.Sprintf("payment_id=%v, amount=%v, created_at=%v", + p.PaymentIdentifier, p.Value, p.CreationTime) +} + +// HTLCAttemptInfo contains static information about a specific HTLC attempt +// for a payment. This information is used by the router to handle any errors +// coming back after an attempt is made, and to query the switch about the +// status of the attempt. +type HTLCAttemptInfo struct { + // AttemptID is the unique ID used for this attempt. 
+ AttemptID uint64 + + // sessionKey is the raw bytes ephemeral key used for this attempt. + // These bytes are lazily read off disk to save ourselves the expensive + // EC operations used by btcec.PrivKeyFromBytes. + sessionKey [btcec.PrivKeyBytesLen]byte + + // cachedSessionKey is our fully deserialized sesionKey. This value + // may be nil if the attempt has just been read from disk and its + // session key has not been used yet. + cachedSessionKey *btcec.PrivateKey + + // Route is the route attempted to send the HTLC. + Route route.Route + + // AttemptTime is the time at which this HTLC was attempted. + AttemptTime time.Time + + // Hash is the hash used for this single HTLC attempt. For AMP payments + // this will differ across attempts, for non-AMP payments each attempt + // will use the same hash. This can be nil for older payment attempts, + // in which the payment's PaymentHash in the PaymentCreationInfo should + // be used. + Hash *lntypes.Hash + + // onionBlob is the cached value for onion blob created from the sphinx + // construction. + onionBlob [lnwire.OnionPacketSize]byte + + // circuit is the cached value for sphinx circuit. + circuit *sphinx.Circuit +} + +// NewHtlcAttempt creates a htlc attempt. +func NewHtlcAttempt(attemptID uint64, sessionKey *btcec.PrivateKey, + route route.Route, attemptTime time.Time, + hash *lntypes.Hash) (*HTLCAttempt, error) { + + var scratch [btcec.PrivKeyBytesLen]byte + copy(scratch[:], sessionKey.Serialize()) + + info := HTLCAttemptInfo{ + AttemptID: attemptID, + sessionKey: scratch, + cachedSessionKey: sessionKey, + Route: route, + AttemptTime: attemptTime, + Hash: hash, + } + + if err := info.attachOnionBlobAndCircuit(); err != nil { + return nil, err + } + + return &HTLCAttempt{HTLCAttemptInfo: info}, nil +} + +// SessionKey returns the ephemeral key used for a htlc attempt. This function +// performs expensive ec-ops to obtain the session key if it is not cached. 
+func (h *HTLCAttemptInfo) SessionKey() *btcec.PrivateKey { + if h.cachedSessionKey == nil { + h.cachedSessionKey, _ = btcec.PrivKeyFromBytes( + h.sessionKey[:], + ) + } + + return h.cachedSessionKey +} + +// setSessionKey sets the session key for the htlc attempt. +// +// NOTE: Only used for testing. +// +//nolint:unused +func (h *HTLCAttemptInfo) setSessionKey(sessionKey *btcec.PrivateKey) { + h.cachedSessionKey = sessionKey + + // Also set the session key as a raw bytes. + var scratch [btcec.PrivKeyBytesLen]byte + copy(scratch[:], sessionKey.Serialize()) + h.sessionKey = scratch +} + +// OnionBlob returns the onion blob created from the sphinx construction. +func (h *HTLCAttemptInfo) OnionBlob() ([lnwire.OnionPacketSize]byte, error) { + var zeroBytes [lnwire.OnionPacketSize]byte + if h.onionBlob == zeroBytes { + if err := h.attachOnionBlobAndCircuit(); err != nil { + return zeroBytes, err + } + } + + return h.onionBlob, nil +} + +// Circuit returns the sphinx circuit for this attempt. +func (h *HTLCAttemptInfo) Circuit() (*sphinx.Circuit, error) { + if h.circuit == nil { + if err := h.attachOnionBlobAndCircuit(); err != nil { + return nil, err + } + } + + return h.circuit, nil +} + +// attachOnionBlobAndCircuit creates a sphinx packet and caches the onion blob +// and circuit for this attempt. +func (h *HTLCAttemptInfo) attachOnionBlobAndCircuit() error { + onionBlob, circuit, err := generateSphinxPacket( + &h.Route, h.Hash[:], h.SessionKey(), + ) + if err != nil { + return err + } + + copy(h.onionBlob[:], onionBlob) + h.circuit = circuit + + return nil +} + +// HTLCAttempt contains information about a specific HTLC attempt for a given +// payment. It contains the HTLCAttemptInfo used to send the HTLC, as well +// as a timestamp and any known outcome of the attempt. +type HTLCAttempt struct { + HTLCAttemptInfo + + // Settle is the preimage of a successful payment. This serves as a + // proof of payment. It will only be non-nil for settled payments. 
+ // + // NOTE: Can be nil if payment is not settled. + Settle *HTLCSettleInfo + + // Fail is a failure reason code indicating the reason the payment + // failed. It is only non-nil for failed payments. + // + // NOTE: Can be nil if payment is not failed. + Failure *HTLCFailInfo +} + +// HTLCSettleInfo encapsulates the information that augments an HTLCAttempt in +// the event that the HTLC is successful. +type HTLCSettleInfo struct { + // Preimage is the preimage of a successful HTLC. This serves as a proof + // of payment. + Preimage lntypes.Preimage + + // SettleTime is the time at which this HTLC was settled. + SettleTime time.Time +} + +// HTLCFailReason is the reason an htlc failed. +type HTLCFailReason byte + +const ( + // HTLCFailUnknown is recorded for htlcs that failed with an unknown + // reason. + HTLCFailUnknown HTLCFailReason = 0 + + // HTLCFailUnreadable is recorded for htlcs that had a failure message + // that couldn't be decrypted. + HTLCFailUnreadable HTLCFailReason = 1 + + // HTLCFailInternal is recorded for htlcs that failed because of an + // internal error. + HTLCFailInternal HTLCFailReason = 2 + + // HTLCFailMessage is recorded for htlcs that failed with a network + // failure message. + HTLCFailMessage HTLCFailReason = 3 +) + +// HTLCFailInfo encapsulates the information that augments an HTLCAttempt in the +// event that the HTLC fails. +type HTLCFailInfo struct { + // FailTime is the time at which this HTLC was failed. + FailTime time.Time + + // Message is the wire message that failed this HTLC. This field will be + // populated when the failure reason is HTLCFailMessage. + Message lnwire.FailureMessage + + // Reason is the failure reason for this HTLC. + Reason HTLCFailReason + + // The position in the path of the intermediate or final node that + // generated the failure message. Position zero is the sender node. This + // field will be populated when the failure reason is either + // HTLCFailMessage or HTLCFailUnknown. 
+ FailureSourceIndex uint32 +} + +// MPPaymentState wraps a series of info needed for a given payment, which is +// used by both MPP and AMP. This is a memory representation of the payment's +// current state and is updated whenever the payment is read from disk. +type MPPaymentState struct { + // NumAttemptsInFlight specifies the number of HTLCs the payment is + // waiting results for. + NumAttemptsInFlight int + + // RemainingAmt specifies how much more money to be sent. + RemainingAmt lnwire.MilliSatoshi + + // FeesPaid specifies the total fees paid so far that can be used to + // calculate remaining fee budget. + FeesPaid lnwire.MilliSatoshi + + // HasSettledHTLC is true if at least one of the payment's HTLCs is + // settled. + HasSettledHTLC bool + + // PaymentFailed is true if the payment has been marked as failed with + // a reason. + PaymentFailed bool +} + +// MPPayment is a wrapper around a payment's PaymentCreationInfo and +// HTLCAttempts. All payments will have the PaymentCreationInfo set, any +// HTLCs made in attempts to be completed will populated in the HTLCs slice. +// Each populated HTLCAttempt represents an attempted HTLC, each of which may +// have the associated Settle or Fail struct populated if the HTLC is no longer +// in-flight. +type MPPayment struct { + // SequenceNum is a unique identifier used to sort the payments in + // order of creation. + SequenceNum uint64 + + // Info holds all static information about this payment, and is + // populated when the payment is initiated. + Info *PaymentCreationInfo + + // HTLCs holds the information about individual HTLCs that we send in + // order to make the payment. + HTLCs []HTLCAttempt + + // FailureReason is the failure reason code indicating the reason the + // payment failed. + // + // NOTE: Will only be set once the daemon has given up on the payment + // altogether. + FailureReason *FailureReason + + // Status is the current PaymentStatus of this payment. 
+ Status PaymentStatus + + // State is the current state of the payment that holds a number of key + // insights and is used to determine what to do on each payment loop + // iteration. + State *MPPaymentState +} + +// Terminated returns a bool to specify whether the payment is in a terminal +// state. +func (m *MPPayment) Terminated() bool { + // If the payment is in terminal state, it cannot be updated. + return m.Status.updatable() != nil +} + +// TerminalInfo returns any HTLC settle info recorded. If no settle info is +// recorded, any payment level failure will be returned. If neither a settle +// nor a failure is recorded, both return values will be nil. +func (m *MPPayment) TerminalInfo() (*HTLCAttempt, *FailureReason) { + for _, h := range m.HTLCs { + if h.Settle != nil { + return &h, nil + } + } + + return nil, m.FailureReason +} + +// SentAmt returns the sum of sent amount and fees for HTLCs that are either +// settled or still in flight. +func (m *MPPayment) SentAmt() (lnwire.MilliSatoshi, lnwire.MilliSatoshi) { + var sent, fees lnwire.MilliSatoshi + for _, h := range m.HTLCs { + if h.Failure != nil { + continue + } + + // The attempt was not failed, meaning the amount was + // potentially sent to the receiver. + sent += h.Route.ReceiverAmt() + fees += h.Route.TotalFees() + } + + return sent, fees +} + +// InFlightHTLCs returns the HTLCs that are still in-flight, meaning they have +// not been settled or failed. +func (m *MPPayment) InFlightHTLCs() []HTLCAttempt { + var inflights []HTLCAttempt + for _, h := range m.HTLCs { + if h.Settle != nil || h.Failure != nil { + continue + } + + inflights = append(inflights, h) + } + + return inflights +} + +// GetAttempt returns the specified htlc attempt on the payment. +func (m *MPPayment) GetAttempt(id uint64) (*HTLCAttempt, error) { + // TODO(yy): iteration can be slow, make it into a tree or use BS. 
+ for _, htlc := range m.HTLCs { + htlc := htlc + if htlc.AttemptID == id { + return &htlc, nil + } + } + + return nil, errors.New("htlc attempt not found on payment") +} + +// Registrable returns an error to specify whether adding more HTLCs to the +// payment with its current status is allowed. A payment can accept new HTLC +// registrations when it's newly created, or none of its HTLCs is in a terminal +// state. +func (m *MPPayment) Registrable() error { + // If updating the payment is not allowed, we can't register new HTLCs. + // Otherwise, the status must be either `StatusInitiated` or + // `StatusInFlight`. + if err := m.Status.updatable(); err != nil { + return err + } + + // Exit early if this is not inflight. + if m.Status != StatusInFlight { + return nil + } + + // There are still inflight HTLCs and we need to check whether there + // are settled HTLCs or the payment is failed. If we already have + // settled HTLCs, we won't allow adding more HTLCs. + if m.State.HasSettledHTLC { + return ErrPaymentPendingSettled + } + + // If the payment is already failed, we won't allow adding more HTLCs. + if m.State.PaymentFailed { + return ErrPaymentPendingFailed + } + + // Otherwise we can add more HTLCs. + return nil +} + +// setState creates and attaches a new MPPaymentState to the payment. It also +// updates the payment's status based on its current state. +func (m *MPPayment) setState() error { + // Fetch the total amount and fees that has already been sent in + // settled and still in-flight shards. + sentAmt, fees := m.SentAmt() + + // Sanity check we haven't sent a value larger than the payment amount. + totalAmt := m.Info.Value + if sentAmt > totalAmt { + return fmt.Errorf("%w: sent=%v, total=%v", + ErrSentExceedsTotal, sentAmt, totalAmt) + } + + // Get any terminal info for this payment. + settle, failure := m.TerminalInfo() + + // Now determine the payment's status. 
+ status, err := decidePaymentStatus(m.HTLCs, m.FailureReason) + if err != nil { + return err + } + + // Update the payment state and status. + m.State = &MPPaymentState{ + NumAttemptsInFlight: len(m.InFlightHTLCs()), + RemainingAmt: totalAmt - sentAmt, + FeesPaid: fees, + HasSettledHTLC: settle != nil, + PaymentFailed: failure != nil, + } + m.Status = status + + return nil +} + +// SetState calls the internal method setState. This is a temporary method +// to be used by the tests in routing. Once the tests are updated to use mocks, +// this method can be removed. +// +// TODO(yy): delete. +func (m *MPPayment) SetState() error { + return m.setState() +} + +// NeedWaitAttempts decides whether we need to hold creating more HTLC attempts +// and wait for the results of the payment's inflight HTLCs. Return an error if +// the payment is in an unexpected state. +func (m *MPPayment) NeedWaitAttempts() (bool, error) { + // Check when the remainingAmt is not zero, which means we have more + // money to be sent. + if m.State.RemainingAmt != 0 { + switch m.Status { + // If the payment is newly created, no need to wait for HTLC + // results. + case StatusInitiated: + return false, nil + + // If we have inflight HTLCs, we'll check if we have terminal + // states to decide if we need to wait. + case StatusInFlight: + // We still have money to send, and one of the HTLCs is + // settled. We'd stop sending money and wait for all + // inflight HTLC attempts to finish. + if m.State.HasSettledHTLC { + log.Warnf("payment=%v has remaining amount "+ + "%v, yet at least one of its HTLCs is "+ + "settled", m.Info.PaymentIdentifier, + m.State.RemainingAmt) + + return true, nil + } + + // The payment has a failure reason though we still + // have money to send, we'd stop sending money and wait + // for all inflight HTLC attempts to finish. + if m.State.PaymentFailed { + return true, nil + } + + // Otherwise we don't need to wait for inflight HTLCs + // since we still have money to be sent. 
+ return false, nil + + // We need to send more money, yet the payment is already + // succeeded. Return an error in this case as the receiver is + // violating the protocol. + case StatusSucceeded: + return false, fmt.Errorf("%w: parts of the payment "+ + "already succeeded but still have remaining "+ + "amount %v", ErrPaymentInternal, + m.State.RemainingAmt) + + // The payment is failed and we have no inflight HTLCs, no need + // to wait. + case StatusFailed: + return false, nil + + // Unknown payment status. + default: + return false, fmt.Errorf("%w: %s", + ErrUnknownPaymentStatus, m.Status) + } + } + + // Now we determine whether we need to wait when the remainingAmt is + // already zero. + switch m.Status { + // When the payment is newly created, yet the payment has no remaining + // amount, return an error. + case StatusInitiated: + return false, fmt.Errorf("%w: %v", + ErrPaymentInternal, m.Status) + + // If the payment is inflight, we must wait. + // + // NOTE: an edge case is when all HTLCs are failed while the payment is + // not failed we'd still be in this inflight state. However, since the + // remainingAmt is zero here, it means we cannot be in that state as + // otherwise the remainingAmt would not be zero. + case StatusInFlight: + return true, nil + + // If the payment is already succeeded, no need to wait. + case StatusSucceeded: + return false, nil + + // If the payment is already failed, yet the remaining amount is zero, + // return an error as this indicates an error state. We will only each + // this status when there are no inflight HTLCs and the payment is + // marked as failed with a reason, which means the remainingAmt must + // not be zero because our sentAmt is zero. + case StatusFailed: + return false, fmt.Errorf("%w: %v", + ErrPaymentInternal, m.Status) + + // Unknown payment status. + default: + return false, fmt.Errorf("%w: %s", + ErrUnknownPaymentStatus, m.Status) + } +} + +// GetState returns the internal state of the payment. 
+func (m *MPPayment) GetState() *MPPaymentState { + return m.State +} + +// GetStatus returns the current status of the payment. +func (m *MPPayment) GetStatus() PaymentStatus { + return m.Status +} + +// GetHTLCs returns all the HTLCs for this payment. +func (m *MPPayment) GetHTLCs() []HTLCAttempt { + return m.HTLCs +} + +// AllowMoreAttempts is used to decide whether we can safely attempt more HTLCs +// for a given payment state. Return an error if the payment is in an +// unexpected state. +func (m *MPPayment) AllowMoreAttempts() (bool, error) { + // Now check whether the remainingAmt is zero or not. If we don't have + // any remainingAmt, no more HTLCs should be made. + if m.State.RemainingAmt == 0 { + // If the payment is newly created, yet we don't have any + // remainingAmt, return an error. + if m.Status == StatusInitiated { + return false, fmt.Errorf("%w: initiated payment has "+ + "zero remainingAmt", + ErrPaymentInternal) + } + + // Otherwise, exit early since all other statuses with zero + // remainingAmt indicate no more HTLCs can be made. + return false, nil + } + + // Otherwise, the remaining amount is not zero, we now decide whether + // to make more attempts based on the payment's current status. + // + // If at least one of the payment's attempts is settled, yet we haven't + // sent all the amount, it indicates something is wrong with the peer + // as the preimage is received. In this case, return an error state. + if m.Status == StatusSucceeded { + return false, fmt.Errorf("%w: payment already succeeded but "+ + "still have remaining amount %v", + ErrPaymentInternal, m.State.RemainingAmt) + } + + // Now check if we can register a new HTLC. + err := m.Registrable() + if err != nil { + log.Warnf("Payment(%v): cannot register HTLC attempt: %v, "+ + "current status: %s", m.Info.PaymentIdentifier, + err, m.Status) + + return false, nil + } + + // Now we know we can register new HTLCs. 
+ return true, nil +} + +// generateSphinxPacket generates then encodes a sphinx packet which encodes +// the onion route specified by the passed layer 3 route. The blob returned +// from this function can immediately be included within an HTLC add packet to +// be sent to the first hop within the route. +func generateSphinxPacket(rt *route.Route, paymentHash []byte, + sessionKey *btcec.PrivateKey) ([]byte, *sphinx.Circuit, error) { + + // Now that we know we have an actual route, we'll map the route into a + // sphinx payment path which includes per-hop payloads for each hop + // that give each node within the route the necessary information + // (fees, CLTV value, etc.) to properly forward the payment. + sphinxPath, err := rt.ToSphinxPath() + if err != nil { + return nil, nil, err + } + + log.Tracef("Constructed per-hop payloads for payment_hash=%x: %v", + paymentHash, lnutils.NewLogClosure(func() string { + path := make( + []sphinx.OnionHop, sphinxPath.TrueRouteLength(), + ) + for i := range path { + hopCopy := sphinxPath[i] + path[i] = hopCopy + } + + return spew.Sdump(path) + }), + ) + + // Next generate the onion routing packet which allows us to perform + // privacy preserving source routing across the network. + sphinxPacket, err := sphinx.NewOnionPacket( + sphinxPath, sessionKey, paymentHash, + sphinx.DeterministicPacketFiller, + ) + if err != nil { + return nil, nil, err + } + + // Finally, encode Sphinx packet using its wire representation to be + // included within the HTLC add packet. + var onionBlob bytes.Buffer + if err := sphinxPacket.Encode(&onionBlob); err != nil { + return nil, nil, err + } + + log.Tracef("Generated sphinx packet: %v", + lnutils.NewLogClosure(func() string { + // We make a copy of the ephemeral key and unset the + // internal curve here in order to keep the logs from + // getting noisy. 
+ key := *sphinxPacket.EphemeralKey + packetCopy := *sphinxPacket + packetCopy.EphemeralKey = &key + + return spew.Sdump(packetCopy) + }), + ) + + return onionBlob.Bytes(), &sphinx.Circuit{ + SessionKey: sessionKey, + PaymentPath: sphinxPath.NodeKeys(), + }, nil +} + +// verifyAttempt validates that a new HTLC attempt is compatible with the +// existing payment and its in-flight HTLCs. This function checks: +// 1. MPP (Multi-Path Payment) compatibility between attempts +// 2. Blinded payment consistency +// 3. Amount validation +// 4. Total payment amount limits +func verifyAttempt(payment *MPPayment, attempt *HTLCAttemptInfo) error { + // If the final hop has encrypted data, then we know this is a + // blinded payment. In blinded payments, MPP records are not set + // for split payments and the recipient is responsible for using + // a consistent PathID across the various encrypted data + // payloads that we received from them for this payment. All we + // need to check is that the total amount field for each HTLC + // in the split payment is correct. + isBlinded := len(attempt.Route.FinalHop().EncryptedData) != 0 + + // For blinded payments, the last hop must set the total amount. + if isBlinded { + if attempt.Route.FinalHop().TotalAmtMsat == 0 { + return ErrBlindedPaymentMissingTotalAmount + } + } + + // Make sure any existing shards match the new one with regards + // to MPP options. + mpp := attempt.Route.FinalHop().MPP + + // MPP records should not be set for attempts to blinded paths. + if isBlinded && mpp != nil { + return ErrMPPRecordInBlindedPayment + } + + for _, h := range payment.InFlightHTLCs() { + hMpp := h.Route.FinalHop().MPP + hBlinded := len(h.Route.FinalHop().EncryptedData) != 0 + + // If this is a blinded payment, then no existing HTLCs + // should have MPP records. 
+ if isBlinded && hMpp != nil { + return ErrMPPRecordInBlindedPayment + } + + // If the payment is blinded (previous attempts used blinded + // paths) and the attempt is not, or vice versa, return an + // error. + if isBlinded != hBlinded { + return ErrMixedBlindedAndNonBlindedPayments + } + + // If this is a blinded payment, then we just need to + // check that the TotalAmtMsat field for this shard + // is equal to that of any other shard in the same + // payment. + if isBlinded { + if attempt.Route.FinalHop().TotalAmtMsat != + h.Route.FinalHop().TotalAmtMsat { + + return ErrBlindedPaymentTotalAmountMismatch + } + + continue + } + + switch { + // We tried to register a non-MPP attempt for a MPP + // payment. + case mpp == nil && hMpp != nil: + return ErrMPPayment + + // We tried to register a MPP shard for a non-MPP + // payment. + case mpp != nil && hMpp == nil: + return ErrNonMPPayment + + // Non-MPP payment, nothing more to validate. + case mpp == nil: + continue + } + + // Check that MPP options match. + if mpp.PaymentAddr() != hMpp.PaymentAddr() { + return ErrMPPPaymentAddrMismatch + } + + if mpp.TotalMsat() != hMpp.TotalMsat() { + return ErrMPPTotalAmountMismatch + } + } + + // If this is a non-MPP attempt, it must match the total amount + // exactly. Note that a blinded payment is considered an MPP + // attempt. + amt := attempt.Route.ReceiverAmt() + if !isBlinded && mpp == nil && amt != payment.Info.Value { + return ErrValueMismatch + } + + // Ensure we aren't sending more than the total payment amount. 
+ sentAmt, _ := payment.SentAmt() + if sentAmt+amt > payment.Info.Value { + return fmt.Errorf("%w: attempted=%v, payment amount=%v", + ErrValueExceedsAmt, sentAmt+amt, payment.Info.Value) + } + + return nil +} diff --git a/payments/db/migration1/payment_status.go b/payments/db/migration1/payment_status.go new file mode 100644 index 00000000000..16c4b90fba2 --- /dev/null +++ b/payments/db/migration1/payment_status.go @@ -0,0 +1,257 @@ +package migration1 + +import ( + "fmt" +) + +// PaymentStatus represent current status of payment. +type PaymentStatus byte + +const ( + // NOTE: PaymentStatus = 0 was previously used for status unknown and + // is now deprecated. + + // StatusInitiated is the status where a payment has just been + // initiated. + StatusInitiated PaymentStatus = 1 + + // StatusInFlight is the status where a payment has been initiated, but + // a response has not been received. + StatusInFlight PaymentStatus = 2 + + // StatusSucceeded is the status where a payment has been initiated and + // the payment was completed successfully. + StatusSucceeded PaymentStatus = 3 + + // StatusFailed is the status where a payment has been initiated and a + // failure result has come back. + StatusFailed PaymentStatus = 4 +) + +// errPaymentStatusUnknown is returned when a payment has an unknown status. +var errPaymentStatusUnknown = fmt.Errorf("unknown payment status") + +// String returns readable representation of payment status. +func (ps PaymentStatus) String() string { + switch ps { + case StatusInitiated: + return "Initiated" + + case StatusInFlight: + return "In Flight" + + case StatusSucceeded: + return "Succeeded" + + case StatusFailed: + return "Failed" + + default: + return "Unknown" + } +} + +// initializable returns an error to specify whether initiating the payment +// with its current status is allowed. A payment can only be initialized if it +// hasn't been created yet or already failed. 
+func (ps PaymentStatus) initializable() error { + switch ps { + // The payment has been created already. We will disallow creating it + // again in case other goroutines have already been creating HTLCs for + // it. + case StatusInitiated: + return ErrPaymentExists + + // We already have an InFlight payment on the network. We will disallow + // any new payments. + case StatusInFlight: + return ErrPaymentInFlight + + // The payment has been attempted and is succeeded so we won't allow + // creating it again. + case StatusSucceeded: + return ErrAlreadyPaid + + // We allow retrying failed payments. + case StatusFailed: + return nil + + default: + return fmt.Errorf("%w: %v", ErrUnknownPaymentStatus, + ps) + } +} + +// removable returns an error to specify whether deleting the payment with its +// current status is allowed. A payment cannot be safely deleted if it has +// inflight HTLCs. +func (ps PaymentStatus) removable() error { + switch ps { + // The payment has been created but has no HTLCs and can be removed. + case StatusInitiated: + return nil + + // There are still inflight HTLCs and the payment needs to wait for the + // final outcomes. + case StatusInFlight: + return ErrPaymentInFlight + + // The payment has been attempted and is succeeded and is allowed to be + // removed. + case StatusSucceeded: + return nil + + // Failed payments are allowed to be removed. + case StatusFailed: + return nil + + default: + return fmt.Errorf("%w: %v", ErrUnknownPaymentStatus, + ps) + } +} + +// updatable returns an error to specify whether the payment's HTLCs can be +// updated. A payment can update its HTLCs when it has inflight HTLCs. +func (ps PaymentStatus) updatable() error { + switch ps { + // Newly created payments can be updated. + case StatusInitiated: + return nil + + // Inflight payments can be updated. + case StatusInFlight: + return nil + + // If the payment has a terminal condition, we won't allow any updates. 
+ case StatusSucceeded: + return ErrPaymentAlreadySucceeded + + case StatusFailed: + return ErrPaymentAlreadyFailed + + default: + return fmt.Errorf("%w: %v", ErrUnknownPaymentStatus, + ps) + } +} + +// decidePaymentStatus uses the payment's DB state to determine a memory status +// that's used by the payment router to decide following actions. +// Together, we use four variables to determine the payment's status, +// - inflight: whether there are any pending HTLCs. +// - settled: whether any of the HTLCs has been settled. +// - htlc failed: whether any of the HTLCs has been failed. +// - payment failed: whether the payment has been marked as failed. +// +// Based on the above variables, we derive the status using the following +// table, +// | inflight | settled | htlc failed | payment failed | status | +// |:--------:|:-------:|:-----------:|:--------------:|:--------------------:| +// | true | true | true | true | StatusInFlight | +// | true | true | true | false | StatusInFlight | +// | true | true | false | true | StatusInFlight | +// | true | true | false | false | StatusInFlight | +// | true | false | true | true | StatusInFlight | +// | true | false | true | false | StatusInFlight | +// | true | false | false | true | StatusInFlight | +// | true | false | false | false | StatusInFlight | +// | false | true | true | true | StatusSucceeded | +// | false | true | true | false | StatusSucceeded | +// | false | true | false | true | StatusSucceeded | +// | false | true | false | false | StatusSucceeded | +// | false | false | true | true | StatusFailed | +// | false | false | true | false | StatusInFlight | +// | false | false | false | true | StatusFailed | +// | false | false | false | false | StatusInitiated | +// +// When `inflight`, `settled`, `htlc failed`, and `payment failed` are false, +// this indicates the payment is newly created and hasn't made any HTLCs yet. 
+// When `inflight` and `settled` are false, `htlc failed` is true yet `payment
+// failed` is false, this indicates that all of the payment's HTLCs have
+// encountered a temporary failure and the payment is still in-flight.
+func decidePaymentStatus(htlcs []HTLCAttempt,
+	reason *FailureReason) (PaymentStatus, error) {
+
+	var (
+		inflight      bool
+		htlcSettled   bool
+		htlcFailed    bool
+		paymentFailed bool
+	)
+
+	// If we have a failure reason, the payment is failed.
+	if reason != nil {
+		paymentFailed = true
+	}
+
+	// Go through all HTLCs for this payment, check whether we have any
+	// settled HTLC, and any still in-flight.
+	for _, h := range htlcs {
+		if h.Failure != nil {
+			htlcFailed = true
+			continue
+		}
+
+		if h.Settle != nil {
+			htlcSettled = true
+			continue
+		}
+
+		// If any of the HTLCs are not failed nor settled, we
+		// still have inflight HTLCs.
+		inflight = true
+	}
+
+	// Use the DB state to determine the status of the payment.
+	switch {
+	// If we have inflight HTLCs, no matter we have settled or failed
+	// HTLCs, or the payment failed, we still consider it inflight so we
+	// inform upper systems to wait for the results.
+	case inflight:
+		return StatusInFlight, nil
+
+	// If we have no in-flight HTLCs, and at least one of the HTLCs is
+	// settled, the payment succeeded.
+	//
+	// NOTE: when reaching this case, paymentFailed could be true, which
+	// means we have a conflicting state for this payment. We choose to
+	// mark the payment as succeeded because it's the receiver's
+	// responsibility to only settle the payment iff all HTLCs are
+	// received.
+	case htlcSettled:
+		return StatusSucceeded, nil
+
+	// If we have no in-flight HTLCs, and the payment failure is set, the
+	// payment is considered failed.
+	//
+	// NOTE: when reaching this case, settled must be false.
+	case paymentFailed:
+		return StatusFailed, nil
+
+	// If we have no in-flight HTLCs, yet the payment is NOT failed, it
+	// means all the HTLCs are failed.
In this case we can attempt more + // HTLCs. + // + // NOTE: when reaching this case, both settled and paymentFailed must + // be false. + case htlcFailed: + return StatusInFlight, nil + + // If none of the HTLCs is either settled or failed, and we have no + // inflight HTLCs, this means the payment has no HTLCs created yet. + // + // NOTE: when reaching this case, both settled and paymentFailed must + // be false. + case !htlcFailed: + return StatusInitiated, nil + + // Otherwise an impossible state is reached. + // + // NOTE: we should never end up here. + default: + log.Error("Impossible payment state reached") + return 0, fmt.Errorf("%w: payment is corrupted", + errPaymentStatusUnknown) + } +} diff --git a/payments/db/migration1/query.go b/payments/db/migration1/query.go new file mode 100644 index 00000000000..1fab2fbd9b8 --- /dev/null +++ b/payments/db/migration1/query.go @@ -0,0 +1,75 @@ +package migration1 + +const ( + // DefaultMaxPayments is the default maximum number of payments returned + // in the payments query pagination. + DefaultMaxPayments = 100 +) + +// Query represents a query to the payments database starting or ending +// at a certain offset index. The number of retrieved records can be limited. +type Query struct { + // IndexOffset determines the starting point of the payments query and + // is always exclusive. In normal order, the query starts at the next + // higher (available) index compared to IndexOffset. In reversed order, + // the query ends at the next lower (available) index compared to the + // IndexOffset. In the case of a zero index_offset, the query will start + // with the oldest payment when paginating forwards, or will end with + // the most recent payment when paginating backwards. + IndexOffset uint64 + + // MaxPayments is the maximal number of payments returned in the + // payments query. + MaxPayments uint64 + + // Reversed gives a meaning to the IndexOffset. 
If reversed is set to + // true, the query will fetch payments with indices lower than the + // IndexOffset, otherwise, it will return payments with indices greater + // than the IndexOffset. + Reversed bool + + // If IncludeIncomplete is true, then return payments that have not yet + // fully completed. This means that pending payments, as well as failed + // payments will show up if this field is set to true. + IncludeIncomplete bool + + // CountTotal indicates that all payments currently present in the + // payment index (complete and incomplete) should be counted. + CountTotal bool + + // CreationDateStart, expressed in Unix seconds, if set, filters out + // all payments with a creation date greater than or equal to it. + CreationDateStart int64 + + // CreationDateEnd, expressed in Unix seconds, if set, filters out all + // payments with a creation date less than or equal to it. + CreationDateEnd int64 +} + +// Response contains the result of a query to the payments database. +// It includes the set of payments that match the query and integers which +// represent the index of the first and last item returned in the series of +// payments. These integers allow callers to resume their query in the event +// that the query's response exceeds the max number of returnable events. +type Response struct { + // Payments is the set of payments returned from the database for the + // Query. + Payments []*MPPayment + + // FirstIndexOffset is the index of the first element in the set of + // returned MPPayments. Callers can use this to resume their query + // in the event that the slice has too many events to fit into a single + // response. The offset can be used to continue reverse pagination. + FirstIndexOffset uint64 + + // LastIndexOffset is the index of the last element in the set of + // returned MPPayments. Callers can use this to resume their query + // in the event that the slice has too many events to fit into a single + // response. 
The offset can be used to continue forward pagination. + LastIndexOffset uint64 + + // TotalCount represents the total number of payments that are currently + // stored in the payment database. This will only be set if the + // CountTotal field in the query was set to true. + TotalCount uint64 +} diff --git a/payments/db/migration1/sql_converters.go b/payments/db/migration1/sql_converters.go new file mode 100644 index 00000000000..ebd4764d0b6 --- /dev/null +++ b/payments/db/migration1/sql_converters.go @@ -0,0 +1,275 @@ +package migration1 + +import ( + "bytes" + "fmt" + "strconv" + "time" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/payments/db/migration1/sqlc" + "github.com/lightningnetwork/lnd/record" + "github.com/lightningnetwork/lnd/routing/route" + "github.com/lightningnetwork/lnd/tlv" +) + +// dbPaymentToCreationInfo converts database payment data to the +// PaymentCreationInfo struct. +func dbPaymentToCreationInfo(paymentIdentifier []byte, amountMsat int64, + createdAt time.Time, intentPayload []byte, + firstHopCustomRecords lnwire.CustomRecords) *PaymentCreationInfo { + + // This is the payment hash for non-AMP payments and the SetID for AMP + // payments. + var identifier lntypes.Hash + copy(identifier[:], paymentIdentifier) + + return &PaymentCreationInfo{ + PaymentIdentifier: identifier, + Value: lnwire.MilliSatoshi(amountMsat), + // The creation time is stored in the database as UTC but here + // we convert it to local time. + CreationTime: createdAt.Local(), + PaymentRequest: intentPayload, + FirstHopCustomRecords: firstHopCustomRecords, + } +} + +// dbAttemptToHTLCAttempt converts a database HTLC attempt to an HTLCAttempt. 
+func dbAttemptToHTLCAttempt(dbAttempt sqlc.FetchHtlcAttemptsForPaymentsRow, + hops []sqlc.FetchHopsForAttemptsRow, + hopCustomRecords map[int64][]sqlc.PaymentHopCustomRecord, + routeCustomRecords []sqlc.PaymentAttemptFirstHopCustomRecord) ( + *HTLCAttempt, error) { + + // Convert route-level first hop custom records to CustomRecords map. + var firstHopWireCustomRecords lnwire.CustomRecords + if len(routeCustomRecords) > 0 { + firstHopWireCustomRecords = make(lnwire.CustomRecords) + for _, record := range routeCustomRecords { + firstHopWireCustomRecords[uint64(record.Key)] = + record.Value + } + } + + // Build the route from the database data. + route, err := dbDataToRoute( + hops, hopCustomRecords, dbAttempt.FirstHopAmountMsat, + dbAttempt.RouteTotalTimeLock, dbAttempt.RouteTotalAmount, + dbAttempt.RouteSourceKey, firstHopWireCustomRecords, + ) + if err != nil { + return nil, fmt.Errorf("failed to convert to route: %w", + err) + } + + hash, err := lntypes.MakeHash(dbAttempt.PaymentHash) + if err != nil { + return nil, fmt.Errorf("failed to parse payment "+ + "hash: %w", err) + } + + // Create the attempt info. + var sessionKey [32]byte + copy(sessionKey[:], dbAttempt.SessionKey) + + info := HTLCAttemptInfo{ + AttemptID: uint64(dbAttempt.AttemptIndex), + sessionKey: sessionKey, + Route: *route, + AttemptTime: dbAttempt.AttemptTime, + Hash: &hash, + } + + attempt := &HTLCAttempt{ + HTLCAttemptInfo: info, + } + + // If there's no resolution type, the attempt is still in-flight. + // Return early without processing settlement or failure info. + if !dbAttempt.ResolutionType.Valid { + return attempt, nil + } + + // Add settlement info if present. + if HTLCAttemptResolutionType(dbAttempt.ResolutionType.Int32) == + HTLCAttemptResolutionSettled { + + var preimage lntypes.Preimage + copy(preimage[:], dbAttempt.SettlePreimage) + + attempt.Settle = &HTLCSettleInfo{ + Preimage: preimage, + SettleTime: dbAttempt.ResolutionTime.Time, + } + } + + // Add failure info if present. 
+ if HTLCAttemptResolutionType(dbAttempt.ResolutionType.Int32) == + HTLCAttemptResolutionFailed { + + failure := &HTLCFailInfo{ + FailTime: dbAttempt.ResolutionTime.Time, + } + + if dbAttempt.HtlcFailReason.Valid { + failure.Reason = HTLCFailReason( + dbAttempt.HtlcFailReason.Int32, + ) + } + + if dbAttempt.FailureSourceIndex.Valid { + failure.FailureSourceIndex = uint32( + dbAttempt.FailureSourceIndex.Int32, + ) + } + + // Decode the failure message if present. + if len(dbAttempt.FailureMsg) > 0 { + msg, err := lnwire.DecodeFailureMessage( + bytes.NewReader(dbAttempt.FailureMsg), 0, + ) + if err != nil { + return nil, fmt.Errorf("failed to decode "+ + "failure message: %w", err) + } + failure.Message = msg + } + + attempt.Failure = failure + } + + return attempt, nil +} + +// dbDataToRoute converts database route data to a route.Route. +func dbDataToRoute(hops []sqlc.FetchHopsForAttemptsRow, + hopCustomRecords map[int64][]sqlc.PaymentHopCustomRecord, + firstHopAmountMsat int64, totalTimeLock int32, totalAmount int64, + sourceKey []byte, firstHopWireCustomRecords lnwire.CustomRecords) ( + *route.Route, error) { + + if len(hops) == 0 { + return nil, fmt.Errorf("no hops provided") + } + + // Hops are already sorted by hop_index from the SQL query. + routeHops := make([]*route.Hop, len(hops)) + + for i, hop := range hops { + pubKey, err := route.NewVertexFromBytes(hop.PubKey) + if err != nil { + return nil, fmt.Errorf("failed to parse pub key: %w", + err) + } + + var channelID uint64 + if hop.Scid != "" { + // The SCID is stored as a string representation + // of the uint64. + var err error + channelID, err = strconv.ParseUint(hop.Scid, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse "+ + "scid: %w", err) + } + } + + routeHop := &route.Hop{ + PubKeyBytes: pubKey, + ChannelID: channelID, + OutgoingTimeLock: uint32(hop.OutgoingTimeLock), + AmtToForward: lnwire.MilliSatoshi(hop.AmtToForward), + } + + // Add MPP record if present. 
+ if len(hop.MppPaymentAddr) > 0 { + var paymentAddr [32]byte + copy(paymentAddr[:], hop.MppPaymentAddr) + routeHop.MPP = record.NewMPP( + lnwire.MilliSatoshi(hop.MppTotalMsat.Int64), + paymentAddr, + ) + } + + // Add AMP record if present. + if len(hop.AmpRootShare) > 0 { + var rootShare [32]byte + copy(rootShare[:], hop.AmpRootShare) + var setID [32]byte + copy(setID[:], hop.AmpSetID) + + routeHop.AMP = record.NewAMP( + rootShare, setID, + uint32(hop.AmpChildIndex.Int32), + ) + } + + // Add blinding point if present (only for introduction node + // in blinded route). + if len(hop.BlindingPoint) > 0 { + pubKey, err := btcec.ParsePubKey(hop.BlindingPoint) + if err != nil { + return nil, fmt.Errorf("failed to parse "+ + "blinding point: %w", err) + } + routeHop.BlindingPoint = pubKey + } + + // Add encrypted data if present (for all blinded hops). + if len(hop.EncryptedData) > 0 { + routeHop.EncryptedData = hop.EncryptedData + } + + // Add total amount if present (only for final hop in blinded + // route). + if hop.BlindedPathTotalAmt.Valid { + routeHop.TotalAmtMsat = lnwire.MilliSatoshi( + hop.BlindedPathTotalAmt.Int64, + ) + } + + // Add hop-level custom records. + if records, ok := hopCustomRecords[hop.ID]; ok { + routeHop.CustomRecords = make( + record.CustomSet, + ) + for _, rec := range records { + routeHop.CustomRecords[uint64(rec.Key)] = + rec.Value + } + } + + // Add metadata if present. + if len(hop.MetaData) > 0 { + routeHop.Metadata = hop.MetaData + } + + routeHops[i] = routeHop + } + + // Parse the source node public key. + var sourceNode route.Vertex + copy(sourceNode[:], sourceKey) + + route := &route.Route{ + TotalTimeLock: uint32(totalTimeLock), + TotalAmount: lnwire.MilliSatoshi(totalAmount), + SourcePubKey: sourceNode, + Hops: routeHops, + FirstHopWireCustomRecords: firstHopWireCustomRecords, + } + + // Set the first hop amount if it is set. 
+ if firstHopAmountMsat != 0 { + route.FirstHopAmount = tlv.NewRecordT[tlv.TlvType0]( + tlv.NewBigSizeT(lnwire.MilliSatoshi( + firstHopAmountMsat, + )), + ) + } + + return route, nil +} diff --git a/payments/db/migration1/sql_store.go b/payments/db/migration1/sql_store.go new file mode 100644 index 00000000000..0725bfe59ba --- /dev/null +++ b/payments/db/migration1/sql_store.go @@ -0,0 +1,1972 @@ +package migration1 + +import ( + "bytes" + "context" + "database/sql" + "errors" + "fmt" + "math" + "strconv" + "time" + + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/payments/db/migration1/sqlc" + "github.com/lightningnetwork/lnd/routing/route" + "github.com/lightningnetwork/lnd/sqldb" +) + +// PaymentIntentType represents the type of payment intent. +type PaymentIntentType int16 + +const ( + // PaymentIntentTypeBolt11 indicates a BOLT11 invoice payment. + PaymentIntentTypeBolt11 PaymentIntentType = 0 +) + +// HTLCAttemptResolutionType represents the type of HTLC attempt resolution. +type HTLCAttemptResolutionType int32 + +const ( + // HTLCAttemptResolutionSettled indicates the HTLC attempt was settled + // successfully with a preimage. + HTLCAttemptResolutionSettled HTLCAttemptResolutionType = 1 + + // HTLCAttemptResolutionFailed indicates the HTLC attempt failed. + HTLCAttemptResolutionFailed HTLCAttemptResolutionType = 2 +) + +// SQLQueries is a subset of the sqlc.Querier interface that can be used to +// execute queries against the SQL payments tables. +// +//nolint:ll,interfacebloat +type SQLQueries interface { + /* + Payment DB read operations. 
+ */ + FilterPayments(ctx context.Context, query sqlc.FilterPaymentsParams) ([]sqlc.FilterPaymentsRow, error) + FetchPayment(ctx context.Context, paymentIdentifier []byte) (sqlc.FetchPaymentRow, error) + FetchPaymentsByIDs(ctx context.Context, paymentIDs []int64) ([]sqlc.FetchPaymentsByIDsRow, error) + + CountPayments(ctx context.Context) (int64, error) + + FetchHtlcAttemptsForPayments(ctx context.Context, paymentIDs []int64) ([]sqlc.FetchHtlcAttemptsForPaymentsRow, error) + FetchHtlcAttemptResolutionsForPayments(ctx context.Context, paymentIDs []int64) ([]sqlc.FetchHtlcAttemptResolutionsForPaymentsRow, error) + FetchAllInflightAttempts(ctx context.Context, arg sqlc.FetchAllInflightAttemptsParams) ([]sqlc.PaymentHtlcAttempt, error) + FetchHopsForAttempts(ctx context.Context, htlcAttemptIndices []int64) ([]sqlc.FetchHopsForAttemptsRow, error) + + FetchPaymentDuplicates(ctx context.Context, paymentID int64) ([]sqlc.PaymentDuplicate, error) + + FetchPaymentLevelFirstHopCustomRecords(ctx context.Context, paymentIDs []int64) ([]sqlc.PaymentFirstHopCustomRecord, error) + FetchRouteLevelFirstHopCustomRecords(ctx context.Context, htlcAttemptIndices []int64) ([]sqlc.PaymentAttemptFirstHopCustomRecord, error) + FetchHopLevelCustomRecords(ctx context.Context, hopIDs []int64) ([]sqlc.PaymentHopCustomRecord, error) + + /* + Payment DB write operations. 
+ */ + InsertPaymentIntent(ctx context.Context, arg sqlc.InsertPaymentIntentParams) (int64, error) + InsertPayment(ctx context.Context, arg sqlc.InsertPaymentParams) (int64, error) + InsertPaymentFirstHopCustomRecord(ctx context.Context, arg sqlc.InsertPaymentFirstHopCustomRecordParams) error + + InsertHtlcAttempt(ctx context.Context, arg sqlc.InsertHtlcAttemptParams) (int64, error) + InsertRouteHop(ctx context.Context, arg sqlc.InsertRouteHopParams) (int64, error) + InsertRouteHopMpp(ctx context.Context, arg sqlc.InsertRouteHopMppParams) error + InsertRouteHopAmp(ctx context.Context, arg sqlc.InsertRouteHopAmpParams) error + InsertRouteHopBlinded(ctx context.Context, arg sqlc.InsertRouteHopBlindedParams) error + + InsertPaymentAttemptFirstHopCustomRecord(ctx context.Context, arg sqlc.InsertPaymentAttemptFirstHopCustomRecordParams) error + InsertPaymentHopCustomRecord(ctx context.Context, arg sqlc.InsertPaymentHopCustomRecordParams) error + + SettleAttempt(ctx context.Context, arg sqlc.SettleAttemptParams) error + FailAttempt(ctx context.Context, arg sqlc.FailAttemptParams) error + + FailPayment(ctx context.Context, arg sqlc.FailPaymentParams) (sql.Result, error) + + DeletePayment(ctx context.Context, paymentID int64) error + + // DeleteFailedAttempts removes all failed HTLCs from the db for a + // given payment. + DeleteFailedAttempts(ctx context.Context, paymentID int64) error + + /* + Migration specific queries. + + These queries are used ONLY for the one-time migration from KV + to SQL. + */ + + // InsertPaymentMig is a migration-only variant of InsertPayment that + // allows setting fail_reason when inserting historical payments. + InsertPaymentMig(ctx context.Context, arg sqlc.InsertPaymentMigParams) (int64, error) + + // InsertPaymentDuplicateMig inserts a duplicate payment record during + // migration. 
+ InsertPaymentDuplicateMig(ctx context.Context, arg sqlc.InsertPaymentDuplicateMigParams) (int64, error) +} + +// BatchedSQLQueries is a version of the SQLQueries that's capable +// of batched database operations. +type BatchedSQLQueries interface { + SQLQueries + sqldb.BatchedTx[SQLQueries] +} + +// SQLStore represents a storage backend. +type SQLStore struct { + cfg *SQLStoreConfig + db BatchedSQLQueries +} + +// A compile-time constraint to ensure SQLStore implements DB. +var _ DB = (*SQLStore)(nil) + +// SQLStoreConfig holds the configuration for the SQLStore. +type SQLStoreConfig struct { + // QueryConfig holds configuration values for SQL queries. + QueryCfg *sqldb.QueryConfig +} + +// NewSQLStore creates a new SQLStore instance given an open +// BatchedSQLPaymentsQueries storage backend. +func NewSQLStore(cfg *SQLStoreConfig, db BatchedSQLQueries, + options ...OptionModifier) (*SQLStore, error) { + + opts := DefaultOptions() + for _, applyOption := range options { + applyOption(opts) + } + + if opts.NoMigration { + return nil, fmt.Errorf("the NoMigration option is not yet " + + "supported for SQL stores") + } + + return &SQLStore{ + cfg: cfg, + db: db, + }, nil +} + +// A compile-time constraint to ensure SQLStore implements DB. +var _ DB = (*SQLStore)(nil) + +// fetchPaymentWithCompleteData fetches a payment with all its related data +// including attempts, hops, and custom records from the database. +// This is a convenience wrapper around the batch loading functions for single +// payment operations. +func fetchPaymentWithCompleteData(ctx context.Context, + cfg *sqldb.QueryConfig, db SQLQueries, + dbPayment sqlc.PaymentAndIntent) (*MPPayment, error) { + + payment := dbPayment.GetPayment() + + // Load batch data for this single payment. + batchData, err := batchLoadPaymentDetailsData( + ctx, cfg, db, []int64{payment.ID}, + ) + if err != nil { + return nil, fmt.Errorf("failed to load batch data: %w", err) + } + + // Build the payment from the batch data. 
+	return buildPaymentFromBatchData(dbPayment, batchData)
+}
+
+// paymentsCompleteData holds the full payment data when batch loading base
+// payment data and all the related data for a payment.
+type paymentsCompleteData struct {
+	*paymentsBaseData
+	*paymentsDetailsData
+}
+
+// batchLoadPayments loads the full payment data for a batch of payment IDs.
+func batchLoadPayments(ctx context.Context, cfg *sqldb.QueryConfig,
+	db SQLQueries, paymentIDs []int64) (*paymentsCompleteData, error) {
+
+	baseData, err := batchLoadpaymentsBaseData(ctx, cfg, db, paymentIDs)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load payment base data: %w",
+			err)
+	}
+
+	batchData, err := batchLoadPaymentDetailsData(ctx, cfg, db, paymentIDs)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load payment batch data: %w",
+			err)
+	}
+
+	return &paymentsCompleteData{
+		paymentsBaseData:    baseData,
+		paymentsDetailsData: batchData,
+	}, nil
+}
+
+// paymentsBaseData holds the base payment and intent data for a batch of
+// payments.
+type paymentsBaseData struct {
+	// paymentsAndIntents maps payment ID to its payment and intent data.
+	paymentsAndIntents map[int64]sqlc.PaymentAndIntent
+}
+
+// batchLoadpaymentsBaseData loads the base payment and payment intent data for
+// a batch of payment IDs. This complements batchLoadPaymentDetailsData which
+// loads related data (attempts, hops, custom records) but not the payment
+// table and payment intent table data.
+func batchLoadpaymentsBaseData(ctx context.Context,
+	cfg *sqldb.QueryConfig, db SQLQueries,
+	paymentIDs []int64) (*paymentsBaseData, error) {
+
+	baseData := &paymentsBaseData{
+		paymentsAndIntents: make(map[int64]sqlc.PaymentAndIntent),
+	}
+
+	if len(paymentIDs) == 0 {
+		return baseData, nil
+	}
+
+	err := sqldb.ExecuteBatchQuery(
+		ctx, cfg, paymentIDs,
+		func(id int64) int64 { return id },
+		func(ctx context.Context, ids []int64) (
+			[]sqlc.FetchPaymentsByIDsRow, error) {
+
+			records, err := db.FetchPaymentsByIDs(
+				ctx, ids,
+			)
+
+			return records, err
+		},
+		func(ctx context.Context,
+			payment sqlc.FetchPaymentsByIDsRow) error {
+
+			baseData.paymentsAndIntents[payment.ID] = payment
+
+			return nil
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch payment base "+
+			"data: %w", err)
+	}
+
+	return baseData, nil
+}
+
+// paymentsDetailsData holds all the batch-loaded data for multiple payments.
+// This does not include the base payment and intent data which is fetched
+// separately. It includes the additional data like attempts, hops, hop custom
+// records, and route custom records.
+type paymentsDetailsData struct {
+	// paymentCustomRecords maps payment ID to its custom records.
+	paymentCustomRecords map[int64][]sqlc.PaymentFirstHopCustomRecord
+
+	// attempts maps payment ID to its HTLC attempts.
+	attempts map[int64][]sqlc.FetchHtlcAttemptsForPaymentsRow
+
+	// hopsByAttempt maps attempt index to its hops.
+	hopsByAttempt map[int64][]sqlc.FetchHopsForAttemptsRow
+
+	// hopCustomRecords maps hop ID to its custom records.
+	hopCustomRecords map[int64][]sqlc.PaymentHopCustomRecord
+
+	// routeCustomRecords maps attempt index to its route-level custom
+	// records.
+	routeCustomRecords map[int64][]sqlc.PaymentAttemptFirstHopCustomRecord
+}
+
+// batchLoadPaymentCustomRecords loads payment-level custom records for a given
+// set of payment IDs. It uses a batch query to fetch all custom records for
+// the given payment IDs.
+func batchLoadPaymentCustomRecords(ctx context.Context, + cfg *sqldb.QueryConfig, db SQLQueries, paymentIDs []int64, + batchData *paymentsDetailsData) error { + + return sqldb.ExecuteBatchQuery( + ctx, cfg, paymentIDs, + func(id int64) int64 { return id }, + func(ctx context.Context, ids []int64) ( + []sqlc.PaymentFirstHopCustomRecord, error) { + + //nolint:ll + records, err := db.FetchPaymentLevelFirstHopCustomRecords( + ctx, ids, + ) + + return records, err + }, + func(ctx context.Context, + record sqlc.PaymentFirstHopCustomRecord) error { + + paymentRecords := + batchData.paymentCustomRecords[record.PaymentID] + + batchData.paymentCustomRecords[record.PaymentID] = + append(paymentRecords, record) + + return nil + }, + ) +} + +// batchLoadHtlcAttempts loads HTLC attempts for all payments and returns all +// attempt indices. It uses a batch query to fetch all attempts for the given +// payment IDs. +func batchLoadHtlcAttempts(ctx context.Context, cfg *sqldb.QueryConfig, + db SQLQueries, paymentIDs []int64, + batchData *paymentsDetailsData) ([]int64, error) { + + var allAttemptIndices []int64 + + err := sqldb.ExecuteBatchQuery( + ctx, cfg, paymentIDs, + func(id int64) int64 { return id }, + func(ctx context.Context, ids []int64) ( + []sqlc.FetchHtlcAttemptsForPaymentsRow, error) { + + return db.FetchHtlcAttemptsForPayments(ctx, ids) + }, + func(ctx context.Context, + attempt sqlc.FetchHtlcAttemptsForPaymentsRow) error { + + batchData.attempts[attempt.PaymentID] = append( + batchData.attempts[attempt.PaymentID], attempt, + ) + allAttemptIndices = append( + allAttemptIndices, attempt.AttemptIndex, + ) + + return nil + }, + ) + + return allAttemptIndices, err +} + +// batchLoadHopsForAttempts loads hops for all attempts and returns all hop IDs. +// It uses a batch query to fetch all hops for the given attempt indices. 
+func batchLoadHopsForAttempts(ctx context.Context, cfg *sqldb.QueryConfig, + db SQLQueries, attemptIndices []int64, + batchData *paymentsDetailsData) ([]int64, error) { + + var hopIDs []int64 + + err := sqldb.ExecuteBatchQuery( + ctx, cfg, attemptIndices, + func(idx int64) int64 { return idx }, + func(ctx context.Context, indices []int64) ( + []sqlc.FetchHopsForAttemptsRow, error) { + + return db.FetchHopsForAttempts(ctx, indices) + }, + func(ctx context.Context, + hop sqlc.FetchHopsForAttemptsRow) error { + + attemptHops := + batchData.hopsByAttempt[hop.HtlcAttemptIndex] + + batchData.hopsByAttempt[hop.HtlcAttemptIndex] = + append(attemptHops, hop) + + hopIDs = append(hopIDs, hop.ID) + + return nil + }, + ) + + return hopIDs, err +} + +// batchLoadHopCustomRecords loads hop-level custom records for all hops. It +// uses a batch query to fetch all custom records for the given hop IDs. +func batchLoadHopCustomRecords(ctx context.Context, cfg *sqldb.QueryConfig, + db SQLQueries, hopIDs []int64, batchData *paymentsDetailsData) error { + + return sqldb.ExecuteBatchQuery( + ctx, cfg, hopIDs, + func(id int64) int64 { return id }, + func(ctx context.Context, ids []int64) ( + []sqlc.PaymentHopCustomRecord, error) { + + return db.FetchHopLevelCustomRecords(ctx, ids) + }, + func(ctx context.Context, + record sqlc.PaymentHopCustomRecord) error { + + // TODO(ziggie): Can we get rid of this? + // This has to be in place otherwise the + // comparison will not match. + if record.Value == nil { + record.Value = []byte{} + } + + batchData.hopCustomRecords[record.HopID] = append( + batchData.hopCustomRecords[record.HopID], + record, + ) + + return nil + }, + ) +} + +// batchLoadRouteCustomRecords loads route-level first hop custom records for +// all attempts. It uses a batch query to fetch all custom records for the given +// attempt indices. 
+func batchLoadRouteCustomRecords(ctx context.Context, cfg *sqldb.QueryConfig,
+	db SQLQueries, attemptIndices []int64,
+	batchData *paymentsDetailsData) error {
+
+	return sqldb.ExecuteBatchQuery(
+		ctx, cfg, attemptIndices,
+		func(idx int64) int64 { return idx },
+		func(ctx context.Context, indices []int64) (
+			[]sqlc.PaymentAttemptFirstHopCustomRecord, error) {
+
+			return db.FetchRouteLevelFirstHopCustomRecords(
+				ctx, indices,
+			)
+		},
+		func(ctx context.Context,
+			record sqlc.PaymentAttemptFirstHopCustomRecord) error {
+
+			idx := record.HtlcAttemptIndex
+			attemptRecords := batchData.routeCustomRecords[idx]
+
+			batchData.routeCustomRecords[idx] =
+				append(attemptRecords, record)
+
+			return nil
+		},
+	)
+}
+
+// paymentStatusData holds lightweight resolution data for computing
+// payment status efficiently during deletion operations.
+type paymentStatusData struct {
+	// resolutionTypes maps payment ID to a list of resolution types
+	// for that payment's HTLC attempts.
+	resolutionTypes map[int64][]sql.NullInt32
+}
+
+// batchLoadPaymentResolutions loads only HTLC resolution types for multiple
+// payments. This is a lightweight alternative to batchLoadPaymentDetailsData
+// that's optimized for operations that only need to determine payment status.
+func batchLoadPaymentResolutions(ctx context.Context, cfg *sqldb.QueryConfig,
+	db SQLQueries, paymentIDs []int64) (*paymentStatusData, error) {
+
+	batchStatusData := &paymentStatusData{
+		resolutionTypes: make(map[int64][]sql.NullInt32),
+	}
+
+	if len(paymentIDs) == 0 {
+		return batchStatusData, nil
+	}
+
+	// Use a batch query to fetch all resolution types for the given payment
+	// IDs.
+ err := sqldb.ExecuteBatchQuery( + ctx, cfg, paymentIDs, + func(id int64) int64 { return id }, + func(ctx context.Context, ids []int64) ( + []sqlc.FetchHtlcAttemptResolutionsForPaymentsRow, + error) { + + return db.FetchHtlcAttemptResolutionsForPayments( + ctx, ids, + ) + }, + //nolint:ll + func(ctx context.Context, + res sqlc.FetchHtlcAttemptResolutionsForPaymentsRow) error { + + // Group resolutions by payment ID. + batchStatusData.resolutionTypes[res.PaymentID] = append( + batchStatusData.resolutionTypes[res.PaymentID], + res.ResolutionType, + ) + + return nil + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to fetch HTLC resolutions: %w", + err) + } + + return batchStatusData, nil +} + +// loadPaymentResolutions is a single-payment wrapper around +// batchLoadPaymentResolutions for convenience and to prevent duplicate queries +// so we reuse the same batch query for all payments. +func loadPaymentResolutions(ctx context.Context, cfg *sqldb.QueryConfig, + db SQLQueries, paymentID int64) ([]sql.NullInt32, error) { + + batchData, err := batchLoadPaymentResolutions( + ctx, cfg, db, []int64{paymentID}, + ) + if err != nil { + return nil, err + } + + return batchData.resolutionTypes[paymentID], nil +} + +// computePaymentStatusFromResolutions determines the payment status from +// resolution types and failure reason without building the complete MPPayment +// structure. This is a lightweight version that builds minimal HTLCAttempt +// structures and delegates to decidePaymentStatus for consistency. +func computePaymentStatusFromResolutions(resolutionTypes []sql.NullInt32, + failReason sql.NullInt32) (PaymentStatus, error) { + + // Build minimal HTLCAttempt slice with only resolution info. + htlcs := make([]HTLCAttempt, len(resolutionTypes)) + for i, resType := range resolutionTypes { + if !resType.Valid { + // NULL resolution_type means in-flight (no Settle, no + // Failure). 
+ continue + } + + switch HTLCAttemptResolutionType(resType.Int32) { + case HTLCAttemptResolutionSettled: + // Mark as settled (preimage details not needed for + // status). + htlcs[i].Settle = &HTLCSettleInfo{} + + case HTLCAttemptResolutionFailed: + // Mark as failed (failure details not needed for + // status). + htlcs[i].Failure = &HTLCFailInfo{} + + default: + return 0, fmt.Errorf("unknown resolution type: %v", + resType.Int32) + } + } + + // Convert fail reason to FailureReason pointer. + var failureReason *FailureReason + if failReason.Valid { + reason := FailureReason(failReason.Int32) + failureReason = &reason + } + + // Use the existing status decision logic. + return decidePaymentStatus(htlcs, failureReason) +} + +// batchLoadPaymentDetailsData loads all related data for multiple payments in +// batch. It uses a batch queries to fetch all data for the given payment IDs. +func batchLoadPaymentDetailsData(ctx context.Context, cfg *sqldb.QueryConfig, + db SQLQueries, paymentIDs []int64) (*paymentsDetailsData, error) { + + batchData := &paymentsDetailsData{ + paymentCustomRecords: make( + map[int64][]sqlc.PaymentFirstHopCustomRecord, + ), + attempts: make( + map[int64][]sqlc.FetchHtlcAttemptsForPaymentsRow, + ), + hopsByAttempt: make( + map[int64][]sqlc.FetchHopsForAttemptsRow, + ), + hopCustomRecords: make( + map[int64][]sqlc.PaymentHopCustomRecord, + ), + routeCustomRecords: make( + map[int64][]sqlc.PaymentAttemptFirstHopCustomRecord, + ), + } + + if len(paymentIDs) == 0 { + return batchData, nil + } + + // Load payment-level custom records. + err := batchLoadPaymentCustomRecords( + ctx, cfg, db, paymentIDs, batchData, + ) + if err != nil { + return nil, fmt.Errorf("failed to fetch payment custom "+ + "records: %w", err) + } + + // Load HTLC attempts and collect attempt indices. 
+ allAttemptIndices, err := batchLoadHtlcAttempts( + ctx, cfg, db, paymentIDs, batchData, + ) + if err != nil { + return nil, fmt.Errorf("failed to fetch HTLC attempts: %w", + err) + } + + if len(allAttemptIndices) == 0 { + // No attempts, return early. + return batchData, nil + } + + // Load hops for all attempts and collect hop IDs. + hopIDs, err := batchLoadHopsForAttempts( + ctx, cfg, db, allAttemptIndices, batchData, + ) + if err != nil { + return nil, fmt.Errorf("failed to fetch hops for attempts: %w", + err) + } + + // Load hop-level custom records if there are any hops. + if len(hopIDs) > 0 { + err = batchLoadHopCustomRecords(ctx, cfg, db, hopIDs, batchData) + if err != nil { + return nil, fmt.Errorf("failed to fetch hop custom "+ + "records: %w", err) + } + } + + // Load route-level first hop custom records. + err = batchLoadRouteCustomRecords( + ctx, cfg, db, allAttemptIndices, batchData, + ) + if err != nil { + return nil, fmt.Errorf("failed to fetch route custom "+ + "records: %w", err) + } + + return batchData, nil +} + +// buildPaymentFromBatchData builds a complete MPPayment from a database payment +// and pre-loaded batch data. +func buildPaymentFromBatchData(dbPayment sqlc.PaymentAndIntent, + batchData *paymentsDetailsData) (*MPPayment, error) { + + // The query will only return BOLT 11 payment intents or intents with + // no intent type set. + paymentIntent := dbPayment.GetPaymentIntent() + paymentRequest := paymentIntent.IntentPayload + + payment := dbPayment.GetPayment() + + // Get payment-level custom records from batch data. + customRecords := batchData.paymentCustomRecords[payment.ID] + + // Convert to the FirstHopCustomRecords map. + var firstHopCustomRecords lnwire.CustomRecords + if len(customRecords) > 0 { + firstHopCustomRecords = make(lnwire.CustomRecords) + for _, record := range customRecords { + firstHopCustomRecords[uint64(record.Key)] = record.Value + } + } + + // Convert database payment data to the PaymentCreationInfo struct. 
+ info := dbPaymentToCreationInfo( + payment.PaymentIdentifier, payment.AmountMsat, + payment.CreatedAt, paymentRequest, firstHopCustomRecords, + ) + + // Get all HTLC attempts from batch data for a given payment. + dbAttempts := batchData.attempts[payment.ID] + + // Convert all attempts to HTLCAttempt structs using the pre-loaded + // batch data. + attempts := make([]HTLCAttempt, 0, len(dbAttempts)) + for _, dbAttempt := range dbAttempts { + attemptIndex := dbAttempt.AttemptIndex + // Convert the batch row type to the single row type. + attempt, err := dbAttemptToHTLCAttempt( + dbAttempt, batchData.hopsByAttempt[attemptIndex], + batchData.hopCustomRecords, + batchData.routeCustomRecords[attemptIndex], + ) + if err != nil { + return nil, fmt.Errorf("failed to convert attempt "+ + "%d: %w", attemptIndex, err) + } + attempts = append(attempts, *attempt) + } + + // Set the failure reason if present. + // + // TODO(ziggie): Rename it to Payment Memo in the database? + var failureReason *FailureReason + if payment.FailReason.Valid { + reason := FailureReason(payment.FailReason.Int32) + failureReason = &reason + } + + mpPayment := &MPPayment{ + SequenceNum: uint64(payment.ID), + Info: info, + HTLCs: attempts, + FailureReason: failureReason, + } + + // The status and state will be determined by calling + // SetState after construction. + if err := mpPayment.SetState(); err != nil { + return nil, fmt.Errorf("failed to set payment state: %w", err) + } + + return mpPayment, nil +} + +// QueryPayments queries and retrieves payments from the database with support +// for filtering, pagination, and efficient batch loading of related data. 
+// +// The function accepts a Query parameter that controls: +// - Pagination: IndexOffset specifies where to start (exclusive), and +// MaxPayments limits the number of results returned +// - Ordering: Reversed flag determines if results are returned in reverse +// chronological order +// - Filtering: CreationDateStart/End filter by creation time, and +// IncludeIncomplete controls whether non-succeeded payments are included +// - Metadata: CountTotal flag determines if the total payment count should +// be calculated +// +// The function optimizes performance by loading all related data (HTLCs, +// sequences, failure reasons, etc.) for multiple payments in a single batch +// query, rather than fetching each payment's data individually. +// +// Returns a Response containing: +// - Payments: the list of matching payments with complete data +// - FirstIndexOffset/LastIndexOffset: pagination cursors for the first and +// last payment in the result set +// - TotalCount: total number of payments in the database (if CountTotal was +// requested, otherwise 0) +// +// This is part of the DB interface. +func (s *SQLStore) QueryPayments(ctx context.Context, query Query) (Response, + error) { + + if query.MaxPayments == 0 { + return Response{}, fmt.Errorf("max payments must be non-zero") + } + + var ( + allPayments []*MPPayment + totalCount int64 + initialCursor int64 + ) + + extractCursor := func(row sqlc.FilterPaymentsRow) int64 { + return row.Payment.ID + } + + err := s.db.ExecTx(ctx, sqldb.ReadTxOpt(), func(db SQLQueries) error { + // We first count all payments to determine the total count + // if requested. + if query.CountTotal { + totalPayments, err := db.CountPayments(ctx) + if err != nil { + return fmt.Errorf("failed to count "+ + "payments: %w", err) + } + totalCount = totalPayments + } + + // collectFunc extracts the payment ID from each payment row. 
+ collectFunc := func(row sqlc.FilterPaymentsRow) (int64, error) { + return row.Payment.ID, nil + } + + // batchDataFunc loads all related data for a batch of payments. + batchDataFunc := func(ctx context.Context, paymentIDs []int64) ( + *paymentsDetailsData, error) { + + return batchLoadPaymentDetailsData( + ctx, s.cfg.QueryCfg, db, paymentIDs, + ) + } + + // processPayment processes each payment with the batch-loaded + // data. + processPayment := func(ctx context.Context, + dbPayment sqlc.FilterPaymentsRow, + batchData *paymentsDetailsData) error { + + // Build the payment from the pre-loaded batch data. + mpPayment, err := buildPaymentFromBatchData( + dbPayment, batchData, + ) + if err != nil { + return fmt.Errorf("failed to fetch payment "+ + "with complete data: %w", err) + } + + // To keep compatibility with the old API, we only + // return non-succeeded payments if requested. + if mpPayment.Status != StatusSucceeded && + !query.IncludeIncomplete { + + return nil + } + + if uint64(len(allPayments)) >= query.MaxPayments { + return errMaxPaymentsReached + } + + allPayments = append(allPayments, mpPayment) + + return nil + } + + queryFunc := func(ctx context.Context, lastID int64, + limit int32) ([]sqlc.FilterPaymentsRow, error) { + + filterParams := sqlc.FilterPaymentsParams{ + NumLimit: limit, + Reverse: query.Reversed, + // For now there only BOLT 11 payment intents + // exist. + IntentType: sqldb.SQLInt16( + PaymentIntentTypeBolt11, + ), + } + + if query.Reversed { + filterParams.IndexOffsetLet = sqldb.SQLInt64( + lastID, + ) + } else { + filterParams.IndexOffsetGet = sqldb.SQLInt64( + lastID, + ) + } + + // Add potential date filters if specified. + if query.CreationDateStart != 0 { + filterParams.CreatedAfter = sqldb.SQLTime( + time.Unix(query.CreationDateStart, 0). + UTC(), + ) + } + if query.CreationDateEnd != 0 { + filterParams.CreatedBefore = sqldb.SQLTime( + time.Unix(query.CreationDateEnd, 0). 
+ UTC(), + ) + } + + return db.FilterPayments(ctx, filterParams) + } + + if query.Reversed { + if query.IndexOffset == 0 { + initialCursor = int64(math.MaxInt64) + } else { + initialCursor = int64(query.IndexOffset) + } + } else { + initialCursor = int64(query.IndexOffset) + } + + return sqldb.ExecuteCollectAndBatchWithSharedDataQuery( + ctx, s.cfg.QueryCfg, initialCursor, queryFunc, + extractCursor, collectFunc, batchDataFunc, + processPayment, + ) + }, func() { + allPayments = nil + }) + + // We make sure we don't return an error if we reached the maximum + // number of payments. Which is the pagination limit for the query + // itself. + if err != nil && !errors.Is(err, errMaxPaymentsReached) { + return Response{}, fmt.Errorf("failed to query payments: %w", + err) + } + + // Handle case where no payments were found + if len(allPayments) == 0 { + return Response{ + Payments: allPayments, + FirstIndexOffset: 0, + LastIndexOffset: 0, + TotalCount: uint64(totalCount), + }, nil + } + + // If the query was reversed, we need to reverse the payment list + // to match the kvstore behavior and return payments in forward order. + if query.Reversed { + for i, j := 0, len(allPayments)-1; i < j; i, j = i+1, j-1 { + allPayments[i], allPayments[j] = allPayments[j], + allPayments[i] + } + } + + return Response{ + Payments: allPayments, + FirstIndexOffset: allPayments[0].SequenceNum, + LastIndexOffset: allPayments[len(allPayments)-1].SequenceNum, + TotalCount: uint64(totalCount), + }, nil +} + +// fetchPaymentByHash fetches a payment by its hash from the database. It is a +// convenience wrapper around the FetchPayment method and checks for +// no rows error and returns ErrPaymentNotInitiated if no payment is found. 
+func fetchPaymentByHash(ctx context.Context, db SQLQueries, + paymentHash lntypes.Hash) (sqlc.FetchPaymentRow, error) { + + dbPayment, err := db.FetchPayment(ctx, paymentHash[:]) + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return dbPayment, fmt.Errorf("failed to fetch payment: %w", err) + } + + if errors.Is(err, sql.ErrNoRows) { + return dbPayment, ErrPaymentNotInitiated + } + + return dbPayment, nil +} + +// FetchPayment retrieves a complete payment record from the database by its +// payment hash. The returned MPPayment includes all payment metadata such as +// creation info, payment status, current state, all HTLC attempts (both +// successful and failed), and the failure reason if the payment has been +// marked as failed. +// +// Returns ErrPaymentNotInitiated if no payment with the given hash exists. +// +// This is part of the DB interface. +func (s *SQLStore) FetchPayment(ctx context.Context, + paymentHash lntypes.Hash) (*MPPayment, error) { + + var mpPayment *MPPayment + + err := s.db.ExecTx(ctx, sqldb.ReadTxOpt(), func(db SQLQueries) error { + dbPayment, err := fetchPaymentByHash(ctx, db, paymentHash) + if err != nil { + return err + } + + mpPayment, err = fetchPaymentWithCompleteData( + ctx, s.cfg.QueryCfg, db, dbPayment, + ) + if err != nil { + return fmt.Errorf("failed to fetch payment with "+ + "complete data: %w", err) + } + + return nil + }, sqldb.NoOpReset) + if err != nil { + return nil, err + } + + return mpPayment, nil +} + +// FetchInFlightPayments retrieves all payments that have HTLC attempts +// currently in flight (not yet settled or failed). These are payments with at +// least one HTLC attempt that has been registered but has no resolution record. +// +// The SQLStore implementation provides a significant performance improvement +// over the KVStore implementation by using targeted SQL queries instead of +// scanning all payments. 
+// +// This method is part of the PaymentReader interface, which is embedded in the +// DB interface. It's typically called during node startup to resume monitoring +// of pending payments and ensure HTLCs are properly tracked. +// +// TODO(ziggie): Consider changing the interface to use a callback or iterator +// pattern instead of returning all payments at once. This would allow +// processing payments one at a time without holding them all in memory +// simultaneously: +// - Callback: func FetchInFlightPayments(ctx, func(*MPPayment) error) error +// - Iterator: func FetchInFlightPayments(ctx) (PaymentIterator, error) +// +// While inflight payments are typically a small subset, this would improve +// memory efficiency for nodes with unusually high numbers of concurrent +// payments and would better leverage the existing pagination infrastructure. +func (s *SQLStore) FetchInFlightPayments(ctx context.Context) ([]*MPPayment, + error) { + + var mpPayments []*MPPayment + + err := s.db.ExecTx(ctx, sqldb.ReadTxOpt(), func(db SQLQueries) error { + // Track which payment IDs we've already processed across all + // pages to avoid loading the same payment multiple times when + // multiple inflight attempts belong to the same payment. + processedPayments := make(map[int64]*MPPayment) + + extractCursor := func(row sqlc.PaymentHtlcAttempt) int64 { + return row.AttemptIndex + } + + // collectFunc extracts the payment ID from each attempt row. + collectFunc := func(row sqlc.PaymentHtlcAttempt) ( + int64, error) { + + return row.PaymentID, nil + } + + // batchDataFunc loads payment data for a batch of payment IDs, + // but only for IDs we haven't processed yet. + batchDataFunc := func(ctx context.Context, + paymentIDs []int64) (*paymentsCompleteData, error) { + + // Filter out already-processed payment IDs. 
+ uniqueIDs := make([]int64, 0, len(paymentIDs)) + for _, id := range paymentIDs { + _, processed := processedPayments[id] + if !processed { + uniqueIDs = append(uniqueIDs, id) + } + } + + // If uniqueIDs is empty, the batch load will return + // empty batch data. + return batchLoadPayments( + ctx, s.cfg.QueryCfg, db, uniqueIDs, + ) + } + + // processAttempt processes each attempt. We only build and + // store the payment once per unique payment ID. + processAttempt := func(ctx context.Context, + row sqlc.PaymentHtlcAttempt, + batchData *paymentsCompleteData) error { + + // Skip if we've already processed this payment. + _, processed := processedPayments[row.PaymentID] + if processed { + return nil + } + + dbPayment := batchData.paymentsAndIntents[row.PaymentID] + + // Build the payment from batch data. + mpPayment, err := buildPaymentFromBatchData( + dbPayment, batchData.paymentsDetailsData, + ) + if err != nil { + return fmt.Errorf("failed to build payment: %w", + err) + } + + // Store in our processed map. + processedPayments[row.PaymentID] = mpPayment + + return nil + } + + queryFunc := func(ctx context.Context, lastAttemptIndex int64, + limit int32) ([]sqlc.PaymentHtlcAttempt, + error) { + + return db.FetchAllInflightAttempts(ctx, + sqlc.FetchAllInflightAttemptsParams{ + AttemptIndex: lastAttemptIndex, + Limit: limit, + }, + ) + } + + err := sqldb.ExecuteCollectAndBatchWithSharedDataQuery( + ctx, s.cfg.QueryCfg, int64(-1), queryFunc, + extractCursor, collectFunc, batchDataFunc, + processAttempt, + ) + if err != nil { + return err + } + + // Convert map to slice. 
+ mpPayments = make([]*MPPayment, 0, len(processedPayments)) + for _, payment := range processedPayments { + mpPayments = append(mpPayments, payment) + } + + return nil + }, func() { + mpPayments = nil + }) + if err != nil { + return nil, fmt.Errorf("failed to fetch inflight "+ + "payments: %w", err) + } + + return mpPayments, nil +} + +// DeleteFailedAttempts removes all failed HTLC attempts from the database for +// the specified payment, while preserving the payment record itself and any +// successful or in-flight attempts. +// +// The method performs the following validations before deletion: +// - StatusInitiated: Can delete failed attempts +// - StatusInFlight: Cannot delete, returns ErrPaymentInFlight (active HTLCs +// still on the network) +// - StatusSucceeded: Can delete failed attempts (payment completed) +// - StatusFailed: Can delete failed attempts (payment permanently failed) +// +// This method is idempotent - calling it multiple times on the same payment +// has no adverse effects. +// +// This method is part of the PaymentControl interface, which is embedded in +// the PaymentWriter interface and ultimately the DB interface. It represents +// the final step (step 5) in the payment lifecycle control flow and should be +// called after a payment reaches a terminal state (succeeded or permanently +// failed) to clean up historical failed attempts. 
+func (s *SQLStore) DeleteFailedAttempts(ctx context.Context, + paymentHash lntypes.Hash) error { + + err := s.db.ExecTx(ctx, sqldb.WriteTxOpt(), func(db SQLQueries) error { + dbPayment, err := fetchPaymentByHash(ctx, db, paymentHash) + if err != nil { + return err + } + + paymentStatus, err := computePaymentStatusFromDB( + ctx, s.cfg.QueryCfg, db, dbPayment, + ) + if err != nil { + return fmt.Errorf("failed to compute payment "+ + "status: %w", err) + } + + if err := paymentStatus.removable(); err != nil { + return fmt.Errorf("cannot delete failed "+ + "attempts for payment %v: %w", paymentHash, err) + } + + // Then we delete the failed attempts for this payment. + return db.DeleteFailedAttempts(ctx, dbPayment.GetPayment().ID) + }, sqldb.NoOpReset) + if err != nil { + return fmt.Errorf("failed to delete failed attempts for "+ + "payment %v: %w", paymentHash, err) + } + + return nil +} + +// computePaymentStatusFromDB computes the payment status by fetching minimal +// data from the database. This is a lightweight query optimized for SQL that +// doesn't load route data, making it significantly more efficient than +// FetchPayment when only the status is needed. +func computePaymentStatusFromDB(ctx context.Context, cfg *sqldb.QueryConfig, + db SQLQueries, dbPayment sqlc.PaymentAndIntent) (PaymentStatus, error) { + + payment := dbPayment.GetPayment() + + // Load the resolution types for the payment. + resolutionTypes, err := loadPaymentResolutions( + ctx, cfg, db, payment.ID, + ) + if err != nil { + return 0, fmt.Errorf("failed to load payment resolutions: %w", + err) + } + + // Use the lightweight status computation. + status, err := computePaymentStatusFromResolutions( + resolutionTypes, payment.FailReason, + ) + if err != nil { + return 0, fmt.Errorf("failed to compute payment status: %w", + err) + } + + return status, nil +} + +// DeletePayment removes a payment or its failed HTLC attempts from the +// database based on the failedAttemptsOnly flag. 
+// +// If failedAttemptsOnly is true, this method deletes only the failed HTLC +// attempts for the payment while preserving the payment record itself and any +// successful or in-flight attempts. This is useful for cleaning up historical +// failed attempts after a payment reaches a terminal state. +// +// If failedAttemptsOnly is false, this method deletes the entire payment +// record including all payment metadata, payment creation info, all HTLC +// attempts (both failed and successful), and associated data such as payment +// intents and custom records. +// +// Before deletion, this method validates the payment status to ensure it's +// safe to delete: +// - StatusInitiated: Can be deleted (no HTLCs sent yet) +// - StatusInFlight: Cannot be deleted, returns ErrPaymentInFlight (active +// HTLCs on the network) +// - StatusSucceeded: Can be deleted (payment completed successfully) +// - StatusFailed: Can be deleted (payment has failed permanently) +// +// Returns an error if the payment has in-flight HTLCs or if the payment +// doesn't exist. +// +// This method is part of the PaymentWriter interface, which is embedded in +// the DB interface. +func (s *SQLStore) DeletePayment(ctx context.Context, paymentHash lntypes.Hash, + failedHtlcsOnly bool) error { + + err := s.db.ExecTx(ctx, sqldb.WriteTxOpt(), func(db SQLQueries) error { + dbPayment, err := fetchPaymentByHash(ctx, db, paymentHash) + if err != nil { + return err + } + + paymentStatus, err := computePaymentStatusFromDB( + ctx, s.cfg.QueryCfg, db, dbPayment, + ) + if err != nil { + return fmt.Errorf("failed to compute payment "+ + "status: %w", err) + } + + if err := paymentStatus.removable(); err != nil { + return fmt.Errorf("payment %v cannot be deleted: %w", + paymentHash, err) + } + + // If we are only deleting failed HTLCs, we delete them. 
+ if failedHtlcsOnly { + return db.DeleteFailedAttempts( + ctx, dbPayment.GetPayment().ID, + ) + } + + // In case we are not deleting failed HTLCs, we delete the + // payment which will cascade delete all related data. + return db.DeletePayment(ctx, dbPayment.GetPayment().ID) + }, sqldb.NoOpReset) + if err != nil { + return fmt.Errorf("failed to delete failed attempts for "+ + "payment %v: %w", paymentHash, err) + } + + return nil +} + +// InitPayment creates a new payment record in the database with the given +// payment hash and creation info. +// +// Before creating the payment, this method checks if a payment with the same +// hash already exists and validates whether initialization is allowed based on +// the existing payment's status: +// - StatusInitiated: Returns ErrPaymentExists (payment already created, +// HTLCs may be in flight) +// - StatusInFlight: Returns ErrPaymentInFlight (payment currently being +// attempted) +// - StatusSucceeded: Returns ErrAlreadyPaid (payment already succeeded) +// - StatusFailed: Allows retry by deleting the old payment record and +// creating a new one +// +// If no existing payment is found, a new payment record is created with +// StatusInitiated and stored with all associated metadata. +// +// This method is part of the PaymentControl interface, which is embedded in +// the PaymentWriter interface and ultimately the DB interface, representing +// the first step in the payment lifecycle control flow. +func (s *SQLStore) InitPayment(ctx context.Context, paymentHash lntypes.Hash, + paymentCreationInfo *PaymentCreationInfo) error { + + // Create the payment in the database. + err := s.db.ExecTx(ctx, sqldb.WriteTxOpt(), func(db SQLQueries) error { + existingPayment, err := db.FetchPayment(ctx, paymentHash[:]) + switch { + // A payment with this hash already exists. We need to check its + // status to see if we can re-initialize. 
+ case err == nil: + paymentStatus, err := computePaymentStatusFromDB( + ctx, s.cfg.QueryCfg, db, existingPayment, + ) + if err != nil { + return fmt.Errorf("failed to compute payment "+ + "status: %w", err) + } + + // Check if the payment is initializable otherwise + // we'll return early. + if err := paymentStatus.initializable(); err != nil { + return fmt.Errorf("payment is not "+ + "initializable: %w", err) + } + + // If the initializable check above passes, then the + // existing payment has failed. So we delete it and + // all of its previous artifacts. We rely on + // cascading deletes to clean up the rest. + err = db.DeletePayment(ctx, existingPayment.Payment.ID) + if err != nil { + return fmt.Errorf("failed to delete "+ + "payment: %w", err) + } + + // An unexpected error occurred while fetching the payment. + case !errors.Is(err, sql.ErrNoRows): + // Some other error occurred + return fmt.Errorf("failed to check existing "+ + "payment: %w", err) + + // The payment does not yet exist, so we can proceed. + default: + } + + // Insert the payment first to get its ID. + paymentID, err := db.InsertPayment( + ctx, sqlc.InsertPaymentParams{ + AmountMsat: int64( + paymentCreationInfo.Value, + ), + CreatedAt: paymentCreationInfo. + CreationTime.UTC(), + PaymentIdentifier: paymentHash[:], + }, + ) + if err != nil { + return fmt.Errorf("failed to insert payment: %w", err) + } + + // If there's a payment request, insert the payment intent. + if len(paymentCreationInfo.PaymentRequest) > 0 { + _, err = db.InsertPaymentIntent( + ctx, sqlc.InsertPaymentIntentParams{ + PaymentID: paymentID, + IntentType: int16( + PaymentIntentTypeBolt11, + ), + IntentPayload: paymentCreationInfo. + PaymentRequest, + }, + ) + if err != nil { + return fmt.Errorf("failed to insert "+ + "payment intent: %w", err) + } + } + + firstHopCustomRecords := paymentCreationInfo. 
+ FirstHopCustomRecords + + for key, value := range firstHopCustomRecords { + err = db.InsertPaymentFirstHopCustomRecord( + ctx, + sqlc.InsertPaymentFirstHopCustomRecordParams{ + PaymentID: paymentID, + Key: int64(key), + Value: value, + }, + ) + if err != nil { + return fmt.Errorf("failed to insert "+ + "payment first hop custom "+ + "record: %w", err) + } + } + + return nil + }, sqldb.NoOpReset) + if err != nil { + return fmt.Errorf("failed to initialize payment: %w", err) + } + + return nil +} + +// insertRouteHops inserts all route hop data for a given set of hops. +func (s *SQLStore) insertRouteHops(ctx context.Context, db SQLQueries, + hops []*route.Hop, attemptID uint64) error { + + for i, hop := range hops { + // Insert the basic route hop data and get the generated ID. + hopID, err := db.InsertRouteHop(ctx, sqlc.InsertRouteHopParams{ + HtlcAttemptIndex: int64(attemptID), + HopIndex: int32(i), + PubKey: hop.PubKeyBytes[:], + Scid: strconv.FormatUint( + hop.ChannelID, 10, + ), + OutgoingTimeLock: int32(hop.OutgoingTimeLock), + AmtToForward: int64(hop.AmtToForward), + MetaData: hop.Metadata, + }) + if err != nil { + return fmt.Errorf("failed to insert route hop: %w", err) + } + + // Insert the per-hop custom records. + if len(hop.CustomRecords) > 0 { + for key, value := range hop.CustomRecords { + err = db.InsertPaymentHopCustomRecord( + ctx, + sqlc.InsertPaymentHopCustomRecordParams{ + HopID: hopID, + Key: int64(key), + Value: value, + }) + if err != nil { + return fmt.Errorf("failed to insert "+ + "payment hop custom record: %w", + err) + } + } + } + + // Insert MPP data if present. + if hop.MPP != nil { + paymentAddr := hop.MPP.PaymentAddr() + err = db.InsertRouteHopMpp( + ctx, sqlc.InsertRouteHopMppParams{ + HopID: hopID, + PaymentAddr: paymentAddr[:], + TotalMsat: int64(hop.MPP.TotalMsat()), + }) + if err != nil { + return fmt.Errorf("failed to insert "+ + "route hop MPP: %w", err) + } + } + + // Insert AMP data if present. 
+ if hop.AMP != nil { + rootShare := hop.AMP.RootShare() + setID := hop.AMP.SetID() + err = db.InsertRouteHopAmp( + ctx, sqlc.InsertRouteHopAmpParams{ + HopID: hopID, + RootShare: rootShare[:], + SetID: setID[:], + ChildIndex: int32(hop.AMP.ChildIndex()), + }) + if err != nil { + return fmt.Errorf("failed to insert "+ + "route hop AMP: %w", err) + } + } + + // Insert blinded route data if present. Every hop in the + // blinded path must have an encrypted data record. If the + // encrypted data is not present, we skip the insertion. + if hop.EncryptedData == nil { + continue + } + + // The introduction point has a blinding point set. + var blindingPointBytes []byte + if hop.BlindingPoint != nil { + blindingPointBytes = hop.BlindingPoint. + SerializeCompressed() + } + + // The total amount is only set for the final hop in a + // blinded path. + totalAmtMsat := sql.NullInt64{} + if i == len(hops)-1 { + totalAmtMsat = sql.NullInt64{ + Int64: int64(hop.TotalAmtMsat), + Valid: true, + } + } + + err = db.InsertRouteHopBlinded(ctx, + sqlc.InsertRouteHopBlindedParams{ + HopID: hopID, + EncryptedData: hop.EncryptedData, + BlindingPoint: blindingPointBytes, + BlindedPathTotalAmt: totalAmtMsat, + }, + ) + if err != nil { + return fmt.Errorf("failed to insert "+ + "route hop blinded: %w", err) + } + } + + return nil +} + +// RegisterAttempt atomically records a new HTLC attempt for the specified +// payment. The attempt includes the attempt ID, session key, route information +// (hops, timelocks, amounts), and optional data such as MPP/AMP parameters, +// blinded route data, and custom records. +// +// Returns the updated MPPayment with the new attempt appended to the HTLCs +// slice, and the payment state recalculated. Returns an error if the payment +// doesn't exist or validation fails. +// +// This method is part of the PaymentControl interface, which is embedded in +// the PaymentWriter interface and ultimately the DB interface. 
It represents +// step 2 in the payment lifecycle control flow, called after InitPayment and +// potentially multiple times for multi-path payments. +func (s *SQLStore) RegisterAttempt(ctx context.Context, + paymentHash lntypes.Hash, attempt *HTLCAttemptInfo) (*MPPayment, + error) { + + var mpPayment *MPPayment + + err := s.db.ExecTx(ctx, sqldb.WriteTxOpt(), func(db SQLQueries) error { + // Make sure the payment exists. + dbPayment, err := db.FetchPayment(ctx, paymentHash[:]) + if err != nil { + return err + } + + // We fetch the complete payment to determine if the payment is + // registrable. + // + // TODO(ziggie): We could improve the query here since only + // the last hop data is needed here not the complete payment + // data. + mpPayment, err = fetchPaymentWithCompleteData( + ctx, s.cfg.QueryCfg, db, dbPayment, + ) + if err != nil { + return fmt.Errorf("failed to fetch payment with "+ + "complete data: %w", err) + } + + if err := mpPayment.Registrable(); err != nil { + return fmt.Errorf("htlc attempt not registrable: %w", + err) + } + + // Verify the attempt is compatible with the existing payment. + if err := verifyAttempt(mpPayment, attempt); err != nil { + return fmt.Errorf("failed to verify attempt: %w", err) + } + + // Register the plain HTLC attempt next. 
+ sessionKey := attempt.SessionKey() + sessionKeyBytes := sessionKey.Serialize() + + _, err = db.InsertHtlcAttempt(ctx, sqlc.InsertHtlcAttemptParams{ + PaymentID: dbPayment.Payment.ID, + AttemptIndex: int64(attempt.AttemptID), + SessionKey: sessionKeyBytes, + AttemptTime: attempt.AttemptTime, + PaymentHash: paymentHash[:], + FirstHopAmountMsat: int64( + attempt.Route.FirstHopAmount.Val.Int(), + ), + RouteTotalTimeLock: int32(attempt.Route.TotalTimeLock), + RouteTotalAmount: int64(attempt.Route.TotalAmount), + RouteSourceKey: attempt.Route.SourcePubKey[:], + }) + if err != nil { + return fmt.Errorf("failed to insert HTLC "+ + "attempt: %w", err) + } + + // Insert the route level first hop custom records. + attemptFirstHopCustomRecords := attempt.Route. + FirstHopWireCustomRecords + + for key, value := range attemptFirstHopCustomRecords { + //nolint:ll + err = db.InsertPaymentAttemptFirstHopCustomRecord( + ctx, + sqlc.InsertPaymentAttemptFirstHopCustomRecordParams{ + HtlcAttemptIndex: int64(attempt.AttemptID), + Key: int64(key), + Value: value, + }, + ) + if err != nil { + return fmt.Errorf("failed to insert "+ + "payment attempt first hop custom "+ + "record: %w", err) + } + } + + // Insert the route hops. + err = s.insertRouteHops( + ctx, db, attempt.Route.Hops, attempt.AttemptID, + ) + if err != nil { + return fmt.Errorf("failed to insert route hops: %w", + err) + } + + // We fetch the HTLC attempts again to recalculate the payment + // state after the attempt is registered. This also makes sure + // we have the right data in case multiple attempts are + // registered concurrently. + // + // NOTE: While the caller is responsible for serializing calls + // to RegisterAttempt per payment hash (see PaymentControl + // interface), we still refetch here to guarantee we return + // consistent, up-to-date data that reflects all changes made + // within this transaction. 
+ mpPayment, err = fetchPaymentWithCompleteData( + ctx, s.cfg.QueryCfg, db, dbPayment, + ) + if err != nil { + return fmt.Errorf("failed to fetch payment with "+ + "complete data: %w", err) + } + + return nil + }, func() { + mpPayment = nil + }) + if err != nil { + return nil, fmt.Errorf("failed to register attempt: %w", err) + } + + return mpPayment, nil +} + +// SettleAttempt marks the specified HTLC attempt as successfully settled, +// recording the payment preimage and settlement time. The preimage serves as +// cryptographic proof of payment and is atomically saved to the database. +// +// This method is part of the PaymentControl interface, which is embedded in +// the PaymentWriter interface and ultimately the DB interface. It represents +// step 3a in the payment lifecycle control flow (step 3b is FailAttempt), +// called after RegisterAttempt when an HTLC successfully completes. +func (s *SQLStore) SettleAttempt(ctx context.Context, paymentHash lntypes.Hash, + attemptID uint64, settleInfo *HTLCSettleInfo) (*MPPayment, error) { + + var mpPayment *MPPayment + + err := s.db.ExecTx(ctx, sqldb.WriteTxOpt(), func(db SQLQueries) error { + dbPayment, err := fetchPaymentByHash(ctx, db, paymentHash) + if err != nil { + return err + } + + paymentStatus, err := computePaymentStatusFromDB( + ctx, s.cfg.QueryCfg, db, dbPayment, + ) + if err != nil { + return fmt.Errorf("failed to compute payment "+ + "status: %w", err) + } + + if err := paymentStatus.updatable(); err != nil { + return fmt.Errorf("payment is not updatable: %w", err) + } + + err = db.SettleAttempt(ctx, sqlc.SettleAttemptParams{ + AttemptIndex: int64(attemptID), + ResolutionTime: time.Now(), + ResolutionType: int32(HTLCAttemptResolutionSettled), + SettlePreimage: settleInfo.Preimage[:], + }) + if err != nil { + return fmt.Errorf("failed to settle attempt: %w", err) + } + + // Fetch the complete payment after we settled the attempt. 
+ mpPayment, err = fetchPaymentWithCompleteData( + ctx, s.cfg.QueryCfg, db, dbPayment, + ) + if err != nil { + return fmt.Errorf("failed to fetch payment with "+ + "complete data: %w", err) + } + + return nil + }, func() { + mpPayment = nil + }) + if err != nil { + return nil, fmt.Errorf("failed to settle attempt: %w", err) + } + + return mpPayment, nil +} + +// FailAttempt marks the specified HTLC attempt as failed, recording the +// failure reason, failure time, optional failure message, and the index of the +// node in the route that generated the failure. This information is atomically +// saved to the database for debugging and route optimization purposes. +// +// For single-path payments, failing the only attempt may lead to the payment +// being retried or ultimately failed via the Fail method. For multi-shard +// (MPP/AMP) payments, individual shard failures don't necessarily fail the +// entire payment; additional attempts can be registered until sufficient shards +// succeed or the payment is permanently failed. +// +// Returns the updated MPPayment with the attempt marked as failed and the +// payment state recalculated. The payment status remains StatusInFlight if +// other attempts are still in flight, or may transition based on the overall +// payment state. +// +// This method is part of the PaymentControl interface, which is embedded in +// the PaymentWriter interface and ultimately the DB interface. It represents +// step 3b in the payment lifecycle control flow (step 3a is SettleAttempt), +// called after RegisterAttempt when an HTLC fails. +func (s *SQLStore) FailAttempt(ctx context.Context, paymentHash lntypes.Hash, + attemptID uint64, failInfo *HTLCFailInfo) (*MPPayment, error) { + + var mpPayment *MPPayment + + err := s.db.ExecTx(ctx, sqldb.WriteTxOpt(), func(db SQLQueries) error { + // Make sure the payment exists. 
+ dbPayment, err := fetchPaymentByHash(ctx, db, paymentHash) + if err != nil { + return err + } + + paymentStatus, err := computePaymentStatusFromDB( + ctx, s.cfg.QueryCfg, db, dbPayment, + ) + if err != nil { + return fmt.Errorf("failed to compute payment "+ + "status: %w", err) + } + + // We check if the payment is updatable before failing the + // attempt. + if err := paymentStatus.updatable(); err != nil { + return fmt.Errorf("payment is not updatable: %w", err) + } + + var failureMsg bytes.Buffer + if failInfo.Message != nil { + err := lnwire.EncodeFailureMessage( + &failureMsg, failInfo.Message, 0, + ) + if err != nil { + return fmt.Errorf("failed to encode "+ + "failure message: %w", err) + } + } + + err = db.FailAttempt(ctx, sqlc.FailAttemptParams{ + AttemptIndex: int64(attemptID), + ResolutionTime: time.Now(), + ResolutionType: int32(HTLCAttemptResolutionFailed), + FailureSourceIndex: sqldb.SQLInt32( + failInfo.FailureSourceIndex, + ), + HtlcFailReason: sqldb.SQLInt32(failInfo.Reason), + FailureMsg: failureMsg.Bytes(), + }) + if err != nil { + return fmt.Errorf("failed to fail attempt: %w", err) + } + + mpPayment, err = fetchPaymentWithCompleteData( + ctx, s.cfg.QueryCfg, db, dbPayment, + ) + if err != nil { + return fmt.Errorf("failed to fetch payment with "+ + "complete data: %w", err) + } + + return nil + }, func() { + mpPayment = nil + }) + if err != nil { + return nil, fmt.Errorf("failed to fail attempt: %w", err) + } + + return mpPayment, nil +} + +// Fail records the ultimate reason why a payment failed. This method stores +// the failure reason for record keeping but does not enforce that all HTLC +// attempts are resolved - HTLCs may still be in flight when this is called. +// +// The payment's actual status transition to StatusFailed is determined by the +// payment state calculation, which considers both the recorded failure reason +// and the current state of all HTLC attempts. 
The status will transition to +// StatusFailed once all HTLCs are resolved and/or a failure reason is recorded. +// +// NOTE: According to the interface contract, this should only be called when +// all active attempts are already failed. However, the implementation allows +// concurrent calls and does not validate this precondition, enabling the last +// failing attempt to record the failure reason without synchronization. +// +// This method is part of the PaymentControl interface, which is embedded in +// the PaymentWriter interface and ultimately the DB interface. It represents +// step 4 in the payment lifecycle control flow. +func (s *SQLStore) Fail(ctx context.Context, paymentHash lntypes.Hash, + reason FailureReason) (*MPPayment, error) { + + var mpPayment *MPPayment + + err := s.db.ExecTx(ctx, sqldb.WriteTxOpt(), func(db SQLQueries) error { + result, err := db.FailPayment(ctx, sqlc.FailPaymentParams{ + PaymentIdentifier: paymentHash[:], + FailReason: sqldb.SQLInt32(reason), + }) + if err != nil { + return err + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected == 0 { + return ErrPaymentNotInitiated + } + + payment, err := db.FetchPayment(ctx, paymentHash[:]) + if err != nil { + return fmt.Errorf("failed to fetch payment: %w", err) + } + mpPayment, err = fetchPaymentWithCompleteData( + ctx, s.cfg.QueryCfg, db, payment, + ) + if err != nil { + return fmt.Errorf("failed to fetch payment with "+ + "complete data: %w", err) + } + + return nil + }, func() { + mpPayment = nil + }) + if err != nil { + return nil, fmt.Errorf("failed to fail payment: %w", err) + } + + return mpPayment, nil +} + +// DeletePayments performs a batch deletion of payments or their failed HTLC +// attempts from the database based on the specified flags. This is a bulk +// operation that iterates through all payments and selectively deletes based +// on the criteria. 
+// The behavior is controlled by two flags:
+//
+// If failedAttemptsOnly is true, only failed HTLC attempts are deleted while
+// preserving the payment records and any successful or in-flight attempts.
+// The return value is always 0 when deleting attempts only.
+//
+// If failedAttemptsOnly is false, entire payment records are deleted including
+// all associated data (HTLCs, metadata, intents). The return value is the
+// number of payments deleted.
+//
+// The failedOnly flag further filters which payments are processed:
+//   - failedOnly=true, failedAttemptsOnly=true: Delete failed attempts for
+//     StatusFailed payments only
+//   - failedOnly=false, failedAttemptsOnly=true: Delete failed attempts for
+//     all removable payments
+//   - failedOnly=true, failedAttemptsOnly=false: Delete entire payment records
+//     for StatusFailed payments only
+//   - failedOnly=false, failedAttemptsOnly=false: Delete all removable payment
+//     records (StatusInitiated, StatusSucceeded, StatusFailed)
+//
+// Safety checks applied to all operations:
+//   - Payments with StatusInFlight are always skipped (cannot be safely deleted
+//     while HTLCs are on the network)
+//   - The payment status must pass the removable() check
+//
+// Returns the number of complete payments deleted (0 if only deleting failed
+// attempts). This is useful for cleanup operations, administrative maintenance,
+// or freeing up database storage.
+//
+// This method is part of the PaymentWriter interface, which is embedded in
+// the DB interface.
+//
+// TODO(ziggie): batch and use an iterator instead; moreover, we don't need to
+// fetch the complete payment data for each payment — we can just fetch the
+// payment ID and the resolution types to decide if the payment is removable.
+func (s *SQLStore) DeletePayments(ctx context.Context, failedOnly, + failedHtlcsOnly bool) (int, error) { + + var numPayments int + + extractCursor := func(row sqlc.FilterPaymentsRow) int64 { + return row.Payment.ID + } + + err := s.db.ExecTx(ctx, sqldb.WriteTxOpt(), func(db SQLQueries) error { + // collectFunc extracts the payment ID from each payment row. + collectFunc := func(row sqlc.FilterPaymentsRow) (int64, error) { + return row.Payment.ID, nil + } + + // batchDataFunc loads only HTLC resolution types for a batch + // of payments, which is sufficient to determine payment status. + batchDataFunc := func(ctx context.Context, paymentIDs []int64) ( + *paymentStatusData, error) { + + return batchLoadPaymentResolutions( + ctx, s.cfg.QueryCfg, db, paymentIDs, + ) + } + + // processPayment processes each payment with the lightweight + // batch-loaded resolution data. + processPayment := func(ctx context.Context, + dbPayment sqlc.FilterPaymentsRow, + batchData *paymentStatusData) error { + + payment := dbPayment.Payment + + // Compute the payment status from resolution types and + // failure reason without building the complete payment. + resolutionTypes := batchData.resolutionTypes[payment.ID] + status, err := computePaymentStatusFromResolutions( + resolutionTypes, payment.FailReason, + ) + if err != nil { + return fmt.Errorf("failed to compute payment "+ + "status: %w", err) + } + + // Payments which are not final yet cannot be deleted. + // we skip them. + if err := status.removable(); err != nil { + return nil + } + + // If we are only deleting failed payments, we skip + // if the payment is not failed. + if failedOnly && status != StatusFailed { + return nil + } + + // If we are only deleting failed HTLCs, we delete them + // and return early. + if failedHtlcsOnly { + return db.DeleteFailedAttempts( + ctx, payment.ID, + ) + } + + // Otherwise we delete the payment. 
+ err = db.DeletePayment(ctx, payment.ID) + if err != nil { + return fmt.Errorf("failed to delete "+ + "payment: %w", err) + } + + numPayments++ + + return nil + } + + queryFunc := func(ctx context.Context, lastID int64, + limit int32) ([]sqlc.FilterPaymentsRow, error) { + + filterParams := sqlc.FilterPaymentsParams{ + NumLimit: limit, + IndexOffsetGet: sqldb.SQLInt64( + lastID, + ), + } + + return db.FilterPayments(ctx, filterParams) + } + + return sqldb.ExecuteCollectAndBatchWithSharedDataQuery( + ctx, s.cfg.QueryCfg, int64(-1), queryFunc, + extractCursor, collectFunc, batchDataFunc, + processPayment, + ) + }, func() { + numPayments = 0 + }) + if err != nil { + return 0, fmt.Errorf("failed to delete payments "+ + "(failedOnly: %v, failedHtlcsOnly: %v): %w", + failedOnly, failedHtlcsOnly, err) + } + + return numPayments, nil +} diff --git a/payments/db/migration1/sqlc/db.go b/payments/db/migration1/sqlc/db.go new file mode 100644 index 00000000000..e4d78283b21 --- /dev/null +++ b/payments/db/migration1/sqlc/db.go @@ -0,0 +1,31 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 + +package sqlc + +import ( + "context" + "database/sql" +) + +type DBTX interface { + ExecContext(context.Context, string, ...interface{}) (sql.Result, error) + PrepareContext(context.Context, string) (*sql.Stmt, error) + QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error) + QueryRowContext(context.Context, string, ...interface{}) *sql.Row +} + +func New(db DBTX) *Queries { + return &Queries{db: db} +} + +type Queries struct { + db DBTX +} + +func (q *Queries) WithTx(tx *sql.Tx) *Queries { + return &Queries{ + db: tx, + } +} diff --git a/payments/db/migration1/sqlc/db_custom.go b/payments/db/migration1/sqlc/db_custom.go new file mode 100644 index 00000000000..625e65adcca --- /dev/null +++ b/payments/db/migration1/sqlc/db_custom.go @@ -0,0 +1,123 @@ +package sqlc + +import ( + "fmt" + "strings" +) + +// GetTx returns the underlying DBTX (either *sql.DB or *sql.Tx) used by the +// Queries struct. +func (q *Queries) GetTx() DBTX { + return q.db +} + +// makeQueryParams generates a string of query parameters for a SQL query. It is +// meant to replace the `?` placeholders in a SQL query with numbered parameters +// like `$1`, `$2`, etc. This is required for the sqlc /*SLICE:*/ +// workaround. See scripts/gen_sqlc_docker.sh for more details. +func makeQueryParams(numTotalArgs, numListArgs int) string { + if numListArgs == 0 { + return "" + } + + var b strings.Builder + + // Pre-allocate a rough estimation of the buffer size to avoid + // re-allocations. A parameter like $1000, takes 6 bytes. + b.Grow(numListArgs * 6) + + diff := numTotalArgs - numListArgs + for i := 0; i < numListArgs; i++ { + if i > 0 { + // We don't need to check the error here because the + // WriteString method of strings.Builder always returns + // nil. + _, _ = b.WriteString(",") + } + + // We don't need to check the error here because the + // Write method (called by fmt.Fprintf) of strings.Builder + // always returns nil. 
+ _, _ = fmt.Fprintf(&b, "$%d", i+diff+1) + } + + return b.String() +} + +// PaymentAndIntent is an interface that provides access to a payment and its +// associated payment intent. +type PaymentAndIntent interface { + // GetPayment returns the Payment associated with this interface. + GetPayment() Payment + + // GetPaymentIntent returns the PaymentIntent associated with this + // payment. + GetPaymentIntent() PaymentIntent +} + +// GetPayment returns the Payment associated with this interface. +// +// NOTE: This method is part of the PaymentAndIntent interface. +func (r FilterPaymentsRow) GetPayment() Payment { + return r.Payment +} + +// GetPaymentIntent returns the PaymentIntent associated with this payment. +// If the payment has no intent (IntentType is NULL), this returns a zero-value +// PaymentIntent. +// +// NOTE: This method is part of the PaymentAndIntent interface. +func (r FilterPaymentsRow) GetPaymentIntent() PaymentIntent { + if !r.IntentType.Valid { + return PaymentIntent{} + } + + return PaymentIntent{ + IntentType: r.IntentType.Int16, + IntentPayload: r.IntentPayload, + } +} + +// GetPayment returns the Payment associated with this interface. +// +// NOTE: This method is part of the PaymentAndIntent interface. +func (r FetchPaymentRow) GetPayment() Payment { + return r.Payment +} + +// GetPaymentIntent returns the PaymentIntent associated with this payment. +// If the payment has no intent (IntentType is NULL), this returns a zero-value +// PaymentIntent. +// +// NOTE: This method is part of the PaymentAndIntent interface. 
+func (r FetchPaymentRow) GetPaymentIntent() PaymentIntent { + if !r.IntentType.Valid { + return PaymentIntent{} + } + + return PaymentIntent{ + IntentType: r.IntentType.Int16, + IntentPayload: r.IntentPayload, + } +} + +func (r FetchPaymentsByIDsRow) GetPayment() Payment { + return Payment{ + ID: r.ID, + AmountMsat: r.AmountMsat, + CreatedAt: r.CreatedAt, + PaymentIdentifier: r.PaymentIdentifier, + FailReason: r.FailReason, + } +} + +func (r FetchPaymentsByIDsRow) GetPaymentIntent() PaymentIntent { + if !r.IntentType.Valid { + return PaymentIntent{} + } + + return PaymentIntent{ + IntentType: r.IntentType.Int16, + IntentPayload: r.IntentPayload, + } +} diff --git a/payments/db/migration1/sqlc/models.go b/payments/db/migration1/sqlc/models.go new file mode 100644 index 00000000000..6faa701d99a --- /dev/null +++ b/payments/db/migration1/sqlc/models.go @@ -0,0 +1,111 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.29.0 + +package sqlc + +import ( + "database/sql" + "time" +) + +type Payment struct { + ID int64 + AmountMsat int64 + CreatedAt time.Time + PaymentIdentifier []byte + FailReason sql.NullInt32 +} + +type PaymentAttemptFirstHopCustomRecord struct { + ID int64 + HtlcAttemptIndex int64 + Key int64 + Value []byte +} + +type PaymentDuplicate struct { + ID int64 + PaymentID int64 + PaymentIdentifier []byte + AmountMsat int64 + CreatedAt time.Time + FailReason sql.NullInt32 + SettlePreimage []byte + SettleTime sql.NullTime +} + +type PaymentFirstHopCustomRecord struct { + ID int64 + PaymentID int64 + Key int64 + Value []byte +} + +type PaymentHopCustomRecord struct { + ID int64 + HopID int64 + Key int64 + Value []byte +} + +type PaymentHtlcAttempt struct { + ID int64 + AttemptIndex int64 + PaymentID int64 + SessionKey []byte + AttemptTime time.Time + PaymentHash []byte + FirstHopAmountMsat int64 + RouteTotalTimeLock int32 + RouteTotalAmount int64 + RouteSourceKey []byte +} + +type PaymentHtlcAttemptResolution struct { + AttemptIndex int64 + 
ResolutionTime time.Time + ResolutionType int32 + SettlePreimage []byte + FailureSourceIndex sql.NullInt32 + HtlcFailReason sql.NullInt32 + FailureMsg []byte +} + +type PaymentIntent struct { + ID int64 + PaymentID int64 + IntentType int16 + IntentPayload []byte +} + +type PaymentRouteHop struct { + ID int64 + HtlcAttemptIndex int64 + HopIndex int32 + PubKey []byte + Scid string + OutgoingTimeLock int32 + AmtToForward int64 + MetaData []byte +} + +type PaymentRouteHopAmp struct { + HopID int64 + RootShare []byte + SetID []byte + ChildIndex int32 +} + +type PaymentRouteHopBlinded struct { + HopID int64 + EncryptedData []byte + BlindingPoint []byte + BlindedPathTotalAmt sql.NullInt64 +} + +type PaymentRouteHopMpp struct { + HopID int64 + PaymentAddr []byte + TotalMsat int64 +} diff --git a/payments/db/migration1/sqlc/payments.sql.go b/payments/db/migration1/sqlc/payments.sql.go new file mode 100644 index 00000000000..f658654ae93 --- /dev/null +++ b/payments/db/migration1/sqlc/payments.sql.go @@ -0,0 +1,1229 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.29.0 +// source: payments.sql + +package sqlc + +import ( + "context" + "database/sql" + "strings" + "time" +) + +const countPayments = `-- name: CountPayments :one +SELECT COUNT(*) FROM payments +` + +func (q *Queries) CountPayments(ctx context.Context) (int64, error) { + row := q.db.QueryRowContext(ctx, countPayments) + var count int64 + err := row.Scan(&count) + return count, err +} + +const deleteFailedAttempts = `-- name: DeleteFailedAttempts :exec +DELETE FROM payment_htlc_attempts WHERE payment_id = $1 AND attempt_index IN ( + SELECT attempt_index FROM payment_htlc_attempt_resolutions WHERE resolution_type = 2 +) +` + +// Delete all failed HTLC attempts for the given payment. Resolution type 2 +// indicates a failed attempt. 
+func (q *Queries) DeleteFailedAttempts(ctx context.Context, paymentID int64) error { + _, err := q.db.ExecContext(ctx, deleteFailedAttempts, paymentID) + return err +} + +const deletePayment = `-- name: DeletePayment :exec +DELETE FROM payments WHERE id = $1 +` + +func (q *Queries) DeletePayment(ctx context.Context, id int64) error { + _, err := q.db.ExecContext(ctx, deletePayment, id) + return err +} + +const failAttempt = `-- name: FailAttempt :exec +INSERT INTO payment_htlc_attempt_resolutions ( + attempt_index, + resolution_time, + resolution_type, + failure_source_index, + htlc_fail_reason, + failure_msg +) +VALUES ( + $1, + $2, + $3, + $4, + $5, + $6 +) +` + +type FailAttemptParams struct { + AttemptIndex int64 + ResolutionTime time.Time + ResolutionType int32 + FailureSourceIndex sql.NullInt32 + HtlcFailReason sql.NullInt32 + FailureMsg []byte +} + +func (q *Queries) FailAttempt(ctx context.Context, arg FailAttemptParams) error { + _, err := q.db.ExecContext(ctx, failAttempt, + arg.AttemptIndex, + arg.ResolutionTime, + arg.ResolutionType, + arg.FailureSourceIndex, + arg.HtlcFailReason, + arg.FailureMsg, + ) + return err +} + +const failPayment = `-- name: FailPayment :execresult +UPDATE payments SET fail_reason = $1 WHERE payment_identifier = $2 +` + +type FailPaymentParams struct { + FailReason sql.NullInt32 + PaymentIdentifier []byte +} + +func (q *Queries) FailPayment(ctx context.Context, arg FailPaymentParams) (sql.Result, error) { + return q.db.ExecContext(ctx, failPayment, arg.FailReason, arg.PaymentIdentifier) +} + +const fetchAllInflightAttempts = `-- name: FetchAllInflightAttempts :many +SELECT + ha.id, + ha.attempt_index, + ha.payment_id, + ha.session_key, + ha.attempt_time, + ha.payment_hash, + ha.first_hop_amount_msat, + ha.route_total_time_lock, + ha.route_total_amount, + ha.route_source_key +FROM payment_htlc_attempts ha +WHERE NOT EXISTS ( + SELECT 1 FROM payment_htlc_attempt_resolutions hr + WHERE hr.attempt_index = ha.attempt_index +) +AND 
ha.attempt_index > $1 +ORDER BY ha.attempt_index ASC +LIMIT $2 +` + +type FetchAllInflightAttemptsParams struct { + AttemptIndex int64 + Limit int32 +} + +// Fetch all inflight attempts with their payment data using pagination. +// Returns attempt data joined with payment and intent data to avoid separate queries. +func (q *Queries) FetchAllInflightAttempts(ctx context.Context, arg FetchAllInflightAttemptsParams) ([]PaymentHtlcAttempt, error) { + rows, err := q.db.QueryContext(ctx, fetchAllInflightAttempts, arg.AttemptIndex, arg.Limit) + if err != nil { + return nil, err + } + defer rows.Close() + var items []PaymentHtlcAttempt + for rows.Next() { + var i PaymentHtlcAttempt + if err := rows.Scan( + &i.ID, + &i.AttemptIndex, + &i.PaymentID, + &i.SessionKey, + &i.AttemptTime, + &i.PaymentHash, + &i.FirstHopAmountMsat, + &i.RouteTotalTimeLock, + &i.RouteTotalAmount, + &i.RouteSourceKey, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const fetchHopLevelCustomRecords = `-- name: FetchHopLevelCustomRecords :many +SELECT + l.id, + l.hop_id, + l.key, + l.value +FROM payment_hop_custom_records l +WHERE l.hop_id IN (/*SLICE:hop_ids*/?) +ORDER BY l.hop_id ASC, l.key ASC +` + +func (q *Queries) FetchHopLevelCustomRecords(ctx context.Context, hopIds []int64) ([]PaymentHopCustomRecord, error) { + query := fetchHopLevelCustomRecords + var queryParams []interface{} + if len(hopIds) > 0 { + for _, v := range hopIds { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:hop_ids*/?", makeQueryParams(len(queryParams), len(hopIds)), 1) + } else { + query = strings.Replace(query, "/*SLICE:hop_ids*/?", "NULL", 1) + } + rows, err := q.db.QueryContext(ctx, query, queryParams...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + var items []PaymentHopCustomRecord + for rows.Next() { + var i PaymentHopCustomRecord + if err := rows.Scan( + &i.ID, + &i.HopID, + &i.Key, + &i.Value, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const fetchHopsForAttempts = `-- name: FetchHopsForAttempts :many +SELECT + h.id, + h.htlc_attempt_index, + h.hop_index, + h.pub_key, + h.scid, + h.outgoing_time_lock, + h.amt_to_forward, + h.meta_data, + m.payment_addr AS mpp_payment_addr, + m.total_msat AS mpp_total_msat, + a.root_share AS amp_root_share, + a.set_id AS amp_set_id, + a.child_index AS amp_child_index, + b.encrypted_data, + b.blinding_point, + b.blinded_path_total_amt +FROM payment_route_hops h +LEFT JOIN payment_route_hop_mpp m ON m.hop_id = h.id +LEFT JOIN payment_route_hop_amp a ON a.hop_id = h.id +LEFT JOIN payment_route_hop_blinded b ON b.hop_id = h.id +WHERE h.htlc_attempt_index IN (/*SLICE:htlc_attempt_indices*/?) 
+ORDER BY h.htlc_attempt_index ASC, h.hop_index ASC +` + +type FetchHopsForAttemptsRow struct { + ID int64 + HtlcAttemptIndex int64 + HopIndex int32 + PubKey []byte + Scid string + OutgoingTimeLock int32 + AmtToForward int64 + MetaData []byte + MppPaymentAddr []byte + MppTotalMsat sql.NullInt64 + AmpRootShare []byte + AmpSetID []byte + AmpChildIndex sql.NullInt32 + EncryptedData []byte + BlindingPoint []byte + BlindedPathTotalAmt sql.NullInt64 +} + +func (q *Queries) FetchHopsForAttempts(ctx context.Context, htlcAttemptIndices []int64) ([]FetchHopsForAttemptsRow, error) { + query := fetchHopsForAttempts + var queryParams []interface{} + if len(htlcAttemptIndices) > 0 { + for _, v := range htlcAttemptIndices { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:htlc_attempt_indices*/?", makeQueryParams(len(queryParams), len(htlcAttemptIndices)), 1) + } else { + query = strings.Replace(query, "/*SLICE:htlc_attempt_indices*/?", "NULL", 1) + } + rows, err := q.db.QueryContext(ctx, query, queryParams...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + var items []FetchHopsForAttemptsRow + for rows.Next() { + var i FetchHopsForAttemptsRow + if err := rows.Scan( + &i.ID, + &i.HtlcAttemptIndex, + &i.HopIndex, + &i.PubKey, + &i.Scid, + &i.OutgoingTimeLock, + &i.AmtToForward, + &i.MetaData, + &i.MppPaymentAddr, + &i.MppTotalMsat, + &i.AmpRootShare, + &i.AmpSetID, + &i.AmpChildIndex, + &i.EncryptedData, + &i.BlindingPoint, + &i.BlindedPathTotalAmt, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const fetchHtlcAttemptResolutionsForPayments = `-- name: FetchHtlcAttemptResolutionsForPayments :many +SELECT + ha.payment_id, + hr.resolution_type +FROM payment_htlc_attempts ha +LEFT JOIN payment_htlc_attempt_resolutions hr ON hr.attempt_index = ha.attempt_index +WHERE ha.payment_id IN (/*SLICE:payment_ids*/?) +` + +type FetchHtlcAttemptResolutionsForPaymentsRow struct { + PaymentID int64 + ResolutionType sql.NullInt32 +} + +// Batch query to fetch only HTLC resolution status for multiple payments. +// We don't need to order by payment_id and attempt_time because we will +// group the resolutions by payment_id in the background. +func (q *Queries) FetchHtlcAttemptResolutionsForPayments(ctx context.Context, paymentIds []int64) ([]FetchHtlcAttemptResolutionsForPaymentsRow, error) { + query := fetchHtlcAttemptResolutionsForPayments + var queryParams []interface{} + if len(paymentIds) > 0 { + for _, v := range paymentIds { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:payment_ids*/?", makeQueryParams(len(queryParams), len(paymentIds)), 1) + } else { + query = strings.Replace(query, "/*SLICE:payment_ids*/?", "NULL", 1) + } + rows, err := q.db.QueryContext(ctx, query, queryParams...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + var items []FetchHtlcAttemptResolutionsForPaymentsRow + for rows.Next() { + var i FetchHtlcAttemptResolutionsForPaymentsRow + if err := rows.Scan(&i.PaymentID, &i.ResolutionType); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const fetchHtlcAttemptsForPayments = `-- name: FetchHtlcAttemptsForPayments :many +SELECT + ha.id, + ha.attempt_index, + ha.payment_id, + ha.session_key, + ha.attempt_time, + ha.payment_hash, + ha.first_hop_amount_msat, + ha.route_total_time_lock, + ha.route_total_amount, + ha.route_source_key, + hr.resolution_type, + hr.resolution_time, + hr.failure_source_index, + hr.htlc_fail_reason, + hr.failure_msg, + hr.settle_preimage +FROM payment_htlc_attempts ha +LEFT JOIN payment_htlc_attempt_resolutions hr ON hr.attempt_index = ha.attempt_index +WHERE ha.payment_id IN (/*SLICE:payment_ids*/?) 
+ORDER BY ha.payment_id ASC, ha.attempt_time ASC +` + +type FetchHtlcAttemptsForPaymentsRow struct { + ID int64 + AttemptIndex int64 + PaymentID int64 + SessionKey []byte + AttemptTime time.Time + PaymentHash []byte + FirstHopAmountMsat int64 + RouteTotalTimeLock int32 + RouteTotalAmount int64 + RouteSourceKey []byte + ResolutionType sql.NullInt32 + ResolutionTime sql.NullTime + FailureSourceIndex sql.NullInt32 + HtlcFailReason sql.NullInt32 + FailureMsg []byte + SettlePreimage []byte +} + +func (q *Queries) FetchHtlcAttemptsForPayments(ctx context.Context, paymentIds []int64) ([]FetchHtlcAttemptsForPaymentsRow, error) { + query := fetchHtlcAttemptsForPayments + var queryParams []interface{} + if len(paymentIds) > 0 { + for _, v := range paymentIds { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:payment_ids*/?", makeQueryParams(len(queryParams), len(paymentIds)), 1) + } else { + query = strings.Replace(query, "/*SLICE:payment_ids*/?", "NULL", 1) + } + rows, err := q.db.QueryContext(ctx, query, queryParams...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + var items []FetchHtlcAttemptsForPaymentsRow + for rows.Next() { + var i FetchHtlcAttemptsForPaymentsRow + if err := rows.Scan( + &i.ID, + &i.AttemptIndex, + &i.PaymentID, + &i.SessionKey, + &i.AttemptTime, + &i.PaymentHash, + &i.FirstHopAmountMsat, + &i.RouteTotalTimeLock, + &i.RouteTotalAmount, + &i.RouteSourceKey, + &i.ResolutionType, + &i.ResolutionTime, + &i.FailureSourceIndex, + &i.HtlcFailReason, + &i.FailureMsg, + &i.SettlePreimage, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const fetchPayment = `-- name: FetchPayment :one +SELECT + p.id, p.amount_msat, p.created_at, p.payment_identifier, p.fail_reason, + i.intent_type AS "intent_type", + i.intent_payload AS "intent_payload" +FROM payments p +LEFT JOIN payment_intents i ON i.payment_id = p.id +WHERE p.payment_identifier = $1 +` + +type FetchPaymentRow struct { + Payment Payment + IntentType sql.NullInt16 + IntentPayload []byte +} + +func (q *Queries) FetchPayment(ctx context.Context, paymentIdentifier []byte) (FetchPaymentRow, error) { + row := q.db.QueryRowContext(ctx, fetchPayment, paymentIdentifier) + var i FetchPaymentRow + err := row.Scan( + &i.Payment.ID, + &i.Payment.AmountMsat, + &i.Payment.CreatedAt, + &i.Payment.PaymentIdentifier, + &i.Payment.FailReason, + &i.IntentType, + &i.IntentPayload, + ) + return i, err +} + +const fetchPaymentDuplicates = `-- name: FetchPaymentDuplicates :many +SELECT + id, + payment_id, + payment_identifier, + amount_msat, + created_at, + fail_reason, + settle_preimage, + settle_time +FROM payment_duplicates +WHERE payment_id = $1 +ORDER BY id ASC +` + +// Fetch all duplicate payment records from the payment_duplicates table for +// a given payment ID. 
+func (q *Queries) FetchPaymentDuplicates(ctx context.Context, paymentID int64) ([]PaymentDuplicate, error) { + rows, err := q.db.QueryContext(ctx, fetchPaymentDuplicates, paymentID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []PaymentDuplicate + for rows.Next() { + var i PaymentDuplicate + if err := rows.Scan( + &i.ID, + &i.PaymentID, + &i.PaymentIdentifier, + &i.AmountMsat, + &i.CreatedAt, + &i.FailReason, + &i.SettlePreimage, + &i.SettleTime, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const fetchPaymentLevelFirstHopCustomRecords = `-- name: FetchPaymentLevelFirstHopCustomRecords :many +SELECT + l.id, + l.payment_id, + l.key, + l.value +FROM payment_first_hop_custom_records l +WHERE l.payment_id IN (/*SLICE:payment_ids*/?) +ORDER BY l.payment_id ASC, l.key ASC +` + +func (q *Queries) FetchPaymentLevelFirstHopCustomRecords(ctx context.Context, paymentIds []int64) ([]PaymentFirstHopCustomRecord, error) { + query := fetchPaymentLevelFirstHopCustomRecords + var queryParams []interface{} + if len(paymentIds) > 0 { + for _, v := range paymentIds { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:payment_ids*/?", makeQueryParams(len(queryParams), len(paymentIds)), 1) + } else { + query = strings.Replace(query, "/*SLICE:payment_ids*/?", "NULL", 1) + } + rows, err := q.db.QueryContext(ctx, query, queryParams...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + var items []PaymentFirstHopCustomRecord + for rows.Next() { + var i PaymentFirstHopCustomRecord + if err := rows.Scan( + &i.ID, + &i.PaymentID, + &i.Key, + &i.Value, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const fetchPaymentsByIDs = `-- name: FetchPaymentsByIDs :many +SELECT + p.id, + p.amount_msat, + p.created_at, + p.payment_identifier, + p.fail_reason, + pi.intent_type, + pi.intent_payload +FROM payments p +LEFT JOIN payment_intents pi ON pi.payment_id = p.id +WHERE p.id IN (/*SLICE:payment_ids*/?) +ORDER BY p.id ASC +` + +type FetchPaymentsByIDsRow struct { + ID int64 + AmountMsat int64 + CreatedAt time.Time + PaymentIdentifier []byte + FailReason sql.NullInt32 + IntentType sql.NullInt16 + IntentPayload []byte +} + +// Batch fetch payment and intent data for a set of payment IDs. +// Used to avoid fetching redundant payment data when processing multiple +// attempts for the same payment. +func (q *Queries) FetchPaymentsByIDs(ctx context.Context, paymentIds []int64) ([]FetchPaymentsByIDsRow, error) { + query := fetchPaymentsByIDs + var queryParams []interface{} + if len(paymentIds) > 0 { + for _, v := range paymentIds { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:payment_ids*/?", makeQueryParams(len(queryParams), len(paymentIds)), 1) + } else { + query = strings.Replace(query, "/*SLICE:payment_ids*/?", "NULL", 1) + } + rows, err := q.db.QueryContext(ctx, query, queryParams...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + var items []FetchPaymentsByIDsRow + for rows.Next() { + var i FetchPaymentsByIDsRow + if err := rows.Scan( + &i.ID, + &i.AmountMsat, + &i.CreatedAt, + &i.PaymentIdentifier, + &i.FailReason, + &i.IntentType, + &i.IntentPayload, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const fetchRouteLevelFirstHopCustomRecords = `-- name: FetchRouteLevelFirstHopCustomRecords :many +SELECT + l.id, + l.htlc_attempt_index, + l.key, + l.value +FROM payment_attempt_first_hop_custom_records l +WHERE l.htlc_attempt_index IN (/*SLICE:htlc_attempt_indices*/?) +ORDER BY l.htlc_attempt_index ASC, l.key ASC +` + +func (q *Queries) FetchRouteLevelFirstHopCustomRecords(ctx context.Context, htlcAttemptIndices []int64) ([]PaymentAttemptFirstHopCustomRecord, error) { + query := fetchRouteLevelFirstHopCustomRecords + var queryParams []interface{} + if len(htlcAttemptIndices) > 0 { + for _, v := range htlcAttemptIndices { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:htlc_attempt_indices*/?", makeQueryParams(len(queryParams), len(htlcAttemptIndices)), 1) + } else { + query = strings.Replace(query, "/*SLICE:htlc_attempt_indices*/?", "NULL", 1) + } + rows, err := q.db.QueryContext(ctx, query, queryParams...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + var items []PaymentAttemptFirstHopCustomRecord + for rows.Next() { + var i PaymentAttemptFirstHopCustomRecord + if err := rows.Scan( + &i.ID, + &i.HtlcAttemptIndex, + &i.Key, + &i.Value, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const filterPayments = `-- name: FilterPayments :many +/* ───────────────────────────────────────────── + fetch queries + ───────────────────────────────────────────── +*/ + +SELECT + p.id, p.amount_msat, p.created_at, p.payment_identifier, p.fail_reason, + i.intent_type AS "intent_type", + i.intent_payload AS "intent_payload" +FROM payments p +LEFT JOIN payment_intents i ON i.payment_id = p.id +WHERE ( + p.id > $1 OR + $1 IS NULL +) AND ( + p.id < $2 OR + $2 IS NULL +) AND ( + p.created_at >= $3 OR + $3 IS NULL +) AND ( + p.created_at <= $4 OR + $4 IS NULL +) AND ( + i.intent_type = $5 OR + $5 IS NULL OR i.intent_type IS NULL +) +ORDER BY + CASE WHEN $6 = false OR $6 IS NULL THEN p.id END ASC, + CASE WHEN $6 = true THEN p.id END DESC +LIMIT $7 +` + +type FilterPaymentsParams struct { + IndexOffsetGet sql.NullInt64 + IndexOffsetLet sql.NullInt64 + CreatedAfter sql.NullTime + CreatedBefore sql.NullTime + IntentType sql.NullInt16 + Reverse interface{} + NumLimit int32 +} + +type FilterPaymentsRow struct { + Payment Payment + IntentType sql.NullInt16 + IntentPayload []byte +} + +func (q *Queries) FilterPayments(ctx context.Context, arg FilterPaymentsParams) ([]FilterPaymentsRow, error) { + rows, err := q.db.QueryContext(ctx, filterPayments, + arg.IndexOffsetGet, + arg.IndexOffsetLet, + arg.CreatedAfter, + arg.CreatedBefore, + arg.IntentType, + arg.Reverse, + arg.NumLimit, + ) + if err != nil { + return nil, err + } + defer rows.Close() + var items []FilterPaymentsRow + for rows.Next() { + var i 
FilterPaymentsRow + if err := rows.Scan( + &i.Payment.ID, + &i.Payment.AmountMsat, + &i.Payment.CreatedAt, + &i.Payment.PaymentIdentifier, + &i.Payment.FailReason, + &i.IntentType, + &i.IntentPayload, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const insertHtlcAttempt = `-- name: InsertHtlcAttempt :one +INSERT INTO payment_htlc_attempts ( + payment_id, + attempt_index, + session_key, + attempt_time, + payment_hash, + first_hop_amount_msat, + route_total_time_lock, + route_total_amount, + route_source_key) +VALUES ( + $1, + $2, + $3, + $4, + $5, + $6, + $7, + $8, + $9) +RETURNING id +` + +type InsertHtlcAttemptParams struct { + PaymentID int64 + AttemptIndex int64 + SessionKey []byte + AttemptTime time.Time + PaymentHash []byte + FirstHopAmountMsat int64 + RouteTotalTimeLock int32 + RouteTotalAmount int64 + RouteSourceKey []byte +} + +func (q *Queries) InsertHtlcAttempt(ctx context.Context, arg InsertHtlcAttemptParams) (int64, error) { + row := q.db.QueryRowContext(ctx, insertHtlcAttempt, + arg.PaymentID, + arg.AttemptIndex, + arg.SessionKey, + arg.AttemptTime, + arg.PaymentHash, + arg.FirstHopAmountMsat, + arg.RouteTotalTimeLock, + arg.RouteTotalAmount, + arg.RouteSourceKey, + ) + var id int64 + err := row.Scan(&id) + return id, err +} + +const insertPayment = `-- name: InsertPayment :one +INSERT INTO payments ( + amount_msat, + created_at, + payment_identifier, + fail_reason) +VALUES ( + $1, + $2, + $3, + NULL +) +RETURNING id +` + +type InsertPaymentParams struct { + AmountMsat int64 + CreatedAt time.Time + PaymentIdentifier []byte +} + +// Insert a new payment and return its ID. +// When creating a payment we don't have a fail reason because we start the +// payment process. 
+func (q *Queries) InsertPayment(ctx context.Context, arg InsertPaymentParams) (int64, error) { + row := q.db.QueryRowContext(ctx, insertPayment, arg.AmountMsat, arg.CreatedAt, arg.PaymentIdentifier) + var id int64 + err := row.Scan(&id) + return id, err +} + +const insertPaymentAttemptFirstHopCustomRecord = `-- name: InsertPaymentAttemptFirstHopCustomRecord :exec +INSERT INTO payment_attempt_first_hop_custom_records ( + htlc_attempt_index, + key, + value +) +VALUES ( + $1, + $2, + $3 +) +` + +type InsertPaymentAttemptFirstHopCustomRecordParams struct { + HtlcAttemptIndex int64 + Key int64 + Value []byte +} + +func (q *Queries) InsertPaymentAttemptFirstHopCustomRecord(ctx context.Context, arg InsertPaymentAttemptFirstHopCustomRecordParams) error { + _, err := q.db.ExecContext(ctx, insertPaymentAttemptFirstHopCustomRecord, arg.HtlcAttemptIndex, arg.Key, arg.Value) + return err +} + +const insertPaymentDuplicateMig = `-- name: InsertPaymentDuplicateMig :one +INSERT INTO payment_duplicates ( + payment_id, + payment_identifier, + amount_msat, + created_at, + fail_reason, + settle_preimage, + settle_time +) +VALUES ( + $1, + $2, + $3, + $4, + $5, + $6, + $7 +) +RETURNING id +` + +type InsertPaymentDuplicateMigParams struct { + PaymentID int64 + PaymentIdentifier []byte + AmountMsat int64 + CreatedAt time.Time + FailReason sql.NullInt32 + SettlePreimage []byte + SettleTime sql.NullTime +} + +// Insert a duplicate payment record into the payment_duplicates table and +// return its ID. 
+func (q *Queries) InsertPaymentDuplicateMig(ctx context.Context, arg InsertPaymentDuplicateMigParams) (int64, error) { + row := q.db.QueryRowContext(ctx, insertPaymentDuplicateMig, + arg.PaymentID, + arg.PaymentIdentifier, + arg.AmountMsat, + arg.CreatedAt, + arg.FailReason, + arg.SettlePreimage, + arg.SettleTime, + ) + var id int64 + err := row.Scan(&id) + return id, err +} + +const insertPaymentFirstHopCustomRecord = `-- name: InsertPaymentFirstHopCustomRecord :exec +INSERT INTO payment_first_hop_custom_records ( + payment_id, + key, + value +) +VALUES ( + $1, + $2, + $3 +) +` + +type InsertPaymentFirstHopCustomRecordParams struct { + PaymentID int64 + Key int64 + Value []byte +} + +func (q *Queries) InsertPaymentFirstHopCustomRecord(ctx context.Context, arg InsertPaymentFirstHopCustomRecordParams) error { + _, err := q.db.ExecContext(ctx, insertPaymentFirstHopCustomRecord, arg.PaymentID, arg.Key, arg.Value) + return err +} + +const insertPaymentHopCustomRecord = `-- name: InsertPaymentHopCustomRecord :exec +INSERT INTO payment_hop_custom_records ( + hop_id, + key, + value +) +VALUES ( + $1, + $2, + $3 +) +` + +type InsertPaymentHopCustomRecordParams struct { + HopID int64 + Key int64 + Value []byte +} + +func (q *Queries) InsertPaymentHopCustomRecord(ctx context.Context, arg InsertPaymentHopCustomRecordParams) error { + _, err := q.db.ExecContext(ctx, insertPaymentHopCustomRecord, arg.HopID, arg.Key, arg.Value) + return err +} + +const insertPaymentIntent = `-- name: InsertPaymentIntent :one +INSERT INTO payment_intents ( + payment_id, + intent_type, + intent_payload) +VALUES ( + $1, + $2, + $3 +) +RETURNING id +` + +type InsertPaymentIntentParams struct { + PaymentID int64 + IntentType int16 + IntentPayload []byte +} + +// Insert a payment intent for a given payment and return its ID. 
+func (q *Queries) InsertPaymentIntent(ctx context.Context, arg InsertPaymentIntentParams) (int64, error) { + row := q.db.QueryRowContext(ctx, insertPaymentIntent, arg.PaymentID, arg.IntentType, arg.IntentPayload) + var id int64 + err := row.Scan(&id) + return id, err +} + +const insertPaymentMig = `-- name: InsertPaymentMig :one +/* ───────────────────────────────────────────── + Migration-specific queries + + These queries are used ONLY for the one-time migration from KV to SQL. + ───────────────────────────────────────────── +*/ + +INSERT INTO payments ( + amount_msat, + created_at, + payment_identifier, + fail_reason) +VALUES ( + $1, + $2, + $3, + $4 +) +RETURNING id +` + +type InsertPaymentMigParams struct { + AmountMsat int64 + CreatedAt time.Time + PaymentIdentifier []byte + FailReason sql.NullInt32 +} + +// Migration-specific payment insert that allows setting fail_reason. +// Normal InsertPayment forces fail_reason to NULL since new payments +// aren't failed yet. During migration, we're inserting historical data +// that may already be failed. 
+func (q *Queries) InsertPaymentMig(ctx context.Context, arg InsertPaymentMigParams) (int64, error) { + row := q.db.QueryRowContext(ctx, insertPaymentMig, + arg.AmountMsat, + arg.CreatedAt, + arg.PaymentIdentifier, + arg.FailReason, + ) + var id int64 + err := row.Scan(&id) + return id, err +} + +const insertRouteHop = `-- name: InsertRouteHop :one +INSERT INTO payment_route_hops ( + htlc_attempt_index, + hop_index, + pub_key, + scid, + outgoing_time_lock, + amt_to_forward, + meta_data +) +VALUES ( + $1, + $2, + $3, + $4, + $5, + $6, + $7 +) +RETURNING id +` + +type InsertRouteHopParams struct { + HtlcAttemptIndex int64 + HopIndex int32 + PubKey []byte + Scid string + OutgoingTimeLock int32 + AmtToForward int64 + MetaData []byte +} + +func (q *Queries) InsertRouteHop(ctx context.Context, arg InsertRouteHopParams) (int64, error) { + row := q.db.QueryRowContext(ctx, insertRouteHop, + arg.HtlcAttemptIndex, + arg.HopIndex, + arg.PubKey, + arg.Scid, + arg.OutgoingTimeLock, + arg.AmtToForward, + arg.MetaData, + ) + var id int64 + err := row.Scan(&id) + return id, err +} + +const insertRouteHopAmp = `-- name: InsertRouteHopAmp :exec +INSERT INTO payment_route_hop_amp ( + hop_id, + root_share, + set_id, + child_index +) +VALUES ( + $1, + $2, + $3, + $4 +) +` + +type InsertRouteHopAmpParams struct { + HopID int64 + RootShare []byte + SetID []byte + ChildIndex int32 +} + +func (q *Queries) InsertRouteHopAmp(ctx context.Context, arg InsertRouteHopAmpParams) error { + _, err := q.db.ExecContext(ctx, insertRouteHopAmp, + arg.HopID, + arg.RootShare, + arg.SetID, + arg.ChildIndex, + ) + return err +} + +const insertRouteHopBlinded = `-- name: InsertRouteHopBlinded :exec +INSERT INTO payment_route_hop_blinded ( + hop_id, + encrypted_data, + blinding_point, + blinded_path_total_amt +) +VALUES ( + $1, + $2, + $3, + $4 +) +` + +type InsertRouteHopBlindedParams struct { + HopID int64 + EncryptedData []byte + BlindingPoint []byte + BlindedPathTotalAmt sql.NullInt64 +} + +func (q 
*Queries) InsertRouteHopBlinded(ctx context.Context, arg InsertRouteHopBlindedParams) error { + _, err := q.db.ExecContext(ctx, insertRouteHopBlinded, + arg.HopID, + arg.EncryptedData, + arg.BlindingPoint, + arg.BlindedPathTotalAmt, + ) + return err +} + +const insertRouteHopMpp = `-- name: InsertRouteHopMpp :exec +INSERT INTO payment_route_hop_mpp ( + hop_id, + payment_addr, + total_msat +) +VALUES ( + $1, + $2, + $3 +) +` + +type InsertRouteHopMppParams struct { + HopID int64 + PaymentAddr []byte + TotalMsat int64 +} + +func (q *Queries) InsertRouteHopMpp(ctx context.Context, arg InsertRouteHopMppParams) error { + _, err := q.db.ExecContext(ctx, insertRouteHopMpp, arg.HopID, arg.PaymentAddr, arg.TotalMsat) + return err +} + +const settleAttempt = `-- name: SettleAttempt :exec +INSERT INTO payment_htlc_attempt_resolutions ( + attempt_index, + resolution_time, + resolution_type, + settle_preimage +) +VALUES ( + $1, + $2, + $3, + $4 +) +` + +type SettleAttemptParams struct { + AttemptIndex int64 + ResolutionTime time.Time + ResolutionType int32 + SettlePreimage []byte +} + +func (q *Queries) SettleAttempt(ctx context.Context, arg SettleAttemptParams) error { + _, err := q.db.ExecContext(ctx, settleAttempt, + arg.AttemptIndex, + arg.ResolutionTime, + arg.ResolutionType, + arg.SettlePreimage, + ) + return err +} From 581fae5fea5cee2b375b7ebae147cf772af540b5 Mon Sep 17 00:00:00 2001 From: ziggie Date: Sun, 11 Jan 2026 00:02:52 +0100 Subject: [PATCH 04/10] payments/migration1: add the payments mig code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement the KV→SQL payment migration and add an in-migration validation pass that deep-compares KV and SQL payment data in batches. Duplicate payments are migrated into the payment_duplicates table, and duplicates without attempt info or explicit resolution are marked failed to ensure terminal state. Validation checks those rows as well. 
--- .../db/migration1/migration_validation.go | 480 +++++++++++ payments/db/migration1/sql_migration.go | 801 ++++++++++++++++++ 2 files changed, 1281 insertions(+) create mode 100644 payments/db/migration1/migration_validation.go create mode 100644 payments/db/migration1/sql_migration.go diff --git a/payments/db/migration1/migration_validation.go b/payments/db/migration1/migration_validation.go new file mode 100644 index 00000000000..304f39070ac --- /dev/null +++ b/payments/db/migration1/migration_validation.go @@ -0,0 +1,480 @@ +package migration1 + +import ( + "bytes" + "context" + "database/sql" + "fmt" + "reflect" + "sort" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd/kvdb" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/payments/db/migration1/sqlc" + "github.com/lightningnetwork/lnd/record" + "github.com/pmezard/go-difflib/difflib" +) + +// migratedPaymentRef is a reference to a migrated payment. +type migratedPaymentRef struct { + Hash lntypes.Hash + PaymentID int64 +} + +// validateMigratedPaymentBatch performs a deep validation pass by comparing +// KV payments with their SQL counterparts for a batch of payments. 
+func validateMigratedPaymentBatch(ctx context.Context, + kvBackend kvdb.Backend, sqlDB SQLQueries, + cfg *SQLStoreConfig, batch []migratedPaymentRef) error { + + if len(batch) == 0 { + return nil + } + + if cfg == nil || cfg.QueryCfg == nil { + return fmt.Errorf("missing SQL store config for validation") + } + + paymentIDs := make([]int64, 0, len(batch)) + for _, item := range batch { + paymentIDs = append(paymentIDs, item.PaymentID) + } + + rows, err := sqlDB.FetchPaymentsByIDs(ctx, paymentIDs) + if err != nil { + return fmt.Errorf("fetch SQL payments: %w", err) + } + if len(rows) != len(paymentIDs) { + return fmt.Errorf("SQL payment batch mismatch: got=%d want=%d", + len(rows), len(paymentIDs)) + } + + batchData, err := batchLoadPaymentDetailsData( + ctx, cfg.QueryCfg, sqlDB, paymentIDs, + ) + if err != nil { + return fmt.Errorf("load payment batch: %w", err) + } + + // After loading the SQL payments, we need to compare them with the KV + // payments to ensure they are the same. So we fetch the KV payments and + // compare them with the SQL payments. 
+ err = kvBackend.View(func(kvTx kvdb.RTx) error { + paymentsBucket := kvTx.ReadBucket(paymentsRootBucket) + if paymentsBucket == nil { + return fmt.Errorf("no payments bucket") + } + + for _, row := range rows { + payment := row.GetPayment() + hash := payment.PaymentIdentifier + var paymentHash lntypes.Hash + copy(paymentHash[:], hash) + + paymentBucket := paymentsBucket.NestedReadBucket(hash) + if paymentBucket == nil { + return fmt.Errorf("missing payment bucket %x", + hash[:8]) + } + + kvPayment, err := fetchPayment(paymentBucket) + if err != nil { + return fmt.Errorf("fetch KV payment %x: %w", + hash[:8], err) + } + + sqlPayment, err := buildPaymentFromBatchData( + row, batchData, + ) + if err != nil { + return fmt.Errorf("build SQL payment %x: %w", + hash[:8], err) + } + + normalizePaymentForCompare(kvPayment) + normalizePaymentForCompare(sqlPayment) + + if !reflect.DeepEqual(kvPayment, sqlPayment) { + // make sure we properly print the diff between + // the two payments if they are not equal. + dumpCfg := spew.ConfigState{ + DisablePointerAddresses: true, + DisableCapacities: true, + DisableMethods: true, + SortKeys: true, + } + diff := difflib.UnifiedDiff{ + A: difflib.SplitLines( + dumpCfg.Sdump(kvPayment), + ), + B: difflib.SplitLines( + dumpCfg.Sdump(sqlPayment), + ), + FromFile: "kv", + ToFile: "sql", + Context: 3, + } + diffText, _ := difflib.GetUnifiedDiffString( + diff, + ) + + return fmt.Errorf("payment mismatch %x\n%s", + hash[:8], diffText) + } + + err = compareDuplicatePayments( + ctx, paymentBucket, sqlDB, payment.ID, + paymentHash, + ) + if err != nil { + return err + } + } + + return nil + }, func() {}) + if err != nil { + return err + } + + return nil +} + +// normalizePaymentForCompare normalizes fields that are expected to differ +// between KV and SQL representations before deep comparison. 
+func normalizePaymentForCompare(payment *MPPayment) {
+	if payment == nil {
+		return
+	}
+
+	// SequenceNum will not be equal because the kv db can have already
+	// payments deleted during its lifetime.
+	payment.SequenceNum = 0
+
+	// We normalize timestamps before deep-comparing KV vs SQL objects.
+	//
+	// - **Microseconds**: SQL backends typically persist timestamps at
+	//   microsecond precision (e.g. Postgres), while KV (Go `time.Time`)
+	//   can contain nanoseconds. Truncating avoids false mismatches caused
+	//   solely by differing storage precision.
+	//
+	// - **Local timezone**: when reading from SQL, timestamps are typically
+	//   materialized in the local timezone by the SQL layer (and/or
+	//   converters). Converting both sides to `time.Local` ensures the
+	//   comparison is consistent across KV and SQL representations.
+	trunc := func(t time.Time) time.Time {
+		if t.IsZero() {
+			return t
+		}
+
+		return time.Unix(0, t.UnixNano()).
+			In(time.Local).
+			Truncate(time.Microsecond)
+	}
+
+	// Normalize PaymentCreationInfo fields.
+	if payment.Info != nil {
+		payment.Info.CreationTime = trunc(
+			payment.Info.CreationTime,
+		)
+		if len(payment.Info.PaymentRequest) == 0 {
+			payment.Info.PaymentRequest = []byte{}
+		}
+		if len(payment.Info.FirstHopCustomRecords) == 0 {
+			payment.Info.FirstHopCustomRecords = lnwire.
+				CustomRecords{}
+		}
+	}
+
+	// Normalize the HTLCs slice so nil is converted to an empty slice.
+	if len(payment.HTLCs) == 0 {
+		payment.HTLCs = []HTLCAttempt{}
+	}
+
+	// Normalize HTLC attempt ordering; SQL/KV may return attempts
+	// in different orders.
+	sort.SliceStable(payment.HTLCs, func(i, j int) bool {
+		return payment.HTLCs[i].AttemptID < payment.HTLCs[j].AttemptID
+	})
+
+	// Normalize HTLCAttemptInfo fields.
+ for i := range payment.HTLCs { + htlc := &payment.HTLCs[i] + + htlc.AttemptTime = trunc(htlc.AttemptTime) + if htlc.Settle != nil { + htlc.Settle.SettleTime = trunc( + htlc.Settle.SettleTime, + ) + } + if htlc.Failure != nil { + htlc.Failure.FailTime = trunc( + htlc.Failure.FailTime, + ) + } + + // Clear cached fields not persisted in storage. + htlc.onionBlob = [1366]byte{} + htlc.circuit = nil + htlc.cachedSessionKey = nil + + if len(htlc.Route.FirstHopWireCustomRecords) == 0 { + htlc.Route.FirstHopWireCustomRecords = + lnwire.CustomRecords{} + } + + for j := range htlc.Route.Hops { + if len(htlc.Route.Hops[j].CustomRecords) == 0 { + htlc.Route.Hops[j].CustomRecords = + record.CustomSet{} + } + } + } +} + +// duplicateRecord is a record that represents a duplicate payment. +type duplicateRecord struct { + PaymentIdentifier []byte + AmountMsat int64 + CreatedAt time.Time + FailReason sql.NullInt32 + SettlePreimage []byte + SettleTime sql.NullTime +} + +// compareDuplicatePayments validates migrated duplicate rows against KV data. +func compareDuplicatePayments(ctx context.Context, paymentBucket kvdb.RBucket, + sqlDB SQLQueries, paymentID int64, hash lntypes.Hash) error { + + // Fetch the duplicate payments from the KV store. + kvDuplicates, err := fetchDuplicateRecords(paymentBucket) + if err != nil { + return fmt.Errorf("fetch KV duplicates %x: %w", + hash[:8], err) + } + + // Fetch the duplicate payments from the SQL store. 
+ sqlDuplicates, err := sqlDB.FetchPaymentDuplicates(ctx, paymentID) + if err != nil { + return fmt.Errorf("fetch SQL duplicates %x: %w", + hash[:8], err) + } + + if len(kvDuplicates) != len(sqlDuplicates) { + return fmt.Errorf("duplicate count mismatch %x: kv=%d "+ + "sql=%d", hash[:8], len(kvDuplicates), + len(sqlDuplicates)) + } + + kvNormalized := normalizeDuplicateRecords(kvDuplicates) + sqlNormalized := normalizeDuplicateRecords( + dbDuplicatesToDuplicateRecords(sqlDuplicates), + ) + + sortDuplicates(kvNormalized) + sortDuplicates(sqlNormalized) + + if !reflect.DeepEqual(kvNormalized, sqlNormalized) { + dumpCfg := spew.ConfigState{ + DisablePointerAddresses: true, + DisableCapacities: true, + DisableMethods: true, + SortKeys: true, + } + diff := difflib.UnifiedDiff{ + A: difflib.SplitLines( + dumpCfg.Sdump(kvNormalized), + ), + B: difflib.SplitLines( + dumpCfg.Sdump(sqlNormalized), + ), + FromFile: "kv", + ToFile: "sql", + Context: 3, + } + diffText, _ := difflib.GetUnifiedDiffString(diff) + + return fmt.Errorf("duplicate mismatch %x\n%s", + hash[:8], diffText) + } + + return nil +} + +// fetchDuplicateRecords reads duplicate payment records from the KV bucket. 
+func fetchDuplicateRecords(paymentBucket kvdb.RBucket) ([]duplicateRecord, + error) { + + dupBucket := paymentBucket.NestedReadBucket(duplicatePaymentsBucket) + if dupBucket == nil { + return nil, nil + } + + var duplicates []duplicateRecord + err := dupBucket.ForEach(func(seqBytes, _ []byte) error { + if len(seqBytes) != 8 { + return nil + } + + subBucket := dupBucket.NestedReadBucket(seqBytes) + if subBucket == nil { + return nil + } + + creationData := subBucket.Get(duplicatePaymentCreationInfoKey) + if creationData == nil { + return fmt.Errorf("missing duplicate creation info") + } + + creationInfo, err := deserializeDuplicatePaymentCreationInfo( + bytes.NewReader(creationData), + ) + if err != nil { + return fmt.Errorf("deserialize duplicate creation "+ + "info: %w", err) + } + + settleData := subBucket.Get(duplicatePaymentSettleInfoKey) + failReasonData := subBucket.Get(duplicatePaymentFailInfoKey) + + if settleData != nil && len(failReasonData) > 0 { + return fmt.Errorf("duplicate has both settle and " + + "fail info") + } + + var ( + failReason sql.NullInt32 + settlePreimage []byte + settleTime sql.NullTime + ) + + switch { + case settleData != nil: + settlePreimage, settleTime, err = + parseDuplicateSettleData(settleData) + if err != nil { + return err + } + case len(failReasonData) > 0: + failReason = sql.NullInt32{ + Int32: int32(failReasonData[0]), + Valid: true, + } + default: + // If the duplicate has no settle or fail info, it is + // considered failed. Every duplicate payment must have + // either a settle or fail info in the sql database. So + // we set the fail reason to error to mimic the behavior + // for the kv store. 
+ failReason = sql.NullInt32{ + Int32: int32(FailureReasonError), + Valid: true, + } + } + + duplicates = append(duplicates, duplicateRecord{ + PaymentIdentifier: creationInfo.PaymentIdentifier[:], + AmountMsat: int64(creationInfo.Value), + CreatedAt: normalizeTimeForSQL( + creationInfo.CreationTime, + ), + FailReason: failReason, + SettlePreimage: settlePreimage, + SettleTime: settleTime, + }) + + return nil + }) + if err != nil { + return nil, err + } + + return duplicates, nil +} + +// dbDuplicatesToDuplicateRecords maps SQL duplicate rows into comparable +// duplicate records. +func dbDuplicatesToDuplicateRecords( + rows []sqlc.PaymentDuplicate) []duplicateRecord { + + duplicates := make([]duplicateRecord, 0, len(rows)) + for _, row := range rows { + duplicates = append(duplicates, duplicateRecord{ + PaymentIdentifier: row.PaymentIdentifier, + AmountMsat: row.AmountMsat, + CreatedAt: row.CreatedAt, + FailReason: row.FailReason, + SettlePreimage: row.SettlePreimage, + SettleTime: row.SettleTime, + }) + } + + return duplicates +} + +// normalizeDuplicateRecords normalizes time precision and empty fields. +func normalizeDuplicateRecords(records []duplicateRecord) []duplicateRecord { + if len(records) == 0 { + return []duplicateRecord{} + } + + trunc := func(t time.Time) time.Time { + if t.IsZero() { + return t + } + + return t.In(time.Local).Truncate(time.Microsecond) + } + + for i := range records { + records[i].CreatedAt = trunc(records[i].CreatedAt) + if records[i].SettleTime.Valid { + records[i].SettleTime.Time = trunc( + records[i].SettleTime.Time, + ) + } + + if len(records[i].SettlePreimage) == 0 { + records[i].SettlePreimage = []byte{} + } + } + + return records +} + +// sortDuplicates orders records deterministically for deep comparison. 
+func sortDuplicates(records []duplicateRecord) { + sort.SliceStable(records, func(i, j int) bool { + ai := records[i] + aj := records[j] + + // Duplicates are "duplicates" because they share the same + // payment identifier. So ordering can be stable using + // timestamp + amount. + if !ai.CreatedAt.Equal(aj.CreatedAt) { + return ai.CreatedAt.Before(aj.CreatedAt) + } + + return ai.AmountMsat < aj.AmountMsat + }) +} + +// validatePaymentCounts compares the number of migrated payments with the SQL +// payment count to catch missing rows. +func validatePaymentCounts(ctx context.Context, sqlDB SQLQueries, + expectedCount int64) error { + + sqlCount, err := sqlDB.CountPayments(ctx) + if err != nil { + return fmt.Errorf("count SQL payments: %w", err) + } + if expectedCount != sqlCount { + return fmt.Errorf("payment count mismatch: kv=%d sql=%d", + expectedCount, sqlCount) + } + + return nil +} diff --git a/payments/db/migration1/sql_migration.go b/payments/db/migration1/sql_migration.go new file mode 100644 index 00000000000..8fad6822039 --- /dev/null +++ b/payments/db/migration1/sql_migration.go @@ -0,0 +1,801 @@ +package migration1 + +import ( + "bytes" + "context" + "database/sql" + "fmt" + "strconv" + "time" + + "github.com/lightningnetwork/lnd/kvdb" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/payments/db/migration1/sqlc" + "github.com/lightningnetwork/lnd/routing/route" + "golang.org/x/time/rate" +) + +// MigrationStats tracks migration progress. 
+type MigrationStats struct { + TotalPayments int64 + SuccessfulPayments int64 + FailedPayments int64 + InFlightPayments int64 + InitiatedPayments int64 + TotalAttempts int64 + SettledAttempts int64 + FailedAttempts int64 + InFlightAttempts int64 + TotalHops int64 + DuplicatePayments int64 + DuplicateEntries int64 + MigrationDuration time.Duration +} + +// MigratePaymentsKVToSQL migrates payments from KV to SQL and validates +// migrated data in batches. Callers are responsible for executing this within +// a single SQL transaction if atomicity is required. +func MigratePaymentsKVToSQL(ctx context.Context, kvBackend kvdb.Backend, + sqlDB SQLQueries, cfg *SQLStoreConfig) error { + + if cfg == nil || cfg.QueryCfg == nil { + return fmt.Errorf("missing SQL store config for validation") + } + + if cfg.QueryCfg.MaxBatchSize == 0 { + return fmt.Errorf("invalid max batch size for validation") + } + + stats := &MigrationStats{} + startTime := time.Now() + + log.Infof("Starting payment migration from KV to SQL...") + + var ( + validationBatch []migratedPaymentRef + validatedPayments int64 + + reportInterval = rate.Sometimes{Interval: 5 * time.Second} + validationInterval = rate.Sometimes{Interval: 5 * time.Second} + ) + + // Open the KV backend in read-only mode. + err := kvBackend.View(func(kvTx kvdb.RTx) error { + // In case we start with an empty database, there are no + // payments to migrate. + paymentsBucket := kvTx.ReadBucket(paymentsRootBucket) + if paymentsBucket == nil { + log.Infof("No payments bucket found - database is " + + "empty") + + return nil + } + + // The index bucket maps sequence number -> payment hash. + indexes := kvTx.ReadBucket(paymentsIndexBucket) + if indexes == nil { + return fmt.Errorf("index bucket does not exist") + } + + // We iterate over all sequence numbers in the index bucket to + // make sure we have the correct order of payments. 
Otherwise, + // if we just loop over the payments bucket, we might get the + // payments not in the chronological order but rather the + // lexicographical order of the payment hashes. + return indexes.ForEach(func(seqKey, indexVal []byte) error { + // Progress reporting based on time + actual work done. + reportProgress := func() { + elapsed := time.Since(startTime) + if elapsed <= 0 { + return + } + + if stats.TotalPayments == 0 { + return + } + + paymentRate := float64(stats.TotalPayments) / + elapsed.Seconds() + attemptRate := float64(stats.TotalAttempts) / + elapsed.Seconds() + + log.Infof("Progress: %d payments, %d "+ + "attempts, %d hops | Rate: %.1f "+ + "pmt/s, %.1f att/s | Elapsed: %v", + stats.TotalPayments, + stats.TotalAttempts, stats.TotalHops, + paymentRate, attemptRate, + elapsed.Round(time.Second), + ) + } + + reportInterval.Do(reportProgress) + + r := bytes.NewReader(indexVal) + paymentHash, err := deserializePaymentIndex(r) + if err != nil { + return err + } + + paymentBucket := paymentsBucket.NestedReadBucket( + paymentHash[:], + ) + if paymentBucket == nil { + // We skip the entry in case this sequence + // number does not have a corresponding + // payment bucket. But aborting would not help + // either because it is just a db inconsistency. + log.Warnf("Missing bucket for payment %x", + paymentHash[:8]) + + return nil + } + + // Every payment bucket should have a sequence number + // which is also important to check for duplicates. + seqBytes := paymentBucket.Get(paymentSequenceKey) + if seqBytes == nil { + return ErrNoSequenceNumber + } + + // Skip duplicates. They are migrated into the + // payment_duplicates table when the primary payment is + // processed. + if !bytes.Equal(seqBytes, seqKey) { + return nil + } + + // Fetch the payment from the kv store. + payment, err := fetchPayment(paymentBucket) + if err != nil { + return fmt.Errorf("fetch payment %x: %w", + paymentHash[:8], err) + } + + // Migrate the payment to the SQL database. 
+ paymentID, err := migratePayment( + ctx, payment, paymentHash, sqlDB, stats, + ) + if err != nil { + return fmt.Errorf("migrate payment %x: %w", + paymentHash[:8], err) + } + + // Check for duplicates. + dupBucket := paymentBucket.NestedReadBucket( + duplicatePaymentsBucket, + ) + if dupBucket != nil { + err = migrateDuplicatePayments( + ctx, dupBucket, paymentHash, + paymentID, sqlDB, stats, + ) + if err != nil { + return fmt.Errorf("migrate duplicates "+ + "%x: %w", paymentHash[:8], + err) + } + } + + // Add the payment to the validation batch. + validationBatch = append( + validationBatch, migratedPaymentRef{ + Hash: paymentHash, + PaymentID: paymentID, + }, + ) + if uint32(len(validationBatch)) >= + cfg.QueryCfg.MaxBatchSize { + + err := validateMigratedPaymentBatch( + ctx, kvBackend, sqlDB, + cfg, + validationBatch, + ) + if err != nil { + return err + } + + validatedPayments += int64( + len(validationBatch), + ) + + // Log validation progress periodically. + validationInterval.Do(func() { + log.Infof("Validated %d/%d payments", + validatedPayments, + stats.TotalPayments, + ) + }) + + validationBatch = validationBatch[:0] + } + + return nil + }) + }, func() {}) + + if err != nil { + return fmt.Errorf("migrate payments: %w", err) + } + + // Validate any remaining payments in the batch. + if len(validationBatch) > 0 { + if err := validateMigratedPaymentBatch( + ctx, kvBackend, sqlDB, cfg, validationBatch, + ); err != nil { + return err + } + + validatedPayments += int64(len(validationBatch)) + log.Infof("Validated %d/%d payments", validatedPayments, + stats.TotalPayments) + } + + // Validate the total number of payments as an additional sanity check. 
+ if err := validatePaymentCounts( + ctx, sqlDB, stats.TotalPayments, + ); err != nil { + return err + } + + stats.MigrationDuration = time.Since(startTime) + + printMigrationSummary(stats) + + return nil +} + +// normalizeTimeForSQL converts a timestamp into the representation we persist +// and compare against in SQL: +// - drops any monotonic clock reading (SQL can't store it), +// - forces UTC for deterministic comparisons across environments. +// +// A zero time is returned unchanged. +func normalizeTimeForSQL(t time.Time) time.Time { + if t.IsZero() { + return t + } + + return time.Unix(0, t.UnixNano()).UTC() +} + +// migratePayment migrates a single payment from KV to SQL. +func migratePayment(ctx context.Context, payment *MPPayment, hash lntypes.Hash, + sqlDB SQLQueries, stats *MigrationStats) (int64, error) { + + // Update migration stats based on payment status. + switch payment.Status { + case StatusSucceeded: + stats.SuccessfulPayments++ + + case StatusFailed: + stats.FailedPayments++ + + case StatusInFlight: + stats.InFlightPayments++ + + case StatusInitiated: + stats.InitiatedPayments++ + } + + // Prepare fail reason for SQL insert. + var failReason sql.NullInt32 + if payment.FailureReason != nil { + failReason = sql.NullInt32{ + Int32: int32(*payment.FailureReason), + Valid: true, + } + } + + // Insert payment using migration query. + paymentID, err := sqlDB.InsertPaymentMig( + ctx, sqlc.InsertPaymentMigParams{ + AmountMsat: int64(payment.Info.Value), + CreatedAt: normalizeTimeForSQL( + payment.Info.CreationTime, + ), + PaymentIdentifier: hash[:], + FailReason: failReason, + }) + if err != nil { + return 0, fmt.Errorf("insert payment: %w", err) + } + + // Insert payment intent. + // + // Only insert a row if we have an actual intent payload. For legacy + // hash-only/keysend-style payments, the intent may be absent. 
+ if len(payment.Info.PaymentRequest) > 0 { + _, err = sqlDB.InsertPaymentIntent( + ctx, sqlc.InsertPaymentIntentParams{ + PaymentID: paymentID, + IntentType: int16(PaymentIntentTypeBolt11), + IntentPayload: payment.Info.PaymentRequest, + }, + ) + if err != nil { + return 0, fmt.Errorf("insert intent: %w", err) + } + } + + // Insert first hop custom records (payment level). + for key, value := range payment.Info.FirstHopCustomRecords { + err = sqlDB.InsertPaymentFirstHopCustomRecord(ctx, + sqlc.InsertPaymentFirstHopCustomRecordParams{ + PaymentID: paymentID, + Key: int64(key), + Value: value, + }, + ) + if err != nil { + return 0, fmt.Errorf("insert custom record: %w", err) + } + } + + // Migrate HTLC attempts. + for _, htlc := range payment.HTLCs { + err = migrateHTLCAttempt( + ctx, paymentID, hash, &htlc, sqlDB, stats, + ) + if err != nil { + return 0, fmt.Errorf("migrate attempt %d: %w", + htlc.AttemptID, err) + } + } + + stats.TotalPayments++ + + return paymentID, nil +} + +// migrateHTLCAttempt migrates a single HTLC attempt. +func migrateHTLCAttempt(ctx context.Context, paymentID int64, + parentPaymentHash lntypes.Hash, htlc *HTLCAttempt, sqlDB SQLQueries, + stats *MigrationStats) error { + + // Validate that we have a payment hash for the attempt. + // + // NOTE: We always require an attempt payment hash. A missing hash is an + // unrecoverable inconsistency. All payments should have a payment hash + // (AMP,MPP,Legacy) + var paymentHash []byte + switch { + case htlc.Hash != nil: + paymentHash = (*htlc.Hash)[:] + + default: + return fmt.Errorf("HTLC attempt %d missing payment hash "+ + "(parent payment hash=%x)", htlc.AttemptID, + parentPaymentHash[:]) + } + + firstHopAmountMsat := int64(htlc.Route.FirstHopAmount.Val.Int()) + + // Get the session key bytes. + sessionKeyBytes := htlc.SessionKey().Serialize() + + // Insert HTLC attempt. 
+ _, err := sqlDB.InsertHtlcAttempt(ctx, sqlc.InsertHtlcAttemptParams{ + PaymentID: paymentID, + AttemptIndex: int64(htlc.AttemptID), + SessionKey: sessionKeyBytes, + AttemptTime: normalizeTimeForSQL(htlc.AttemptTime), + PaymentHash: paymentHash, + FirstHopAmountMsat: firstHopAmountMsat, + RouteTotalTimeLock: int32(htlc.Route.TotalTimeLock), + RouteTotalAmount: int64(htlc.Route.TotalAmount), + RouteSourceKey: htlc.Route.SourcePubKey[:], + }) + if err != nil { + return fmt.Errorf("insert HTLC attempt: %w", err) + } + + // Insert the route-level first hop custom records. + for key, value := range htlc.Route.FirstHopWireCustomRecords { + err = sqlDB.InsertPaymentAttemptFirstHopCustomRecord( + ctx, + sqlc.InsertPaymentAttemptFirstHopCustomRecordParams{ + HtlcAttemptIndex: int64(htlc.AttemptID), + Key: int64(key), + Value: value, + }, + ) + if err != nil { + return fmt.Errorf("insert attempt first hop custom "+ + "record: %w", err) + } + } + + // Insert route hops. + for hopIndex := range htlc.Route.Hops { + hop := htlc.Route.Hops[hopIndex] + err = migrateRouteHop( + ctx, int64(htlc.AttemptID), hopIndex, hop, + sqlDB, stats, + ) + if err != nil { + return fmt.Errorf("migrate hop %d: %w", hopIndex, err) + } + } + + // Handle attempt resolution (settle or fail). 
+ switch { + case htlc.Settle != nil: + // Settled + err = sqlDB.SettleAttempt(ctx, sqlc.SettleAttemptParams{ + AttemptIndex: int64(htlc.AttemptID), + ResolutionTime: normalizeTimeForSQL( + htlc.Settle.SettleTime, + ), + ResolutionType: int32(HTLCAttemptResolutionSettled), + SettlePreimage: htlc.Settle.Preimage[:], + }) + if err != nil { + return fmt.Errorf("settle attempt: %w", err) + } + + stats.SettledAttempts++ + + case htlc.Failure != nil: + var failureMsg bytes.Buffer + if htlc.Failure.Message != nil { + err := lnwire.EncodeFailureMessage( + &failureMsg, htlc.Failure.Message, 0, + ) + if err != nil { + return fmt.Errorf("failed to encode "+ + "failure message: %w", err) + } + } + + err = sqlDB.FailAttempt(ctx, sqlc.FailAttemptParams{ + AttemptIndex: int64(htlc.AttemptID), + ResolutionTime: normalizeTimeForSQL( + htlc.Failure.FailTime, + ), + ResolutionType: int32(HTLCAttemptResolutionFailed), + FailureSourceIndex: sql.NullInt32{ + Int32: int32(htlc.Failure.FailureSourceIndex), + Valid: true, + }, + HtlcFailReason: sql.NullInt32{ + Int32: int32(htlc.Failure.Reason), + Valid: true, + }, + FailureMsg: failureMsg.Bytes(), + }) + if err != nil { + return fmt.Errorf("fail attempt: %w", err) + } + + stats.FailedAttempts++ + + default: + // If the attempt is not settled or failed, it is in flight. + stats.InFlightAttempts++ + } + + stats.TotalAttempts++ + + return nil +} + +// migrateRouteHop migrates a single route hop. +func migrateRouteHop(ctx context.Context, + attemptID int64, hopIndex int, hop *route.Hop, sqlDB SQLQueries, + stats *MigrationStats) error { + + // Convert channel ID to string representation of uint64. + // The SCID is stored as a decimal string to match the converter + // expectations (sql_converters.go:173). + scidStr := strconv.FormatUint(hop.ChannelID, 10) + + // Insert route hop. 
+ hopID, err := sqlDB.InsertRouteHop(ctx, sqlc.InsertRouteHopParams{ + HtlcAttemptIndex: attemptID, + HopIndex: int32(hopIndex), + PubKey: hop.PubKeyBytes[:], + Scid: scidStr, + OutgoingTimeLock: int32(hop.OutgoingTimeLock), + AmtToForward: int64(hop.AmtToForward), + MetaData: hop.Metadata, + }) + if err != nil { + return fmt.Errorf("insert hop: %w", err) + } + + // Check for blinded route data (route blinding). + if len(hop.EncryptedData) > 0 || hop.BlindingPoint != nil || + hop.TotalAmtMsat != 0 { + + var blindingPoint []byte + if hop.BlindingPoint != nil { + blindingPoint = hop.BlindingPoint.SerializeCompressed() + } + + var totalAmt sql.NullInt64 + if hop.TotalAmtMsat != 0 { + totalAmt = sql.NullInt64{ + Int64: int64(hop.TotalAmtMsat), + Valid: true, + } + } + + err := sqlDB.InsertRouteHopBlinded( + ctx, sqlc.InsertRouteHopBlindedParams{ + HopID: hopID, + EncryptedData: hop.EncryptedData, + BlindingPoint: blindingPoint, + BlindedPathTotalAmt: totalAmt, + }, + ) + if err != nil { + return fmt.Errorf("insert blinded hop: %w", err) + } + } + + // Check for MPP record. + if hop.MPP != nil { + paymentAddr := hop.MPP.PaymentAddr() + err = sqlDB.InsertRouteHopMpp(ctx, sqlc.InsertRouteHopMppParams{ + HopID: hopID, + PaymentAddr: paymentAddr[:], + TotalMsat: int64(hop.MPP.TotalMsat()), + }) + if err != nil { + return fmt.Errorf("insert MPP: %w", err) + } + } + + // Check for AMP record. + if hop.AMP != nil { + rootShare := hop.AMP.RootShare() + setID := hop.AMP.SetID() + err = sqlDB.InsertRouteHopAmp(ctx, sqlc.InsertRouteHopAmpParams{ + HopID: hopID, + RootShare: rootShare[:], + SetID: setID[:], + ChildIndex: int32(hop.AMP.ChildIndex()), + }) + if err != nil { + return fmt.Errorf("insert AMP: %w", err) + } + } + + // Check for custom records. 
+ if hop.CustomRecords != nil { + for tlvType, value := range hop.CustomRecords { + err = sqlDB.InsertPaymentHopCustomRecord( + ctx, + sqlc.InsertPaymentHopCustomRecordParams{ + HopID: hopID, + Key: int64(tlvType), + Value: value, + }, + ) + if err != nil { + return fmt.Errorf("insert hop custom "+ + "record: %w", err) + } + } + } + + stats.TotalHops++ + + return nil +} + +// migrateDuplicatePayments migrates duplicate payments into the dedicated +// payment_duplicates table. +func migrateDuplicatePayments(ctx context.Context, dupBucket kvdb.RBucket, + hash [32]byte, primaryPaymentID int64, sqlDB SQLQueries, + stats *MigrationStats) error { + + duplicateCount := 0 + + err := dupBucket.ForEach(func(seqBytes, _ []byte) error { + // The duplicates bucket should only contain nested buckets + // keyed by 8-byte sequence numbers. Skip any unexpected keys + // (defensive check for corrupted or malformed data). + if len(seqBytes) != 8 { + log.Warnf("Skipping unexpected key in duplicates "+ + "bucket for payment %x: key length %d, "+ + "expected 8", + hash[:8], len(seqBytes), + ) + + return nil + } + + seqNum := byteOrder.Uint64(seqBytes) + subBucket := dupBucket.NestedReadBucket(seqBytes) + if subBucket == nil { + return nil + } + + duplicateCount++ + log.Infof("Migrating duplicate payment seq=%d for "+ + "payment %x", seqNum, hash[:8]) + + err := migrateSingleDuplicatePayment( + ctx, subBucket, hash, primaryPaymentID, seqNum, + sqlDB, + ) + if err != nil { + return fmt.Errorf( + "migrate duplicate payment seq=%d: %w", + seqNum, err, + ) + } + + return nil + }) + + if duplicateCount > 0 { + stats.DuplicatePayments++ + stats.DuplicateEntries += int64(duplicateCount) + + log.Infof("Payment %x had %d duplicate(s) migrated", hash[:8], + duplicateCount) + } + + return err +} + +// migrateSingleDuplicatePayment inserts a duplicate payment record for the +// given payment hash into payment_duplicates. 
func migrateSingleDuplicatePayment(ctx context.Context, dupBucket kvdb.RBucket,
	hash [32]byte, primaryPaymentID int64, duplicateSeq uint64,
	sqlDB SQLQueries) error {

	// Every duplicate entry must carry creation info; without it the
	// record cannot be reconstructed, so treat its absence as a hard
	// error.
	creationData := dupBucket.Get(duplicatePaymentCreationInfoKey)
	if creationData == nil {
		return fmt.Errorf("duplicate payment seq=%d missing "+
			"creation info (payment=%x)", duplicateSeq, hash[:8])
	}

	creationInfo, err := deserializeDuplicatePaymentCreationInfo(
		bytes.NewReader(creationData),
	)
	if err != nil {
		return fmt.Errorf("deserialize duplicate creation "+
			"info: %w", err)
	}

	// Fetch the optional resolution data. At most one of settle/fail may
	// be present; both at once indicates a corrupt entry.
	settleData := dupBucket.Get(duplicatePaymentSettleInfoKey)
	failReasonData := dupBucket.Get(duplicatePaymentFailInfoKey)
	attemptData := dupBucket.Get(duplicatePaymentAttemptInfoKey)

	if settleData != nil && len(failReasonData) > 0 {
		return fmt.Errorf("duplicate payment seq=%d has both "+
			"settle and fail info (payment=%x)", duplicateSeq,
			hash[:8])
	}

	var (
		failReason     sql.NullInt32
		settlePreimage []byte
		settleTime     sql.NullTime
	)

	switch {
	case settleData != nil:
		// Settled duplicate. The settle data may be stored either as
		// a bare legacy preimage or as serialized settle info; the
		// parser handles both encodings.
		settlePreimage, settleTime, err = parseDuplicateSettleData(
			settleData,
		)
		if err != nil {
			return err
		}

	case len(failReasonData) > 0:
		// Failed duplicate: the first byte encodes the failure
		// reason.
		failReason = sql.NullInt32{
			Int32: int32(failReasonData[0]),
			Valid: true,
		}

	default:
		// If the duplicate payment has no settle or fail info,
		// we mark it as failed during the migration. Duplicate
		// payments were a bug in older versions of LND, so we can be
		// sure if a duplicate payment has no failure reason or
		// settlement data, the corresponding HTLC for this payment
		// has been failed (resolved).
		if attemptData == nil {
			log.Warnf("Duplicate payment seq=%d has no "+
				"attempt info and no resolution (payment=%x); "+
				"marking failed", duplicateSeq, hash[:8])
		} else {
			log.Warnf("Duplicate payment seq=%d has attempt "+
				"info but no resolution (payment=%x); "+
				"marking failed", duplicateSeq, hash[:8])
		}

		failReason = sql.NullInt32{
			Int32: int32(FailureReasonError),
			Valid: true,
		}
	}

	// Persist the duplicate record, linked to its primary payment row.
	_, err = sqlDB.InsertPaymentDuplicateMig(
		ctx, sqlc.InsertPaymentDuplicateMigParams{
			PaymentID:         primaryPaymentID,
			PaymentIdentifier: creationInfo.PaymentIdentifier[:],
			AmountMsat:        int64(creationInfo.Value),
			CreatedAt: normalizeTimeForSQL(
				creationInfo.CreationTime,
			),
			FailReason:     failReason,
			SettlePreimage: settlePreimage,
			SettleTime:     settleTime,
		},
	)
	if err != nil {
		return fmt.Errorf("insert duplicate payment: %w", err)
	}

	return nil
}

// parseDuplicateSettleData extracts settle data from either legacy or modern
// duplicate formats.
//
// Legacy entries store just the bare 32-byte preimage (no settle time is
// available, so the returned sql.NullTime is invalid); modern entries store
// serialized settle info from which both preimage and settle time are
// recovered.
func parseDuplicateSettleData(settleData []byte) ([]byte, sql.NullTime, error) {
	// Legacy format: the value is exactly one preimage. Copy it so the
	// caller does not alias the bucket-backed slice.
	if len(settleData) == lntypes.PreimageSize {
		return append([]byte(nil), settleData...), sql.NullTime{}, nil
	}

	settleInfo, err := deserializeHTLCSettleInfo(
		bytes.NewReader(settleData),
	)
	if err != nil {
		return nil, sql.NullTime{},
			fmt.Errorf("deserialize duplicate settle: %w", err)
	}

	settleTime := normalizeTimeForSQL(settleInfo.SettleTime)

	// A zero settle time is stored as NULL rather than as the zero value.
	return settleInfo.Preimage[:], sql.NullTime{
		Time:  settleTime,
		Valid: !settleTime.IsZero(),
	}, nil
}

// printMigrationSummary prints a summary of the migration.
+func printMigrationSummary(stats *MigrationStats) { + if stats.TotalPayments == 0 { + log.Infof("No payments migrated - database is empty") + + return + } + + log.Infof("========================================") + log.Infof(" Payment Migration Summary") + log.Infof("========================================") + log.Infof("Total Payments: %d", stats.TotalPayments) + log.Infof(" Successful: %d", stats.SuccessfulPayments) + log.Infof(" Failed: %d", stats.FailedPayments) + log.Infof(" In-Flight: %d", stats.InFlightPayments) + log.Infof(" Initiated: %d", stats.InitiatedPayments) + log.Infof("") + log.Infof("Total HTLC Attempts: %d", stats.TotalAttempts) + log.Infof(" Settled: %d", stats.SettledAttempts) + log.Infof(" Failed: %d", stats.FailedAttempts) + log.Infof(" In-Flight: %d", stats.InFlightAttempts) + log.Infof("") + log.Infof("Total Route Hops: %d", stats.TotalHops) + + if stats.DuplicatePayments > 0 { + log.Infof("") + log.Warnf("DUPLICATE PAYMENTS DETECTED:") + log.Warnf(" Unique payment hashes with duplicates: %d", + stats.DuplicatePayments) + log.Warnf(" Total duplicate entries migrated: %d", + stats.DuplicateEntries) + log.Warnf(" These were caused by an old LND bug.") + } + + log.Infof("") + log.Infof("Migration Duration: %v", stats.MigrationDuration) + log.Infof("========================================") +} From f1eb78bfe1cb315a167caa186da410f1c338a3c1 Mon Sep 17 00:00:00 2001 From: ziggie Date: Sun, 11 Jan 2026 00:05:29 +0100 Subject: [PATCH 05/10] payments/migration1: add migration test suite and helpers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add test helpers plus sql_migration_test coverage for KV→SQL migration. Basic migration, sequence ordering, data integrity, and feature-specific cases (MPP/AMP, custom records, blinded routes, metadata, failure messages). Also cover duplicate payment migration to payment_duplicates, including missing attempt info to ensure terminal failure is recorded. 
This gives broad regression coverage for the migration path and its edge-cases. --- payments/db/migration1/sql_migration_test.go | 2298 ++++++++++++++++++ payments/db/migration1/test_harness..go | 26 + payments/db/migration1/test_postgres.go | 94 + payments/db/migration1/test_sql.go | 59 + payments/db/migration1/test_sqlite.go | 73 + 5 files changed, 2550 insertions(+) create mode 100644 payments/db/migration1/sql_migration_test.go create mode 100644 payments/db/migration1/test_harness..go create mode 100644 payments/db/migration1/test_postgres.go create mode 100644 payments/db/migration1/test_sql.go create mode 100644 payments/db/migration1/test_sqlite.go diff --git a/payments/db/migration1/sql_migration_test.go b/payments/db/migration1/sql_migration_test.go new file mode 100644 index 00000000000..0f406fa96a7 --- /dev/null +++ b/payments/db/migration1/sql_migration_test.go @@ -0,0 +1,2298 @@ +//go:build test_db_postgres || test_db_sqlite + +package migration1 + +import ( + "bytes" + "context" + "crypto/sha256" + "fmt" + "io" + "sort" + "testing" + "time" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/lightningnetwork/lnd/kvdb" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/payments/db/migration1/sqlc" + "github.com/lightningnetwork/lnd/record" + "github.com/lightningnetwork/lnd/routing/route" + "github.com/lightningnetwork/lnd/sqldb" + "github.com/stretchr/testify/require" +) + +// TestMigrationKVToSQL tests the basic payment migration from KV to SQL. +func TestMigrationKVToSQL(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + // Setup KV database and populate with test data. + kvDB := setupTestKVDB(t) + populateTestPayments(t, kvDB, 5) + + sqlStore := setupTestSQLDB(t) + + // Run migration in a single transaction. 
+ err := runPaymentsMigration(ctx, kvDB, sqlStore) + require.NoError(t, err) +} + +// TestMigrationSequenceOrder ensures the migration follows sequence order +// rather than lexicographic hash order. +func TestMigrationSequenceOrder(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + kvDB := setupTestKVDB(t) + err := kvdb.Update(kvDB, func(tx kvdb.RwTx) error { + paymentsBucket, err := tx.CreateTopLevelBucket( + paymentsRootBucket, + ) + if err != nil { + return err + } + + indexBucket, err := tx.CreateTopLevelBucket(paymentsIndexBucket) + if err != nil { + return err + } + + var globalAttemptID uint64 + hash0 := [32]byte{} + hash1 := [32]byte{} + hash2 := [32]byte{} + hash0[0] = 3 + hash1[0] = 2 + hash2[0] = 1 + + if err := createTestPaymentInKV( + t, paymentsBucket, indexBucket, 0, hash0, + &globalAttemptID, + ); err != nil { + return err + } + + // We make sure that the duplicate payment is skipped because + // it will be migrated separately into payment_duplicates. + if err := createTestDuplicatePaymentWithIndex( + t, paymentsBucket, indexBucket, hash0, 1, false, + &globalAttemptID, + ); err != nil { + return err + } + if err := createTestPaymentInKV( + t, paymentsBucket, indexBucket, 2, hash1, + &globalAttemptID, + ); err != nil { + return err + } + if err := createTestPaymentInKV( + t, paymentsBucket, indexBucket, 3, hash2, + &globalAttemptID, + ); err != nil { + return err + } + + return nil + }, func() {}) + require.NoError(t, err) + + sqlStore := setupTestSQLDB(t) + + err = runPaymentsMigration(ctx, kvDB, sqlStore) + require.NoError(t, err) + + resp, err := sqlStore.QueryPayments(ctx, Query{ + MaxPayments: 10, + IncludeIncomplete: true, + }) + require.NoError(t, err) + require.Len(t, resp.Payments, 3) + + var ( + exp0 lntypes.Hash + exp1 lntypes.Hash + exp2 lntypes.Hash + ) + exp0[0] = 3 + exp1[0] = 2 + exp2[0] = 1 + + require.Equal(t, exp0, resp.Payments[0].Info.PaymentIdentifier) + require.Equal(t, exp1, 
resp.Payments[1].Info.PaymentIdentifier) + require.Equal(t, exp2, resp.Payments[2].Info.PaymentIdentifier) +} + +// TestMigrationDataIntegrity verifies that migrated payment data exactly +// matches the original KV data when fetched through the SQLStore +// (SQLStore.FetchPayment). This covers the SQLStore query path separately +// from the migration's own batch validation. +func TestMigrationDataIntegrity(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + // Setup KV database with test data. + kvDB := setupTestKVDB(t) + numPayments := populateTestPayments(t, kvDB, 5) + + // Fetch all payments from KV before migration. + kvPayments := fetchAllPaymentsFromKV(t, kvDB) + require.Len(t, kvPayments, numPayments) + + // Setup SQL database and run migration. + sqlStore := setupTestSQLDB(t) + + err := runPaymentsMigration(ctx, kvDB, sqlStore) + require.NoError(t, err) + + // Compare each KV payment with its SQL counterpart using deep equality. + // This ensures that ALL fields match, not just a few selected ones. + for _, kvPayment := range kvPayments { + comparePaymentData(t, ctx, sqlStore, kvPayment) + } +} + +// TestMigrationWithDuplicates tests migration of duplicate payments into +// the payment_duplicates table. +func TestMigrationWithDuplicates(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + // Setup KV database. + kvDB := setupTestKVDB(t) + + // Create a payment with duplicates. + hash := createTestPaymentHash(t, 0) + err := kvdb.Update(kvDB, func(tx kvdb.RwTx) error { + // Create root buckets. + paymentsBucket, err := tx.CreateTopLevelBucket( + paymentsRootBucket, + ) + if err != nil { + return err + } + + indexBucket, err := tx.CreateTopLevelBucket(paymentsIndexBucket) + if err != nil { + return err + } + + // Create primary payment with sequence 0 and globally unique + // attempt ID. 
+ var globalAttemptID uint64 + err = createTestPaymentInKV( + t, paymentsBucket, indexBucket, 0, hash, + &globalAttemptID, + ) + if err != nil { + return err + } + + // Add 2 duplicate payments for the same hash. + paymentBucket := paymentsBucket.NestedReadWriteBucket(hash[:]) + require.NotNil(t, paymentBucket) + + dupBucket, err := paymentBucket.CreateBucketIfNotExists( + duplicatePaymentsBucket, + ) + if err != nil { + return err + } + + // Create duplicate with sequence 1 using global attempt ID. + err = createTestDuplicatePayment( + t, dupBucket, hash, 1, true, &globalAttemptID, + ) + if err != nil { + return err + } + + // Create duplicate with sequence 2 using global attempt ID. + err = createTestDuplicatePayment( + t, dupBucket, hash, 2, false, &globalAttemptID, + ) + if err != nil { + return err + } + + return nil + }, func() {}) + require.NoError(t, err) + + sqlStore := setupTestSQLDB(t) + + // Run migration. + err = runPaymentsMigration(ctx, kvDB, sqlStore) + require.NoError(t, err) + + // Verify in SQL database. 
+ var count int64 + err = sqlStore.db.ExecTx( + ctx, sqldb.ReadTxOpt(), func(q SQLQueries) error { + var err error + count, err = q.CountPayments(ctx) + return err + }, sqldb.NoOpReset, + ) + require.NoError(t, err) + require.Equal( + t, int64(1), count, "SQL DB should have 1 payment", + ) + + var ( + dbPayment sqlc.FetchPaymentRow + duplicates []sqlc.PaymentDuplicate + ) + err = sqlStore.db.ExecTx( + ctx, sqldb.ReadTxOpt(), func(q SQLQueries) error { + var err error + dbPayment, err = q.FetchPayment(ctx, hash[:]) + if err != nil { + return err + } + + duplicates, err = q.FetchPaymentDuplicates( + ctx, dbPayment.Payment.ID, + ) + return err + }, sqldb.NoOpReset, + ) + require.NoError(t, err) + + require.Len(t, duplicates, 2) + sort.SliceStable(duplicates, func(i, j int) bool { + return duplicates[i].AmountMsat < duplicates[j].AmountMsat + }) + + require.Equal(t, hash[:], duplicates[0].PaymentIdentifier) + require.Equal(t, int64(2001000), duplicates[0].AmountMsat) + require.False(t, duplicates[0].FailReason.Valid) + require.NotEmpty(t, duplicates[0].SettlePreimage) + + require.Equal(t, hash[:], duplicates[1].PaymentIdentifier) + require.Equal(t, int64(2002000), duplicates[1].AmountMsat) + require.True(t, duplicates[1].FailReason.Valid) + require.Equal( + t, int32(FailureReasonError), + duplicates[1].FailReason.Int32, + ) + require.Empty(t, duplicates[1].SettlePreimage) +} + +// TestDuplicatePaymentsWithoutAttemptInfo verifies duplicate payments without +// attempt info are migrated with terminal failure reasons. 
+func TestDuplicatePaymentsWithoutAttemptInfo(t *testing.T) { + t.Parallel() + + ctx := context.Background() + kvDB := setupTestKVDB(t) + + hash := createTestPaymentHash(t, 0) + + err := kvdb.Update(kvDB, func(tx kvdb.RwTx) error { + paymentsBucket, err := tx.CreateTopLevelBucket( + paymentsRootBucket, + ) + if err != nil { + return err + } + + indexBucket, err := tx.CreateTopLevelBucket(paymentsIndexBucket) + if err != nil { + return err + } + + var globalAttemptID uint64 + err = createTestPaymentInKV( + t, paymentsBucket, indexBucket, 1, hash, + &globalAttemptID, + ) + if err != nil { + return err + } + + paymentBucket := paymentsBucket.NestedReadWriteBucket( + hash[:], + ) + require.NotNil(t, paymentBucket) + + dupBucket, err := paymentBucket.CreateBucketIfNotExists( + duplicatePaymentsBucket, + ) + if err != nil { + return err + } + + if err := createDuplicateWithoutAttemptInfo( + t, dupBucket, hash, 2, true, false, + ); err != nil { + return err + } + if err := createDuplicateWithoutAttemptInfo( + t, dupBucket, hash, 3, false, true, + ); err != nil { + return err + } + if err := createDuplicateWithoutAttemptInfo( + t, dupBucket, hash, 4, false, false, + ); err != nil { + return err + } + + return nil + }, func() {}) + require.NoError(t, err) + + sqlStore := setupTestSQLDB(t) + err = runPaymentsMigration(ctx, kvDB, sqlStore) + require.NoError(t, err) + + var ( + dbPayment sqlc.FetchPaymentRow + duplicates []sqlc.PaymentDuplicate + ) + err = sqlStore.db.ExecTx( + ctx, sqldb.ReadTxOpt(), func(q SQLQueries) error { + var err error + dbPayment, err = q.FetchPayment(ctx, hash[:]) + if err != nil { + return err + } + + duplicates, err = q.FetchPaymentDuplicates( + ctx, dbPayment.Payment.ID, + ) + return err + }, sqldb.NoOpReset, + ) + require.NoError(t, err) + + require.Len(t, duplicates, 3) + sort.SliceStable(duplicates, func(i, j int) bool { + return duplicates[i].AmountMsat < duplicates[j].AmountMsat + }) + + require.Equal(t, int64(2002000), 
duplicates[0].AmountMsat) + require.NotEmpty(t, duplicates[0].SettlePreimage) + require.False(t, duplicates[0].FailReason.Valid) + + require.Equal(t, int64(2003000), duplicates[1].AmountMsat) + require.True(t, duplicates[1].FailReason.Valid) + require.Equal( + t, int32(FailureReasonNoRoute), + duplicates[1].FailReason.Int32, + ) + require.Empty(t, duplicates[1].SettlePreimage) + + require.Equal(t, int64(2004000), duplicates[2].AmountMsat) + require.True(t, duplicates[2].FailReason.Valid) + require.Equal( + t, int32(FailureReasonError), + duplicates[2].FailReason.Int32, + ) + require.Empty(t, duplicates[2].SettlePreimage) +} + +// TestMigratePaymentWithMPP tests migration of a payment with MPP (multi-path +// payment) records. +func TestMigratePaymentWithMPP(t *testing.T) { + t.Parallel() + + ctx := context.Background() + kvDB := setupTestKVDB(t) + + // Create a payment with MPP. + var paymentHash [32]byte + copy(paymentHash[:], []byte("test_mpp_payment_hash_12345")) + + err := kvdb.Update(kvDB, func(tx kvdb.RwTx) error { + paymentsBucket, err := tx.CreateTopLevelBucket( + paymentsRootBucket, + ) + if err != nil { + return err + } + + indexBucket, err := tx.CreateTopLevelBucket(paymentsIndexBucket) + if err != nil { + return err + } + + return createPaymentWithMPP( + t, paymentsBucket, indexBucket, paymentHash, + ) + }, func() {}) + require.NoError(t, err) + + // Run migration. + sqlStore := setupTestSQLDB(t) + + err = runPaymentsMigration(ctx, kvDB, sqlStore) + require.NoError(t, err) + + // Verify payment matches. + assertPaymentDataMatches(t, ctx, kvDB, sqlStore, paymentHash) +} + +// TestMigratePaymentWithAMP tests migration of a payment with AMP (atomic +// multi-path) records. 
+func TestMigratePaymentWithAMP(t *testing.T) { + t.Parallel() + + ctx := context.Background() + kvDB := setupTestKVDB(t) + + var paymentHash [32]byte + copy(paymentHash[:], []byte("test_amp_payment_hash_12345")) + + err := kvdb.Update(kvDB, func(tx kvdb.RwTx) error { + paymentsBucket, err := tx.CreateTopLevelBucket( + paymentsRootBucket, + ) + if err != nil { + return err + } + + indexBucket, err := tx.CreateTopLevelBucket(paymentsIndexBucket) + if err != nil { + return err + } + + return createPaymentWithAMP( + t, paymentsBucket, indexBucket, paymentHash, + ) + }, func() {}) + require.NoError(t, err) + + sqlStore := setupTestSQLDB(t) + + err = runPaymentsMigration(ctx, kvDB, sqlStore) + require.NoError(t, err) + + assertPaymentDataMatches(t, ctx, kvDB, sqlStore, paymentHash) +} + +// TestMigratePaymentWithAMPSignedChildIndex tests migration of an AMP payment +// where the child index has the signed bit set. +func TestMigratePaymentWithAMPSignedChildIndex(t *testing.T) { + t.Parallel() + + ctx := context.Background() + kvDB := setupTestKVDB(t) + + var paymentHash [32]byte + copy(paymentHash[:], []byte("test_amp_child_idx_8000")) + + const childIndex = uint32(0x80000001) + + err := kvdb.Update(kvDB, func(tx kvdb.RwTx) error { + paymentsBucket, err := tx.CreateTopLevelBucket( + paymentsRootBucket, + ) + if err != nil { + return err + } + + indexBucket, err := tx.CreateTopLevelBucket(paymentsIndexBucket) + if err != nil { + return err + } + + return createPaymentWithAMPChildIndex( + t, paymentsBucket, indexBucket, paymentHash, childIndex, + ) + }, func() {}) + require.NoError(t, err) + + sqlStore := setupTestSQLDB(t) + + err = runPaymentsMigration(ctx, kvDB, sqlStore) + require.NoError(t, err) + + assertPaymentDataMatches(t, ctx, kvDB, sqlStore, paymentHash) +} + +// TestMigratePaymentWithCustomRecords tests migration of a payment with custom +// records. 
+func TestMigratePaymentWithCustomRecords(t *testing.T) { + t.Parallel() + + ctx := context.Background() + kvDB := setupTestKVDB(t) + + var paymentHash [32]byte + copy(paymentHash[:], []byte("test_custom_records_hash_12")) + + err := kvdb.Update(kvDB, func(tx kvdb.RwTx) error { + paymentsBucket, err := tx.CreateTopLevelBucket( + paymentsRootBucket, + ) + if err != nil { + return err + } + + indexBucket, err := tx.CreateTopLevelBucket(paymentsIndexBucket) + if err != nil { + return err + } + + return createPaymentWithCustomRecords( + t, paymentsBucket, indexBucket, paymentHash, + ) + }, func() {}) + require.NoError(t, err) + + sqlStore := setupTestSQLDB(t) + + err = runPaymentsMigration(ctx, kvDB, sqlStore) + require.NoError(t, err) + + assertPaymentDataMatches(t, ctx, kvDB, sqlStore, paymentHash) +} + +// TestMigratePaymentWithBlindedRoute tests migration of a payment with blinded +// route. +func TestMigratePaymentWithBlindedRoute(t *testing.T) { + t.Parallel() + + ctx := context.Background() + kvDB := setupTestKVDB(t) + + var paymentHash [32]byte + copy(paymentHash[:], []byte("test_blinded_route_hash_123")) + + err := kvdb.Update(kvDB, func(tx kvdb.RwTx) error { + paymentsBucket, err := tx.CreateTopLevelBucket( + paymentsRootBucket, + ) + if err != nil { + return err + } + + indexBucket, err := tx.CreateTopLevelBucket(paymentsIndexBucket) + if err != nil { + return err + } + + return createPaymentWithBlindedRoute( + t, paymentsBucket, indexBucket, paymentHash, + ) + }, func() {}) + require.NoError(t, err) + + sqlStore := setupTestSQLDB(t) + + err = runPaymentsMigration(ctx, kvDB, sqlStore) + require.NoError(t, err) + + assertPaymentDataMatches(t, ctx, kvDB, sqlStore, paymentHash) +} + +// TestMigratePaymentWithMetadata tests migration of a payment with hop +// metadata. 
+func TestMigratePaymentWithMetadata(t *testing.T) { + t.Parallel() + + ctx := context.Background() + kvDB := setupTestKVDB(t) + + var paymentHash [32]byte + copy(paymentHash[:], []byte("test_metadata_payment_hash_")) + + err := kvdb.Update(kvDB, func(tx kvdb.RwTx) error { + paymentsBucket, err := tx.CreateTopLevelBucket( + paymentsRootBucket, + ) + if err != nil { + return err + } + + indexBucket, err := tx.CreateTopLevelBucket(paymentsIndexBucket) + if err != nil { + return err + } + + return createPaymentWithMetadata( + t, paymentsBucket, indexBucket, paymentHash, + ) + }, func() {}) + require.NoError(t, err) + + sqlStore := setupTestSQLDB(t) + + err = runPaymentsMigration(ctx, kvDB, sqlStore) + require.NoError(t, err) + + assertPaymentDataMatches(t, ctx, kvDB, sqlStore, paymentHash) +} + +// TestMigratePaymentWithAllFeatures tests migration with all optional +// features enabled. +func TestMigratePaymentWithAllFeatures(t *testing.T) { + t.Parallel() + + ctx := context.Background() + kvDB := setupTestKVDB(t) + + var paymentHash [32]byte + copy(paymentHash[:], []byte("test_all_features_hash_1234")) + + err := kvdb.Update(kvDB, func(tx kvdb.RwTx) error { + paymentsBucket, err := tx.CreateTopLevelBucket( + paymentsRootBucket, + ) + if err != nil { + return err + } + + indexBucket, err := tx.CreateTopLevelBucket(paymentsIndexBucket) + if err != nil { + return err + } + + return createPaymentWithAllFeatures( + t, paymentsBucket, indexBucket, paymentHash, + ) + }, func() {}) + require.NoError(t, err) + + sqlStore := setupTestSQLDB(t) + + err = runPaymentsMigration(ctx, kvDB, sqlStore) + require.NoError(t, err) + + assertPaymentDataMatches(t, ctx, kvDB, sqlStore, paymentHash) +} + +// TestMigratePaymentFeatureCombinations tests selected feature combinations +// in a single migration to cover interactions without random data. 
+func TestMigratePaymentFeatureCombinations(t *testing.T) { + t.Parallel() + + ctx := context.Background() + kvDB := setupTestKVDB(t) + + cases := []paymentFeatureSet{ + { + name: "mpp_custom", + mpp: true, + customRecords: true, + }, + { + name: "amp_blinded", + amp: true, + blindedRoute: true, + }, + { + name: "custom_metadata", + customRecords: true, + hopMetadata: true, + }, + { + name: "blinded_metadata", + blindedRoute: true, + hopMetadata: true, + }, + { + name: "mpp_metadata", + mpp: true, + hopMetadata: true, + }, + } + + hashes := make([][32]byte, 0, len(cases)) + err := kvdb.Update(kvDB, func(tx kvdb.RwTx) error { + paymentsBucket, err := tx.CreateTopLevelBucket( + paymentsRootBucket, + ) + if err != nil { + return err + } + + indexBucket, err := tx.CreateTopLevelBucket(paymentsIndexBucket) + if err != nil { + return err + } + + var globalAttemptID uint64 + for i, c := range cases { + hash := sha256.Sum256([]byte(c.name)) + hashes = append(hashes, hash) + + err := createPaymentWithFeatureSet( + t, paymentsBucket, indexBucket, hash, + uint64(10+i), c, &globalAttemptID, + ) + if err != nil { + return err + } + } + + return nil + }, func() {}) + require.NoError(t, err) + + sqlStore := setupTestSQLDB(t) + + err = runPaymentsMigration(ctx, kvDB, sqlStore) + require.NoError(t, err) + + for _, hash := range hashes { + assertPaymentDataMatches(t, ctx, kvDB, sqlStore, hash) + } +} + +// TestMigratePaymentWithFailureMessage tests migration of a payment with a +// failed HTLC that includes a failure message. +func TestMigratePaymentWithFailureMessage(t *testing.T) { + t.Parallel() + + ctx := context.Background() + kvDB := setupTestKVDB(t) + + var paymentHash [32]byte + copy(paymentHash[:], []byte("test_fail_msg_hash_123456789")) + + // Create a payment with a failed HTLC. 
+ err := kvdb.Update(kvDB, func(tx kvdb.RwTx) error { + paymentsBucket, err := tx.CreateTopLevelBucket( + paymentsRootBucket, + ) + if err != nil { + return err + } + + indexBucket, err := tx.CreateTopLevelBucket(paymentsIndexBucket) + if err != nil { + return err + } + + // Create payment bucket. + paymentBucket, err := paymentsBucket.CreateBucketIfNotExists( + paymentHash[:], + ) + if err != nil { + return err + } + + // Add creation info. + var paymentID lntypes.Hash + copy(paymentID[:], paymentHash[:]) + + creationInfo := &PaymentCreationInfo{ + PaymentIdentifier: paymentID, + Value: lnwire.MilliSatoshi(1000000), + CreationTime: time.Now().Add(-24 * time.Hour), + PaymentRequest: []byte("lnbc10utest"), + } + + // Use a separate buffer for payment creation info to avoid + // reuse issues when serializing HTLC attempts later. + var creationInfoBuf bytes.Buffer + err = serializePaymentCreationInfo( + &creationInfoBuf, creationInfo, + ) + if err != nil { + return err + } + + serialized := creationInfoBuf.Bytes() + + err = paymentBucket.Put( + paymentCreationInfoKey, serialized, + ) + if err != nil { + return err + } + + // Add sequence number. + seqBytes := make([]byte, 8) + byteOrder.PutUint64(seqBytes, 50) + err = paymentBucket.Put(paymentSequenceKey, seqBytes) + if err != nil { + return err + } + + // Add payment-level failure reason. + failReasonBytes := []byte{byte(FailureReasonNoRoute)} + err = paymentBucket.Put( + paymentFailInfoKey, failReasonBytes, + ) + if err != nil { + return err + } + + // Create HTLC bucket with one failed attempt. + htlcBucket, err := paymentBucket.CreateBucketIfNotExists( + paymentHtlcsBucket, + ) + if err != nil { + return err + } + + // Create the failed attempt with a failure message. 
+ attemptID := uint64(500) + sessionKey, err := btcec.NewPrivateKey() + if err != nil { + return err + } + + var sessionKeyBytes [32]byte + copy(sessionKeyBytes[:], sessionKey.Serialize()) + + var sourcePubKey route.Vertex + copy(sourcePubKey[:], sessionKey.PubKey().SerializeCompressed()) + + hopKey, err := btcec.NewPrivateKey() + if err != nil { + return err + } + + // Create a proper copy of the hash instead of referencing + // the local variable directly. + attemptHash := new(lntypes.Hash) + copy(attemptHash[:], paymentHash[:]) + + //nolint:ll + attemptInfo := &HTLCAttemptInfo{ + AttemptID: attemptID, + sessionKey: sessionKeyBytes, + Route: route.Route{ + TotalTimeLock: 500000, + TotalAmount: 900, + SourcePubKey: sourcePubKey, + Hops: []*route.Hop{ + { + PubKeyBytes: route.NewVertex(hopKey.PubKey()), + ChannelID: 12345, + OutgoingTimeLock: 499500, + AmtToForward: 850, + }, + }, + }, + AttemptTime: time.Now().Add(-2 * time.Hour), + Hash: attemptHash, + } + + // Write attempt info. + attemptKey := make([]byte, len(htlcAttemptInfoKey)+8) + copy(attemptKey, htlcAttemptInfoKey) + byteOrder.PutUint64( + attemptKey[len(htlcAttemptInfoKey):], attemptID, + ) + + var b bytes.Buffer + err = serializeHTLCAttemptInfo(&b, attemptInfo) + if err != nil { + return err + } + err = htlcBucket.Put(attemptKey, b.Bytes()) + if err != nil { + return err + } + + // Add failure info with a message. + //nolint:ll + failInfo := &HTLCFailInfo{ + FailTime: time.Now().Add(-1 * time.Hour), + Message: &lnwire.FailTemporaryChannelFailure{}, + Reason: HTLCFailMessage, + FailureSourceIndex: 1, + } + + failKey := make([]byte, len(htlcFailInfoKey)+8) + copy(failKey, htlcFailInfoKey) + byteOrder.PutUint64(failKey[len(htlcFailInfoKey):], attemptID) + + b.Reset() + if err := serializeHTLCFailInfo(&b, failInfo); err != nil { + return err + } + if err := htlcBucket.Put(failKey, b.Bytes()); err != nil { + return err + } + + // Create index entry. 
+ var idx bytes.Buffer + if err := WriteElements( + &idx, paymentIndexTypeHash, paymentHash[:], + ); err != nil { + return err + } + return indexBucket.Put(seqBytes, idx.Bytes()) + }, func() {}) + require.NoError(t, err) + + // Migrate to SQL. + sqlStore := setupTestSQLDB(t) + + err = runPaymentsMigration(ctx, kvDB, sqlStore) + require.NoError(t, err) + + // Verify data matches. + assertPaymentDataMatches(t, ctx, kvDB, sqlStore, paymentHash) +} + +// setupTestKVDB creates a temporary KV database for testing. +func setupTestKVDB(t *testing.T) kvdb.Backend { + t.Helper() + + backend, cleanup, err := kvdb.GetTestBackend(t.TempDir(), "payments") + require.NoError(t, err) + t.Cleanup(cleanup) + + return backend +} + +// populateTestPayments populates the KV database with test payment data. +func populateTestPayments(t *testing.T, db kvdb.Backend, numPayments int) int { + t.Helper() + + err := kvdb.Update(db, func(tx kvdb.RwTx) error { + // Create root buckets. + paymentsBucket, err := tx.CreateTopLevelBucket( + paymentsRootBucket, + ) + if err != nil { + return err + } + + indexBucket, err := tx.CreateTopLevelBucket(paymentsIndexBucket) + if err != nil { + return err + } + + // Create test payments with globally unique attempt IDs. + var globalAttemptID uint64 + for i := 0; i < numPayments; i++ { + hash := createTestPaymentHash(t, i) + + err := createTestPaymentInKV( + t, paymentsBucket, indexBucket, uint64(i), hash, + &globalAttemptID, + ) + if err != nil { + return err + } + } + + return nil + }, func() {}) + + require.NoError(t, err) + return numPayments +} + +// serializeDuplicatePaymentCreationInfo serializes PaymentCreationInfo for +// duplicate payments. The time is stored in seconds (not nanoseconds) to match +// the format used by deserializeDuplicatePaymentCreationInfo in the KV store. 
+func serializeDuplicatePaymentCreationInfo(w io.Writer, + c *PaymentCreationInfo) error { + + var scratch [8]byte + + if _, err := w.Write(c.PaymentIdentifier[:]); err != nil { + return err + } + + byteOrder.PutUint64(scratch[:], uint64(c.Value)) + if _, err := w.Write(scratch[:]); err != nil { + return err + } + + // Store time in seconds (not nanoseconds) for duplicate payments. + // This matches the deserialization format used in + // deserializeDuplicatePaymentCreationInfo. + var unixSec int64 + if !c.CreationTime.IsZero() { + unixSec = c.CreationTime.Unix() + } + byteOrder.PutUint64(scratch[:], uint64(unixSec)) + if _, err := w.Write(scratch[:]); err != nil { + return err + } + + byteOrder.PutUint32(scratch[:4], uint32(len(c.PaymentRequest))) + if _, err := w.Write(scratch[:4]); err != nil { + return err + } + + if _, err := w.Write(c.PaymentRequest); err != nil { + return err + } + + return nil +} + +// createTestPaymentHash creates a deterministic payment hash for testing. +func createTestPaymentHash(t *testing.T, seed int) [32]byte { + t.Helper() + + hash := sha256.Sum256([]byte{byte(seed)}) + return hash +} + +// createTestPaymentInKV creates a single payment in the KV store. +func createTestPaymentInKV(t *testing.T, paymentsBucket, + indexBucket kvdb.RwBucket, seqNum uint64, hash [32]byte, + globalAttemptID *uint64) error { + + t.Helper() + + // Create payment bucket. + paymentBucket, err := paymentsBucket.CreateBucketIfNotExists(hash[:]) + if err != nil { + return err + } + + // Create payment creation info. + var paymentID lntypes.Hash + copy(paymentID[:], hash[:]) + + creationInfo := &PaymentCreationInfo{ + PaymentIdentifier: paymentID, + Value: lnwire.MilliSatoshi(1000000 + seqNum*1000), + CreationTime: time.Now().Add(-24 * time.Hour), + PaymentRequest: []byte("lnbc1test"), + } + + // Serialize and write creation info. 
+ var b bytes.Buffer + err = serializePaymentCreationInfo(&b, creationInfo) + if err != nil { + return err + } + err = paymentBucket.Put(paymentCreationInfoKey, b.Bytes()) + if err != nil { + return err + } + + // Store sequence number. + seqBytes := make([]byte, 8) + byteOrder.PutUint64(seqBytes, seqNum) + err = paymentBucket.Put(paymentSequenceKey, seqBytes) + if err != nil { + return err + } + + // Add one HTLC attempt for each payment with globally unique ID. + htlcBucket, err := paymentBucket.CreateBucketIfNotExists( + paymentHtlcsBucket, + ) + if err != nil { + return err + } + + // Increment global attempt ID and create HTLC attempt. So we have a + // globally unique attempt ID for the HTLC attempt. + *globalAttemptID++ + err = createTestHTLCAttempt( + t, htlcBucket, hash, *globalAttemptID, seqNum%3 == 0, + ) + if err != nil { + return err + } + + var idx bytes.Buffer + err = WriteElements(&idx, paymentIndexTypeHash, hash[:]) + if err != nil { + return err + } + + return indexBucket.Put(seqBytes, idx.Bytes()) +} + +// createTestHTLCAttempt creates a test HTLC attempt in the KV store. +func createTestHTLCAttempt(t *testing.T, htlcBucket kvdb.RwBucket, + paymentHash [32]byte, attemptID uint64, shouldSettle bool) error { + t.Helper() + + // Generate a session key. + sessionKey, err := btcec.NewPrivateKey() + require.NoError(t, err) + + // Create a simple 2-hop route. + hop1Key, err := btcec.NewPrivateKey() + require.NoError(t, err) + + hop2Key, err := btcec.NewPrivateKey() + require.NoError(t, err) + + var sourcePubKey route.Vertex + copy(sourcePubKey[:], sessionKey.PubKey().SerializeCompressed()) + + // Convert session key to [32]byte. 
+ var sessionKeyBytes [32]byte + copy(sessionKeyBytes[:], sessionKey.Serialize()) + + attemptInfo := &HTLCAttemptInfo{ + AttemptID: attemptID, + sessionKey: sessionKeyBytes, + Route: route.Route{ + TotalTimeLock: 500000, + TotalAmount: 900, + SourcePubKey: sourcePubKey, + Hops: []*route.Hop{ + { + PubKeyBytes: route.NewVertex( + hop1Key.PubKey(), + ), + ChannelID: 12345, + OutgoingTimeLock: 499500, + AmtToForward: 850, + }, + { + PubKeyBytes: route.NewVertex( + hop2Key.PubKey(), + ), + ChannelID: 67890, + OutgoingTimeLock: 499000, + AmtToForward: 800, + }, + }, + }, + AttemptTime: time.Now().Add(-2 * time.Hour), + Hash: (*lntypes.Hash)(&paymentHash), + } + + // Serialize and write attempt info. + attemptKey := make([]byte, len(htlcAttemptInfoKey)+8) + copy(attemptKey, htlcAttemptInfoKey) + byteOrder.PutUint64(attemptKey[len(htlcAttemptInfoKey):], attemptID) + + var b bytes.Buffer + err = serializeHTLCAttemptInfo(&b, attemptInfo) + if err != nil { + return err + } + err = htlcBucket.Put(attemptKey, b.Bytes()) + if err != nil { + return err + } + + // Add settlement if requested. + if shouldSettle { + settleInfo := &HTLCSettleInfo{ + Preimage: lntypes.Preimage(paymentHash), + SettleTime: time.Now().Add(-1 * time.Hour), + } + + settleKey := make([]byte, len(htlcSettleInfoKey)+8) + copy(settleKey, htlcSettleInfoKey) + byteOrder.PutUint64( + settleKey[len(htlcSettleInfoKey):], attemptID, + ) + + var sb bytes.Buffer + err = serializeHTLCSettleInfo(&sb, settleInfo) + if err != nil { + return err + } + err = htlcBucket.Put(settleKey, sb.Bytes()) + if err != nil { + return err + } + } + + return nil +} + +// createTestDuplicatePaymentWithIndex creates a duplicate payment and adds +// a matching entry into the global payment sequence index. 
+func createTestDuplicatePaymentWithIndex(t *testing.T, + paymentsBucket kvdb.RwBucket, indexBucket kvdb.RwBucket, + paymentHash [32]byte, seqNum uint64, shouldSettle bool, + globalAttemptID *uint64) error { + t.Helper() + + paymentBucket, err := paymentsBucket.CreateBucketIfNotExists( + paymentHash[:], + ) + if err != nil { + return err + } + + dupBucket, err := paymentBucket.CreateBucketIfNotExists( + duplicatePaymentsBucket, + ) + if err != nil { + return err + } + + if err := createTestDuplicatePayment( + t, dupBucket, paymentHash, seqNum, shouldSettle, + globalAttemptID, + ); err != nil { + return err + } + + seqBytes := make([]byte, 8) + byteOrder.PutUint64(seqBytes, seqNum) + var idx bytes.Buffer + if err := WriteElements( + &idx, paymentIndexTypeHash, paymentHash[:], + ); err != nil { + return err + } + + return indexBucket.Put(seqBytes, idx.Bytes()) +} + +// createTestDuplicatePayment creates a duplicate payment in the KV store. +func createTestDuplicatePayment(t *testing.T, + dupBucket kvdb.RwBucket, paymentHash [32]byte, seqNum uint64, + shouldSettle bool, globalAttemptID *uint64) error { + + t.Helper() + + // Create bucket for this duplicate using sequence number as key. + seqBytes := make([]byte, 8) + byteOrder.PutUint64(seqBytes, seqNum) + + dupPaymentBucket, err := dupBucket.CreateBucketIfNotExists(seqBytes) + if err != nil { + return err + } + + // Store sequence number. + err = dupPaymentBucket.Put(duplicatePaymentSequenceKey, seqBytes) + if err != nil { + return err + } + + // Create payment creation info. 
+ var paymentID lntypes.Hash + copy(paymentID[:], paymentHash[:]) + + creationInfo := &PaymentCreationInfo{ + PaymentIdentifier: paymentID, + Value: lnwire.MilliSatoshi(2000000 + seqNum*1000), + CreationTime: time.Now().Add(-48 * time.Hour), + PaymentRequest: []byte("lnbc1duplicate"), + } + + var b bytes.Buffer + err = serializeDuplicatePaymentCreationInfo(&b, creationInfo) + if err != nil { + return err + } + err = dupPaymentBucket.Put(duplicatePaymentCreationInfoKey, b.Bytes()) + if err != nil { + return err + } + + // Generate a session key for the duplicate attempt. + sessionKey, err := btcec.NewPrivateKey() + require.NoError(t, err) + + // Create route for duplicate. + hop1Key, err := btcec.NewPrivateKey() + require.NoError(t, err) + + hop2Key, err := btcec.NewPrivateKey() + require.NoError(t, err) + + var sourcePubKey route.Vertex + copy(sourcePubKey[:], sessionKey.PubKey().SerializeCompressed()) + + var sessionKeyBytes [32]byte + copy(sessionKeyBytes[:], sessionKey.Serialize()) + + // Use globally unique attempt ID. + *globalAttemptID++ + attemptID := *globalAttemptID + + duplicateAttempt := &duplicateHTLCAttemptInfo{ + attemptID: attemptID, + sessionKey: sessionKeyBytes, + route: route.Route{ + TotalTimeLock: 500000, + TotalAmount: 900, + SourcePubKey: sourcePubKey, + Hops: []*route.Hop{ + { + PubKeyBytes: route.NewVertex( + hop1Key.PubKey(), + ), + ChannelID: 12345, + OutgoingTimeLock: 499500, + AmtToForward: 850, + }, + { + PubKeyBytes: route.NewVertex( + hop2Key.PubKey(), + ), + ChannelID: 67890, + OutgoingTimeLock: 499000, + AmtToForward: 800, + }, + }, + }, + } + + // Serialize and write attempt info (using existing WriteElements + // and SerializeRoute). 
+ var ab bytes.Buffer + if err := WriteElements( + &ab, duplicateAttempt.attemptID, + duplicateAttempt.sessionKey, + ); err != nil { + return err + } + if err := SerializeRoute(&ab, duplicateAttempt.route); err != nil { + return err + } + err = dupPaymentBucket.Put(duplicatePaymentAttemptInfoKey, ab.Bytes()) + if err != nil { + return err + } + + // Add settlement if requested. + if shouldSettle { + settleInfo := &HTLCSettleInfo{ + Preimage: lntypes.Preimage(paymentHash), + SettleTime: time.Now().Add(-1 * time.Hour), + } + + var sb bytes.Buffer + err = serializeHTLCSettleInfo(&sb, settleInfo) + if err != nil { + return err + } + err = dupPaymentBucket.Put( + duplicatePaymentSettleInfoKey, sb.Bytes(), + ) + if err != nil { + return err + } + } + + return nil +} + +// createDuplicateWithoutAttemptInfo creates a duplicate payment bucket with +// settle/fail info but without attempt info. +func createDuplicateWithoutAttemptInfo(t *testing.T, + dupBucket kvdb.RwBucket, paymentHash [32]byte, seqNum uint64, + shouldSettle bool, shouldFail bool) error { + t.Helper() + + seqBytes := make([]byte, 8) + byteOrder.PutUint64(seqBytes, seqNum) + + dupPaymentBucket, err := dupBucket.CreateBucketIfNotExists(seqBytes) + if err != nil { + return err + } + + if err := dupPaymentBucket.Put( + duplicatePaymentSequenceKey, seqBytes, + ); err != nil { + return err + } + + var paymentID lntypes.Hash + copy(paymentID[:], paymentHash[:]) + + creationInfo := &PaymentCreationInfo{ + PaymentIdentifier: paymentID, + Value: lnwire.MilliSatoshi(2000000 + seqNum*1000), + CreationTime: time.Now().Add(-48 * time.Hour), + PaymentRequest: []byte("lnbc1duplicate"), + } + + var b bytes.Buffer + err = serializeDuplicatePaymentCreationInfo(&b, creationInfo) + if err != nil { + return err + } + if err := dupPaymentBucket.Put( + duplicatePaymentCreationInfoKey, b.Bytes(), + ); err != nil { + return err + } + + switch { + case shouldSettle && shouldFail: + return fmt.Errorf("invalid duplicate state") + case 
shouldSettle: + settleInfo := &HTLCSettleInfo{ + Preimage: lntypes.Preimage(paymentHash), + SettleTime: time.Now().Add(-1 * time.Hour), + } + + var sb bytes.Buffer + err = serializeHTLCSettleInfo(&sb, settleInfo) + if err != nil { + return err + } + if err := dupPaymentBucket.Put( + duplicatePaymentSettleInfoKey, sb.Bytes(), + ); err != nil { + return err + } + case shouldFail: + failReasonBytes := []byte{byte(FailureReasonNoRoute)} + if err := dupPaymentBucket.Put( + duplicatePaymentFailInfoKey, failReasonBytes, + ); err != nil { + return err + } + } + + return nil +} + +// fetchAllPaymentsFromKV fetches all payments from the KV store using the +// KVStore implementation. +func fetchAllPaymentsFromKV(t *testing.T, kvDB kvdb.Backend) []*MPPayment { + t.Helper() + + kvStore, err := NewKVStore(kvDB, WithNoMigration(true)) + require.NoError(t, err) + + payments, err := kvStore.FetchPayments() + require.NoError(t, err) + + return payments +} + +// normalizePaymentData makes sure that the payment data is normalized for +// comparison using the same logic as in-migration validation. +func normalizePaymentData(payment *MPPayment) { + normalizePaymentForCompare(payment) +} + +// comparePaymentData compares a KV payment with its SQL counterpart using +// deep equality check (similar to invoice migration). +func comparePaymentData(t *testing.T, ctx context.Context, sqlStore *SQLStore, + kvPayment *MPPayment) { + + t.Helper() + + // Fetch the SQL payment as MPPayment using SQLStore. + var paymentHash lntypes.Hash + copy(paymentHash[:], kvPayment.Info.PaymentIdentifier[:]) + + sqlPayment, err := sqlStore.FetchPayment(ctx, paymentHash) + require.NoError(t, err, "SQL payment should exist for %x", + paymentHash[:8]) + + // Normalize time precision to microseconds. + normalizePaymentData(kvPayment) + normalizePaymentData(sqlPayment) + + // Deep equality check - compares all fields recursively. 
+ require.Equal(t, kvPayment, sqlPayment, + "KV and SQL payments should be equal for %x", paymentHash[:8]) +} + +// runPaymentsMigration executes the payment migration from KV to SQL within +// a SQL transaction. +func runPaymentsMigration(ctx context.Context, kvDB kvdb.Backend, + sqlStore *SQLStore) error { + + return sqlStore.db.ExecTx( + ctx, sqldb.WriteTxOpt(), func(tx SQLQueries) error { + return MigratePaymentsKVToSQL( + ctx, kvDB, tx, &SQLStoreConfig{ + QueryCfg: sqlStore.cfg.QueryCfg, + }, + ) + }, sqldb.NoOpReset, + ) +} + +// assertPaymentDataMatches verifies a payment in KV matches its SQL counterpart +// using deep equality check. +func assertPaymentDataMatches(t *testing.T, ctx context.Context, + kvDB kvdb.Backend, sqlStore *SQLStore, hash [32]byte) { + t.Helper() + + // Fetch from KV. + var kvPayment *MPPayment + err := kvdb.View(kvDB, func(tx kvdb.RTx) error { + paymentsBucket := tx.ReadBucket(paymentsRootBucket) + if paymentsBucket == nil { + return nil + } + + paymentBucket := paymentsBucket.NestedReadBucket(hash[:]) + if paymentBucket == nil { + return nil + } + + var err error + kvPayment, err = fetchPayment(paymentBucket) + return err + }, func() {}) + require.NoError(t, err) + + if kvPayment == nil { + // Payment doesn't exist in KV, should not exist in SQL + // either. + var paymentHash lntypes.Hash + copy(paymentHash[:], hash[:]) + _, err := sqlStore.FetchPayment(ctx, paymentHash) + require.Error( + t, err, "payment should not exist in SQL if not "+ + "in KV", + ) + return + } + + // Use the deep comparison function. + comparePaymentData(t, ctx, sqlStore, kvPayment) +} + +// paymentTestConfig holds configuration for creating test payments with various +// features. 
+type paymentTestConfig struct { + hash [32]byte + seqNum uint64 + value lnwire.MilliSatoshi + creationTime time.Time + paymentRequest string + attemptID uint64 + numHops int + baseChannelID uint64 + baseTimeLock uint32 + paymentCustomRecs lnwire.CustomRecords + attemptCustomRecs lnwire.CustomRecords + hopConfigurator func(hop *route.Hop, index int, isFinal bool) +} + +// serializeAndPut serializes data using the provided serializer function and +// writes it to the bucket. +func serializeAndPut(bucket kvdb.RwBucket, key []byte, + serializer func(io.Writer) error) error { + + var b bytes.Buffer + if err := serializer(&b); err != nil { + return err + } + return bucket.Put(key, b.Bytes()) +} + +// generateSessionKey creates a new session key and returns the private key, +// source public key vertex, and serialized key bytes. +func generateSessionKey(t *testing.T) (*btcec.PrivateKey, route.Vertex, + [32]byte, error) { + + t.Helper() + + sessionKey, err := btcec.NewPrivateKey() + if err != nil { + return nil, route.Vertex{}, [32]byte{}, err + } + + var sourcePubKey route.Vertex + copy(sourcePubKey[:], sessionKey.PubKey().SerializeCompressed()) + + var sessionKeyBytes [32]byte + copy(sessionKeyBytes[:], sessionKey.Serialize()) + + return sessionKey, sourcePubKey, sessionKeyBytes, nil +} + +// createTestHops creates the specified number of test hops with the given +// parameters. The configurator function is called for each hop to allow +// feature-specific customization. 
+func createTestHops(t *testing.T, numHops int, baseAmount lnwire.MilliSatoshi, + baseChannelID uint64, baseTimeLock uint32, + configurator func(*route.Hop, int, bool)) ([]*route.Hop, + lnwire.MilliSatoshi, error) { + + t.Helper() + + hops := make([]*route.Hop, numHops) + currentAmt := baseAmount + + for i := 0; i < numHops; i++ { + hopKey, err := btcec.NewPrivateKey() + if err != nil { + return nil, 0, err + } + + amt := baseAmount - lnwire.MilliSatoshi(uint64(i)*100) + hop := &route.Hop{ + PubKeyBytes: route.NewVertex(hopKey.PubKey()), + ChannelID: baseChannelID + uint64(i), + OutgoingTimeLock: baseTimeLock - uint32(i*40), + AmtToForward: amt, + } + + // Apply feature-specific configuration. + if configurator != nil { + configurator(hop, i, i == numHops-1) + } + + hops[i] = hop + currentAmt = amt + } + + return hops, currentAmt, nil +} + +// writeHTLCAttempt writes the HTLC attempt info to the bucket. +func writeHTLCAttempt(bucket kvdb.RwBucket, attemptID uint64, + info *HTLCAttemptInfo) error { + + attemptKey := make([]byte, len(htlcAttemptInfoKey)+8) + copy(attemptKey, htlcAttemptInfoKey) + byteOrder.PutUint64(attemptKey[len(htlcAttemptInfoKey):], attemptID) + + return serializeAndPut(bucket, attemptKey, func(w io.Writer) error { + return serializeHTLCAttemptInfo(w, info) + }) +} + +// writeHTLCSettle writes the HTLC settle info to the bucket. +func writeHTLCSettle(bucket kvdb.RwBucket, attemptID uint64, + info *HTLCSettleInfo) error { + + settleKey := make([]byte, len(htlcSettleInfoKey)+8) + copy(settleKey, htlcSettleInfoKey) + byteOrder.PutUint64(settleKey[len(htlcSettleInfoKey):], attemptID) + + return serializeAndPut(bucket, settleKey, func(w io.Writer) error { + return serializeHTLCSettleInfo(w, info) + }) +} + +// createIndexEntry creates a payment index entry in the index bucket. 
+func createIndexEntry(indexBucket kvdb.RwBucket, seqBytes []byte, + hash [32]byte) error { + + var idx bytes.Buffer + if err := WriteElements( + &idx, paymentIndexTypeHash, hash[:], + ); err != nil { + return err + } + + return indexBucket.Put(seqBytes, idx.Bytes()) +} + +// createTestPayment creates a test payment with the specified configuration, +// handling all common boilerplate code. +func createTestPayment(t *testing.T, paymentsBucket, indexBucket kvdb.RwBucket, + cfg paymentTestConfig) error { + + t.Helper() + + // Create payment bucket. + paymentBucket, err := paymentsBucket.CreateBucketIfNotExists( + cfg.hash[:], + ) + if err != nil { + return err + } + + // Create payment ID. + var paymentID lntypes.Hash + copy(paymentID[:], cfg.hash[:]) + + // Create and serialize payment creation info. + creationInfo := &PaymentCreationInfo{ + PaymentIdentifier: paymentID, + Value: cfg.value, + CreationTime: cfg.creationTime, + PaymentRequest: []byte(cfg.paymentRequest), + FirstHopCustomRecords: cfg.paymentCustomRecs, + } + + err = serializeAndPut( + paymentBucket, paymentCreationInfoKey, + func(w io.Writer) error { + return serializePaymentCreationInfo(w, creationInfo) + }, + ) + if err != nil { + return err + } + + // Store sequence number. + seqBytes := make([]byte, 8) + byteOrder.PutUint64(seqBytes, cfg.seqNum) + if err := paymentBucket.Put(paymentSequenceKey, seqBytes); err != nil { + return err + } + + // Create HTLC bucket. + htlcBucket, err := paymentBucket.CreateBucketIfNotExists( + paymentHtlcsBucket, + ) + if err != nil { + return err + } + + // Generate session key. + _, sourcePubKey, sessionKeyBytes, err := generateSessionKey(t) + if err != nil { + return err + } + + // Create route with hops. + hops, totalAmount, err := createTestHops( + t, cfg.numHops, cfg.value, cfg.baseChannelID, + cfg.baseTimeLock, cfg.hopConfigurator, + ) + if err != nil { + return err + } + + // Create and serialize attempt info. 
+ attemptInfo := &HTLCAttemptInfo{ + AttemptID: cfg.attemptID, + sessionKey: sessionKeyBytes, + Route: route.Route{ + TotalTimeLock: cfg.baseTimeLock, + TotalAmount: totalAmount, + SourcePubKey: sourcePubKey, + Hops: hops, + FirstHopWireCustomRecords: cfg.attemptCustomRecs, + }, + AttemptTime: cfg.creationTime.Add(time.Minute), + Hash: (*lntypes.Hash)(&cfg.hash), + } + + if err = writeHTLCAttempt( + htlcBucket, cfg.attemptID, attemptInfo, + ); err != nil { + return err + } + + // Add settlement. + settleInfo := &HTLCSettleInfo{ + Preimage: lntypes.Preimage(cfg.hash), + SettleTime: cfg.creationTime.Add(2 * time.Minute), + } + + if err := writeHTLCSettle( + htlcBucket, cfg.attemptID, settleInfo, + ); err != nil { + return err + } + + // Create index entry. + return createIndexEntry(indexBucket, seqBytes, cfg.hash) +} + +// createPaymentWithMPP creates a payment with MPP records on the final hop. +func createPaymentWithMPP(t *testing.T, paymentsBucket, + indexBucket kvdb.RwBucket, hash [32]byte) error { + + t.Helper() + + return createTestPayment( + t, paymentsBucket, indexBucket, + paymentTestConfig{ + hash: hash, + seqNum: 1, + value: 50000, + creationTime: time.Date( + 2024, 1, 1, 12, 0, 0, 0, time.UTC, + ), + paymentRequest: "lnbc500n1test_mpp", + attemptID: 1, + numHops: 3, + baseChannelID: 100000, + baseTimeLock: 500000, + hopConfigurator: func(hop *route.Hop, index int, + isFinal bool) { + + if isFinal { + var paymentAddr [32]byte + copy( + paymentAddr[:], + []byte( + "test_mpp_payment_"+ + "address_32", + ), + ) + hop.MPP = record.NewMPP( + lnwire.MilliSatoshi(50000), + paymentAddr, + ) + } + }, + }, + ) +} + +// createPaymentWithAMP creates a payment with AMP records on the final hop. 
+func createPaymentWithAMP(t *testing.T, paymentsBucket, + indexBucket kvdb.RwBucket, hash [32]byte) error { + t.Helper() + + return createPaymentWithAMPChildIndex( + t, paymentsBucket, indexBucket, hash, 0, + ) +} + +// createPaymentWithAMPChildIndex creates a payment with AMP records on the +// final hop and a specific child index. +func createPaymentWithAMPChildIndex(t *testing.T, paymentsBucket, + indexBucket kvdb.RwBucket, hash [32]byte, childIndex uint32) error { + + t.Helper() + + return createTestPayment( + t, paymentsBucket, indexBucket, + paymentTestConfig{ + hash: hash, + seqNum: 2, + value: 75000, + creationTime: time.Date( + 2024, 2, 1, 10, 0, 0, 0, time.UTC, + ), + paymentRequest: "lnbc750n1test_amp", + attemptID: 1, + numHops: 2, + baseChannelID: 200000, + baseTimeLock: 600000, + hopConfigurator: func(hop *route.Hop, index int, + isFinal bool) { + + if isFinal { + var rootShare [32]byte + copy( + rootShare[:], + []byte( + "test_amp_root_share"+ + "_12345678", + ), + ) + var setID [32]byte + copy( + setID[:], + []byte( + "test_amp_set_id_"+ + "123456789012", + ), + ) + hop.AMP = record.NewAMP( + rootShare, setID, childIndex, + ) + } + }, + }, + ) +} + +// createPaymentWithCustomRecords creates a payment with custom records at all +// levels. 
+func createPaymentWithCustomRecords(t *testing.T, paymentsBucket, + indexBucket kvdb.RwBucket, hash [32]byte) error { + + t.Helper() + + return createTestPayment( + t, paymentsBucket, indexBucket, + paymentTestConfig{ + hash: hash, + seqNum: 3, + value: 100000, + creationTime: time.Date( + 2024, 3, 1, 14, 0, 0, 0, time.UTC, + ), + paymentRequest: "lnbc1m1test_custom", + attemptID: 1, + numHops: 3, + baseChannelID: 300000, + baseTimeLock: 700000, + paymentCustomRecs: lnwire.CustomRecords{ + 65536: []byte("payment_level_value_1"), + 65537: []byte("payment_level_value_2"), + }, + attemptCustomRecs: lnwire.CustomRecords{ + 65541: []byte("attempt_custom_value_1"), + 65542: []byte("attempt_custom_value_2"), + }, + hopConfigurator: func(hop *route.Hop, index int, + isFinal bool) { + + hop.CustomRecords = record.CustomSet{ + 65538 + uint64(index): []byte( + fmt.Sprintf( + "hop_%d_custom_value", + index, + ), + ), + } + }, + }, + ) +} + +// createPaymentWithBlindedRoute creates a payment with blinded route data. +func createPaymentWithBlindedRoute(t *testing.T, paymentsBucket, + indexBucket kvdb.RwBucket, hash [32]byte) error { + + t.Helper() + + return createTestPayment( + t, paymentsBucket, indexBucket, + paymentTestConfig{ + hash: hash, + seqNum: 4, + value: 120000, + creationTime: time.Date( + 2024, 4, 1, 16, 0, 0, 0, time.UTC, + ), + paymentRequest: "lnbc1200n1test_blinded", + attemptID: 1, + numHops: 4, + baseChannelID: 400000, + baseTimeLock: 800000, + hopConfigurator: func(hop *route.Hop, index int, + isFinal bool) { + + if isFinal { + blindingKey, err := btcec. + NewPrivateKey() + + require.NoError(t, err) + + hop.BlindingPoint = blindingKey.PubKey() + hop.EncryptedData = []byte( + "encrypted_blinded_route_" + + "data_test_value_12345", + ) + hop.TotalAmtMsat = lnwire.MilliSatoshi( + 119400, + ) + } + }, + }, + ) +} + +// createPaymentWithMetadata creates a payment with hop metadata. 
+func createPaymentWithMetadata(t *testing.T, paymentsBucket, + indexBucket kvdb.RwBucket, hash [32]byte) error { + + t.Helper() + + return createTestPayment( + t, paymentsBucket, indexBucket, + paymentTestConfig{ + hash: hash, + seqNum: 5, + value: 80000, + creationTime: time.Date( + 2024, 5, 1, 18, 0, 0, 0, time.UTC, + ), + paymentRequest: "lnbc800n1test_metadata", + attemptID: 1, + numHops: 3, + baseChannelID: 500000, + baseTimeLock: 900000, + hopConfigurator: func(hop *route.Hop, index int, + isFinal bool) { + + hop.Metadata = []byte( + fmt.Sprintf( + "hop_%d_metadata_value", index, + ), + ) + }, + }, + ) +} + +// createPaymentWithAllFeatures creates a payment with all optional features +// enabled. +func createPaymentWithAllFeatures(t *testing.T, paymentsBucket, + indexBucket kvdb.RwBucket, hash [32]byte) error { + + t.Helper() + + return createTestPayment( + t, paymentsBucket, indexBucket, + paymentTestConfig{ + hash: hash, + seqNum: 6, + value: 150000, + creationTime: time.Date( + 2024, 6, 1, 20, 0, 0, 0, time.UTC, + ), + paymentRequest: "lnbc1500n1test_all_features", + attemptID: 1, + numHops: 4, + baseChannelID: 600000, + baseTimeLock: 1000000, + paymentCustomRecs: lnwire.CustomRecords{ + 65543: []byte("all_features_payment_custom_1"), + 65544: []byte("all_features_payment_custom_2"), + }, + attemptCustomRecs: lnwire.CustomRecords{ + 65549: []byte("all_feat_attempt_custom_1"), + 65550: []byte("all_feat_attempt_custom_2"), + }, + hopConfigurator: func(hop *route.Hop, index int, + isFinal bool) { + + // Add custom records and metadata to all hops. + hop.CustomRecords = record.CustomSet{ + 65545 + uint64(index): []byte( + fmt.Sprintf( + "all_feat_hop_%d", + index, + ), + ), + } + hop.Metadata = []byte( + fmt.Sprintf( + "all_feat_metadata_%d", index, + ), + ) + + // Add MPP and blinded route data to final hop. 
+ if isFinal { + var paymentAddr [32]byte + copy( + paymentAddr[:], + []byte( + "all_features_mpp_"+ + "addr_123456", + ), + ) + hop.MPP = record.NewMPP( + lnwire.MilliSatoshi(149250), + paymentAddr, + ) + + blindingKey, err := btcec. + NewPrivateKey() + + require.NoError(t, err) + hop.BlindingPoint = blindingKey.PubKey() + hop.EncryptedData = []byte( + "all_features_encrypted_" + + "blinded_data_123456", + ) + hop.TotalAmtMsat = lnwire.MilliSatoshi( + 149250, + ) + } + }, + }, + ) +} + +// paymentFeatureSet defines a combination of optional payment features for +// testing feature interactions. +type paymentFeatureSet struct { + name string + mpp bool + amp bool + customRecords bool + blindedRoute bool + hopMetadata bool +} + +// createPaymentWithFeatureSet creates a payment with a selected set of +// optional features for combination testing. +func createPaymentWithFeatureSet(t *testing.T, paymentsBucket, + indexBucket kvdb.RwBucket, hash [32]byte, seqNum uint64, + features paymentFeatureSet, globalAttemptID *uint64) error { + t.Helper() + + if features.mpp && features.amp { + return fmt.Errorf("invalid feature set: mpp and amp") + } + + paymentBucket, err := paymentsBucket.CreateBucketIfNotExists(hash[:]) + if err != nil { + return err + } + + var paymentID lntypes.Hash + copy(paymentID[:], hash[:]) + + creationTime := time.Date(2024, 7, 1, 12, 0, 0, 0, time.UTC). 
+ Add(time.Duration(seqNum) * time.Minute) + creationInfo := &PaymentCreationInfo{ + PaymentIdentifier: paymentID, + Value: lnwire.MilliSatoshi(100000), + CreationTime: creationTime, + PaymentRequest: []byte( + fmt.Sprintf("lnbc_test_%s", features.name), + ), + } + if features.customRecords { + creationInfo.FirstHopCustomRecords = lnwire.CustomRecords{ + 65560: []byte("combo_payment_custom_1"), + 65561: []byte("combo_payment_custom_2"), + } + } + + var b bytes.Buffer + err = serializePaymentCreationInfo(&b, creationInfo) + if err != nil { + return err + } + err = paymentBucket.Put(paymentCreationInfoKey, b.Bytes()) + if err != nil { + return err + } + + seqBytes := make([]byte, 8) + byteOrder.PutUint64(seqBytes, seqNum) + err = paymentBucket.Put(paymentSequenceKey, seqBytes) + if err != nil { + return err + } + + htlcBucket, err := paymentBucket.CreateBucketIfNotExists( + paymentHtlcsBucket, + ) + if err != nil { + return err + } + + sessionKey, err := btcec.NewPrivateKey() + require.NoError(t, err) + + var sourcePubKey route.Vertex + copy(sourcePubKey[:], sessionKey.PubKey().SerializeCompressed()) + + var sessionKeyBytes [32]byte + copy(sessionKeyBytes[:], sessionKey.Serialize()) + + baseAmt := lnwire.MilliSatoshi(100000) + hops := make([]*route.Hop, 3) + for i := 0; i < 3; i++ { + hopKey, err := btcec.NewPrivateKey() + require.NoError(t, err) + + amt := baseAmt - lnwire.MilliSatoshi(uint64(i)*100) + hop := &route.Hop{ + PubKeyBytes: route.NewVertex(hopKey.PubKey()), + ChannelID: uint64(700000 + i), + OutgoingTimeLock: uint32(700000 - i*40), + AmtToForward: amt, + } + if features.customRecords { + hop.CustomRecords = record.CustomSet{ + 65562 + uint64(i): []byte(fmt.Sprintf( + "combo_hop_%d", i, + )), + } + } + if features.hopMetadata { + hop.Metadata = []byte( + fmt.Sprintf("combo_metadata_%d", i), + ) + } + + if i == 2 { + if features.mpp { + var paymentAddr [32]byte + copy( + paymentAddr[:], + []byte("combo_mpp_payment_addr_1234"), + ) + hop.MPP = 
record.NewMPP( + baseAmt-200, paymentAddr, + ) + } + if features.amp { + var rootShare [32]byte + copy( + rootShare[:], + []byte("combo_amp_root_share_123456"), + ) + var setID [32]byte + copy( + setID[:], + []byte("combo_amp_set_id_12345678"), + ) + hop.AMP = record.NewAMP(rootShare, setID, 0) + } + if features.blindedRoute { + blindingKey, err := btcec.NewPrivateKey() + require.NoError(t, err) + hop.BlindingPoint = blindingKey.PubKey() + hop.EncryptedData = []byte( + "combo_encrypted_blinded_data", + ) + hop.TotalAmtMsat = baseAmt - 200 + } + } + + hops[i] = hop + } + + routeInfo := route.Route{ + TotalTimeLock: 700000, + TotalAmount: baseAmt - 200, + SourcePubKey: sourcePubKey, + Hops: hops, + } + if features.customRecords { + routeInfo.FirstHopWireCustomRecords = lnwire.CustomRecords{ + 65565: []byte("combo_attempt_custom_1"), + 65566: []byte("combo_attempt_custom_2"), + } + } + + *globalAttemptID++ + attemptID := *globalAttemptID + attemptInfo := &HTLCAttemptInfo{ + AttemptID: attemptID, + sessionKey: sessionKeyBytes, + Route: routeInfo, + AttemptTime: creationTime.Add(time.Minute), + Hash: (*lntypes.Hash)(&hash), + } + + attemptKey := make([]byte, len(htlcAttemptInfoKey)+8) + copy(attemptKey, htlcAttemptInfoKey) + byteOrder.PutUint64(attemptKey[len(htlcAttemptInfoKey):], attemptID) + + var ab bytes.Buffer + err = serializeHTLCAttemptInfo(&ab, attemptInfo) + if err != nil { + return err + } + err = htlcBucket.Put(attemptKey, ab.Bytes()) + if err != nil { + return err + } + + settleInfo := &HTLCSettleInfo{ + Preimage: lntypes.Preimage(hash), + SettleTime: creationTime.Add(2 * time.Minute), + } + + settleKey := make([]byte, len(htlcSettleInfoKey)+8) + copy(settleKey, htlcSettleInfoKey) + byteOrder.PutUint64(settleKey[len(htlcSettleInfoKey):], attemptID) + + var sb bytes.Buffer + err = serializeHTLCSettleInfo(&sb, settleInfo) + if err != nil { + return err + } + err = htlcBucket.Put(settleKey, sb.Bytes()) + if err != nil { + return err + } + + var idx 
bytes.Buffer + err = WriteElements(&idx, paymentIndexTypeHash, hash[:]) + if err != nil { + return err + } + + return indexBucket.Put(seqBytes, idx.Bytes()) +} diff --git a/payments/db/migration1/test_harness..go b/payments/db/migration1/test_harness..go new file mode 100644 index 00000000000..b25867a4f52 --- /dev/null +++ b/payments/db/migration1/test_harness..go @@ -0,0 +1,26 @@ +package migration1 + +import ( + "testing" + + "github.com/lightningnetwork/lnd/lntypes" +) + +// TestHarness provides implementation-specific test utilities for the payments +// database. Different database backends (KV, SQL) have different internal +// structures and indexing mechanisms, so this interface allows tests to verify +// implementation-specific behavior without coupling the test logic to a +// particular backend. +type TestHarness interface { + // AssertPaymentIndex checks that a payment is correctly indexed. + // For KV: verifies the payment index bucket entry exists and points + // to the correct payment hash. + // For SQL: no-op (SQL doesn't use a separate index bucket). + AssertPaymentIndex(t *testing.T, expectedHash lntypes.Hash) + + // AssertNoIndex checks that an index for a sequence number doesn't + // exist. + // For KV: verifies the index bucket entry is deleted. + // For SQL: no-op. + AssertNoIndex(t *testing.T, seqNr uint64) +} diff --git a/payments/db/migration1/test_postgres.go b/payments/db/migration1/test_postgres.go new file mode 100644 index 00000000000..7055fb885c5 --- /dev/null +++ b/payments/db/migration1/test_postgres.go @@ -0,0 +1,94 @@ +//go:build test_db_postgres && !test_db_sqlite + +package migration1 + +import ( + "testing" + + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/payments/db/migration1/sqlc" + "github.com/lightningnetwork/lnd/sqldb" + "github.com/stretchr/testify/require" +) + +// NewTestDB is a helper function that creates a SQLStore backed by a SQL +// database for testing. 
+func NewTestDB(t testing.TB, opts ...OptionModifier) (DB, TestHarness) { + db := NewTestDBWithFixture(t, nil, opts...) + return db, &noopTestHarness{} +} + +// NewTestDBFixture creates a new sqldb.TestPgFixture for testing purposes. +func NewTestDBFixture(t *testing.T) *sqldb.TestPgFixture { + pgFixture := sqldb.NewTestPgFixture( + t, sqldb.DefaultPostgresFixtureLifetime, + ) + t.Cleanup(func() { + pgFixture.TearDown(t) + }) + return pgFixture +} + +// NewTestDBWithFixture is a helper function that creates a SQLStore backed by a +// SQL database for testing. +func NewTestDBWithFixture(t testing.TB, + pgFixture *sqldb.TestPgFixture, opts ...OptionModifier) DB { + + var querier BatchedSQLQueries + if pgFixture == nil { + querier = newBatchQuerier(t) + } else { + querier = newBatchQuerierWithFixture(t, pgFixture) + } + + store, err := NewSQLStore( + &SQLStoreConfig{ + QueryCfg: sqldb.DefaultPostgresConfig(), + }, querier, opts..., + ) + require.NoError(t, err) + + return store +} + +// newBatchQuerier creates a new BatchedSQLQueries instance for testing +// using a PostgreSQL database fixture. +func newBatchQuerier(t testing.TB) BatchedSQLQueries { + pgFixture := sqldb.NewTestPgFixture( + t, sqldb.DefaultPostgresFixtureLifetime, + ) + t.Cleanup(func() { + pgFixture.TearDown(t) + }) + + return newBatchQuerierWithFixture(t, pgFixture) +} + +// newBatchQuerierWithFixture creates a new BatchedSQLQueries instance for +// testing using a PostgreSQL database fixture. +func newBatchQuerierWithFixture(t testing.TB, + pgFixture *sqldb.TestPgFixture) BatchedSQLQueries { + + rawDB := sqldb.NewTestPostgresDB(t, pgFixture).BaseDB.DB + + return &testBatchedSQLQueries{ + db: rawDB, + Queries: sqlc.New(rawDB), + } +} + +// noopTestHarness is the SQL test harness implementation. Since SQL doesn't +// use a separate payment index bucket like KV, these assertions are no-ops. +type noopTestHarness struct{} + +// AssertPaymentIndex is a no-op for SQL implementations. 
+func (h *noopTestHarness) AssertPaymentIndex(t *testing.T, + expectedHash lntypes.Hash) { + + // No-op: SQL doesn't use a separate index bucket. +} + +// AssertNoIndex is a no-op for SQL implementations. +func (h *noopTestHarness) AssertNoIndex(t *testing.T, seqNr uint64) { + // No-op: SQL doesn't use a separate index bucket. +} diff --git a/payments/db/migration1/test_sql.go b/payments/db/migration1/test_sql.go new file mode 100644 index 00000000000..4a576fc7b64 --- /dev/null +++ b/payments/db/migration1/test_sql.go @@ -0,0 +1,59 @@ +//go:build test_db_postgres || test_db_sqlite + +package migration1 + +import ( + "context" + "database/sql" + "testing" + + "github.com/lightningnetwork/lnd/payments/db/migration1/sqlc" + "github.com/lightningnetwork/lnd/sqldb" + "github.com/stretchr/testify/require" +) + +// setupTestSQLDB creates a SQLStore-backed test database. +func setupTestSQLDB(t testing.TB, opts ...OptionModifier) *SQLStore { + t.Helper() + + db, _ := NewTestDB(t, opts...) + sqlStore, ok := db.(*SQLStore) + require.True(t, ok) + + return sqlStore +} + +// testBatchedSQLQueries is a simple implementation of BatchedSQLQueries for +// testing. +type testBatchedSQLQueries struct { + db *sql.DB + *sqlc.Queries +} + +// ExecTx implements the transaction execution logic. 
+func (t *testBatchedSQLQueries) ExecTx(ctx context.Context, + txOpts sqldb.TxOptions, txBody func(SQLQueries) error, + reset func()) error { + + sqlOptions := sql.TxOptions{ + Isolation: sql.LevelSerializable, + ReadOnly: txOpts.ReadOnly(), + } + + tx, err := t.db.BeginTx(ctx, &sqlOptions) + if err != nil { + return err + } + defer func() { + if err != nil { + _ = tx.Rollback() + } else { + err = tx.Commit() + } + }() + + reset() + queries := sqlc.New(tx) + + return txBody(queries) +} diff --git a/payments/db/migration1/test_sqlite.go b/payments/db/migration1/test_sqlite.go new file mode 100644 index 00000000000..b84c9c2d9ea --- /dev/null +++ b/payments/db/migration1/test_sqlite.go @@ -0,0 +1,73 @@ +//go:build !test_db_postgres && test_db_sqlite + +package migration1 + +import ( + "testing" + + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/payments/db/migration1/sqlc" + "github.com/lightningnetwork/lnd/sqldb" + "github.com/stretchr/testify/require" +) + +// NewTestDB is a helper function that creates a SQLStore backed by a SQL +// database for testing. +func NewTestDB(t testing.TB, opts ...OptionModifier) (DB, TestHarness) { + db := NewTestDBWithFixture(t, nil, opts...) + return db, &noopTestHarness{} +} + +// NewTestDBFixture is a no-op for the sqlite build. +func NewTestDBFixture(_ *testing.T) *sqldb.TestPgFixture { + return nil +} + +// NewTestDBWithFixture is a helper function that creates a SQLStore backed by a +// SQL database for testing. +func NewTestDBWithFixture(t testing.TB, _ *sqldb.TestPgFixture, + opts ...OptionModifier) DB { + + store, err := NewSQLStore( + &SQLStoreConfig{ + QueryCfg: sqldb.DefaultSQLiteConfig(), + }, newBatchQuerier(t), opts..., + ) + require.NoError(t, err) + return store +} + +// newBatchQuerier creates a new BatchedSQLQueries instance for testing +// using a SQLite database. 
+func newBatchQuerier(t testing.TB) BatchedSQLQueries { + return newBatchQuerierWithFixture(t, nil) +} + +// newBatchQuerierWithFixture creates a new BatchedSQLQueries instance for +// testing using a SQLite database. +func newBatchQuerierWithFixture(t testing.TB, + _ *sqldb.TestPgFixture) BatchedSQLQueries { + + rawDB := sqldb.NewTestSqliteDB(t).BaseDB.DB + + return &testBatchedSQLQueries{ + db: rawDB, + Queries: sqlc.New(rawDB), + } +} + +// noopTestHarness is the SQL test harness implementation. Since SQL doesn't +// use a separate payment index bucket like KV, these assertions are no-ops. +type noopTestHarness struct{} + +// AssertPaymentIndex is a no-op for SQL implementations. +func (h *noopTestHarness) AssertPaymentIndex(t *testing.T, + expectedHash lntypes.Hash) { + + // No-op: SQL doesn't use a separate index bucket. +} + +// AssertNoIndex is a no-op for SQL implementations. +func (h *noopTestHarness) AssertNoIndex(t *testing.T, seqNr uint64) { + // No-op: SQL doesn't use a separate index bucket. +} From 4318f49104daa47f2c80536f85a88148685bf695 Mon Sep 17 00:00:00 2001 From: ziggie Date: Sun, 11 Jan 2026 00:07:36 +0100 Subject: [PATCH 06/10] payments/migration1: add external migration test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a developer-facing migration_external_test that allows running the KV→SQL payments migration against a real channel.db backend to debug migration failures on actual data. The accompanying testdata README documents how to supply a database file and configure the test, so users can validate their data and confirm the migration completes successfully. The test is skipped by default and meant for manual diagnostics. 
---
 .../db/migration1/migration_external_test.go  | 180 ++++++++++++++++++
 payments/db/migration1/testdata/README.md     |  52 +++++
 2 files changed, 232 insertions(+)
 create mode 100644 payments/db/migration1/migration_external_test.go
 create mode 100644 payments/db/migration1/testdata/README.md

diff --git a/payments/db/migration1/migration_external_test.go b/payments/db/migration1/migration_external_test.go
new file mode 100644
index 00000000000..ec8d72dfbc9
--- /dev/null
+++ b/payments/db/migration1/migration_external_test.go
@@ -0,0 +1,180 @@
+//go:build test_db_postgres || test_db_sqlite
+
+package migration1
+
+import (
+	"context"
+	"os"
+	"path"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/btcsuite/btclog/v2"
+	"github.com/lightningnetwork/lnd/kvdb"
+	"github.com/lightningnetwork/lnd/kvdb/postgres"
+	"github.com/lightningnetwork/lnd/kvdb/sqlbase"
+	"github.com/lightningnetwork/lnd/kvdb/sqlite"
+	"github.com/lightningnetwork/lnd/sqldb"
+	"github.com/stretchr/testify/require"
+)
+
+// TestMigrationWithExternalDB tests the migration of the payment store from a
+// bolt backed channel.db or a kvdb channel.sqlite to a SQL database. Note that
+// this test does not attempt to be a complete migration test for all payment
+// store types but rather is added as a tool for developers and users to debug
+// payment migration issues with an actual channel.db/channel.sqlite file.
+//
+// NOTE: To use this test, place either of those files in the
+// payments/db/migration1/testdata directory, comment out the "Skipf" line, and
+// set the "fileName" variable to the name of the channel database file you
+// want to use for the migration test.
+func TestMigrationWithExternalDB(t *testing.T) {
+	ctx := context.Background()
+
+	// NOTE: comment this line out to run the test.
+	t.Skipf("skipping test meant for local debugging only")
+
+	// NOTE: set this to the name of the channel database file you want
+	// to use for the migration test. 
This may be either a bbolt ".db" file + // or a SQLite ".sqlite" file. If you want to migrate from a + // bbolt channel.db file, set this to "channel.db". + const fileName = "channel.db" + + // NOTE: if set, this test will prefer migrating from a Postgres-backed + // kvdb source instead of a local file. Leave empty to use fileName. + const postgresKVDSN = "" + const postgresKVPfx = "channeldb" + const logSequenceOrder = false + + // Determine if we are using a SQLite file or a Bolt DB file. + isSqlite := strings.HasSuffix(fileName, ".sqlite") + + // Set up logging for the test. + logger := btclog.NewSLogger(btclog.NewDefaultHandler(os.Stdout)) + UseLogger(logger) + + // migrate runs the migration from the kvdb store to the SQL store. + migrate := func(t *testing.T, kvBackend kvdb.Backend) { + sqlStore := setupTestSQLDB(t) + + // Run migration in a transaction + err := sqlStore.db.ExecTx( + ctx, sqldb.WriteTxOpt(), func(tx SQLQueries) error { + return MigratePaymentsKVToSQL( + ctx, kvBackend, tx, &SQLStoreConfig{ + QueryCfg: sqlStore.cfg.QueryCfg, + }, + ) + }, sqldb.NoOpReset, + ) + require.NoError(t, err) + + _ = logSequenceOrder + } + + connectPostgres := func(t *testing.T, dsn, prefix string) kvdb.Backend { + dsn = strings.TrimSpace(dsn) + if dsn == "" { + t.Fatalf("missing postgres kvdb dsn") + } + + prefix = strings.TrimSpace(prefix) + if prefix == "" { + prefix = "channeldb" + } + + const ( + timeout = 10 * time.Second + maxConns = 5 + ) + sqlbase.Init(maxConns) + + dbCfg := &postgres.Config{ + Dsn: dsn, + Timeout: timeout, + MaxConnections: maxConns, + } + + kvStore, err := kvdb.Open( + kvdb.PostgresBackendName, ctx, dbCfg, prefix, + ) + require.NoError(t, err) + + return kvStore + } + + connectPostgresKV := func(t *testing.T) kvdb.Backend { + return connectPostgres(t, postgresKVDSN, postgresKVPfx) + } + + connectBBolt := func(t *testing.T, dbPath string) kvdb.Backend { + cfg := &kvdb.BoltBackendConfig{ + DBPath: dbPath, + DBFileName: fileName, + 
NoFreelistSync: true, + AutoCompact: false, + AutoCompactMinAge: kvdb.DefaultBoltAutoCompactMinAge, + DBTimeout: kvdb.DefaultDBTimeout, + } + + kvStore, err := kvdb.GetBoltBackend(cfg) + require.NoError(t, err) + + return kvStore + } + + connectSQLite := func(t *testing.T, dbPath string) kvdb.Backend { + const ( + timeout = 10 * time.Second + maxConns = 5 + ) + sqlbase.Init(maxConns) + + cfg := &sqlite.Config{ + Timeout: timeout, + BusyTimeout: timeout, + MaxConnections: maxConns, + } + + kvStore, err := kvdb.Open( + kvdb.SqliteBackendName, ctx, cfg, + dbPath, fileName, + // NOTE: we use the raw string here else we get an + // import cycle if we try to import lncfg.NSChannelDB. + "channeldb", + ) + require.NoError(t, err) + + return kvStore + } + + tests := []struct { + name string + dbPath string + }{ + { + name: "testdata", + dbPath: "testdata", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if postgresKVDSN != "" { + migrate(t, connectPostgresKV(t)) + return + } + + chanDBPath := path.Join(test.dbPath, fileName) + t.Logf("Connecting to channel DB at: %s", chanDBPath) + + connectDB := connectBBolt + if isSqlite { + connectDB = connectSQLite + } + + migrate(t, connectDB(t, test.dbPath)) + }) + } +} diff --git a/payments/db/migration1/testdata/README.md b/payments/db/migration1/testdata/README.md new file mode 100644 index 00000000000..08c5fb06b4a --- /dev/null +++ b/payments/db/migration1/testdata/README.md @@ -0,0 +1,52 @@ +# Payment Migration External Testdata + +This directory holds a real `channel.db` (bbolt) or `channel.sqlite` file for +testing the payments KV to SQL migration locally. You can also point the test +at an existing Postgres-backed kvdb instance. + +## How to use + +1. Copy your `channel.db` or `channel.sqlite` file into this folder. +2. 
Edit `migration_external_test.go`:
+
+   ```go
+   // Comment out this line to enable the test
+   t.Skipf("skipping test meant for local debugging only")
+
+   // Set to your database filename
+   const fileName = "channel.db" // or "channel.sqlite"
+   ```
+
+3. Run the test:
+
+   ```bash
+   # For Postgres backend
+   go test -v -tags="test_db_postgres" -run TestMigrationWithExternalDB
+   ```
+
+## SQLite kvdb source
+
+To migrate from a `channel.sqlite` file, run with the `kvdb_sqlite` build
+tag:
+
+```bash
+go test -v -tags="test_db_sqlite kvdb_sqlite" \
+    -run TestMigrationWithExternalDB
+```
+
+## Postgres kvdb source
+
+To migrate from an existing Postgres-backed kvdb instance, edit
+`postgresKVDSN` in `migration_external_test.go` (set it non-empty), then
+run with the `kvdb_postgres` build tag:
+
+```bash
+go test -v -tags="kvdb_postgres test_db_postgres" \
+    -run TestMigrationWithExternalDB
+```
+
+## Notes
+
+- The external database is opened read-only.
+- The test creates a fresh SQL database for each run.
+- Do not commit production data; keep the file local.

From cd277d668bfe58d8d855787b8ad27dd7c92955b4 Mon Sep 17 00:00:00 2001
From: ziggie
Date: Sun, 11 Jan 2026 00:09:42 +0100
Subject: [PATCH 07/10] =?UTF-8?q?payments/migration1:=20wire=20KV=E2=86=92?=
 =?UTF-8?q?SQL=20migration=20in=20the=20main=20pkg?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Hook the payments KV→SQL migration into the SQL migration config. The
migration is still only available when building with the build tag
"test_native_sql". Moreover a tombstone protection similar to the
invoice migration is added to prevent re-running with the KV backend
once migration completes. 
--- config_builder.go | 58 ++++++++++++++++++++++++++++++ payments/db/kv_tombstone.go | 71 +++++++++++++++++++++++++++++++++++++ payments/db/log.go | 2 ++ sqldb/migrations_dev.go | 14 ++++++++ 4 files changed, 145 insertions(+) create mode 100644 payments/db/kv_tombstone.go diff --git a/config_builder.go b/config_builder.go index babe5c20d79..c435e0fb5f3 100644 --- a/config_builder.go +++ b/config_builder.go @@ -51,6 +51,8 @@ import ( "github.com/lightningnetwork/lnd/macaroons" "github.com/lightningnetwork/lnd/msgmux" paymentsdb "github.com/lightningnetwork/lnd/payments/db" + paymentsmig1 "github.com/lightningnetwork/lnd/payments/db/migration1" + paymentsmig1sqlc "github.com/lightningnetwork/lnd/payments/db/migration1/sqlc" "github.com/lightningnetwork/lnd/rpcperms" "github.com/lightningnetwork/lnd/signal" "github.com/lightningnetwork/lnd/sqldb" @@ -76,6 +78,10 @@ const ( // graphMigration is the version number for the graph migration // that migrates the KV graph to the native SQL schema. graphMigration = 10 + + // paymentMigration is the version number for the payments migration + // that migrates KV payments to the native SQL schema. + paymentMigration = 12 ) // GrpcRegistrar is an interface that must be satisfied by an external subserver @@ -1153,6 +1159,31 @@ func (d *DefaultDatabaseBuilder) BuildDatabase( return nil } + paymentMig := func(tx *sqlc.Queries) error { + err := paymentsmig1.MigratePaymentsKVToSQL( + ctx, + dbs.ChanStateDB.Backend, + paymentsmig1sqlc.New(tx.GetTx()), + &paymentsmig1.SQLStoreConfig{ + QueryCfg: queryCfg, + }, + ) + if err != nil { + return fmt.Errorf("failed to migrate "+ + "payments to SQL: %w", err) + } + + // Set the payments bucket tombstone to + // indicate that the migration has been + // completed. 
+ d.logger.Debugf("Setting payments bucket " + + "tombstone") + + return paymentsdb.SetPaymentsBucketTombstone( + dbs.ChanStateDB.Backend, + ) + } + // Make sure we attach the custom migration function to // the correct migration version. for i := 0; i < len(migrations); i++ { @@ -1162,11 +1193,17 @@ func (d *DefaultDatabaseBuilder) BuildDatabase( migrations[i].MigrationFn = invoiceMig continue + case graphMigration: migrations[i].MigrationFn = graphMig continue + case paymentMigration: + migrations[i].MigrationFn = paymentMig + + continue + default: } @@ -1265,6 +1302,27 @@ func (d *DefaultDatabaseBuilder) BuildDatabase( return nil, nil, err } + // Check if the payments bucket tombstone is set. If it is, we + // need to return and ask the user switch back to using the + // native SQL store. + ripPayments, err := paymentsdb.GetPaymentsBucketTombstone( + dbs.ChanStateDB.Backend, + ) + if err != nil { + err = fmt.Errorf("unable to check payments bucket "+ + "tombstone: %w", err) + d.logger.Error(err) + + return nil, nil, err + } + if ripPayments { + err = fmt.Errorf("payments bucket tombstoned, please " + + "switch back to native SQL") + d.logger.Error(err) + + return nil, nil, err + } + dbs.InvoiceDB = dbs.ChanStateDB graphStore, err = graphdb.NewKVStore( diff --git a/payments/db/kv_tombstone.go b/payments/db/kv_tombstone.go new file mode 100644 index 00000000000..7dc13b93b83 --- /dev/null +++ b/payments/db/kv_tombstone.go @@ -0,0 +1,71 @@ +package paymentsdb + +import ( + "fmt" + + "github.com/lightningnetwork/lnd/kvdb" +) + +var ( + // paymentsBucketTombstone is the key used to mark the payments bucket + // as permanently closed after a successful migration. + paymentsBucketTombstone = []byte("payments-tombstone") +) + +// SetPaymentsBucketTombstone sets the tombstone key in the payments bucket to +// mark the bucket as permanently closed. This prevents it from being reopened +// in the future. 
+func SetPaymentsBucketTombstone(db kvdb.Backend) error { + return kvdb.Update(db, func(tx kvdb.RwTx) error { + // Access the top-level payments bucket. + payments := tx.ReadWriteBucket(paymentsRootBucket) + + // In case the bucket doesn't exist, because we start + // immediately with the native SQL schema, we create it as well + // to make sure the user cannot switch back to the KV store. + if payments == nil { + var err error + payments, err = tx.CreateTopLevelBucket( + paymentsRootBucket, + ) + if err != nil { + return fmt.Errorf("failed to create payments "+ + "bucket: %w", err) + } + } + + // Add the tombstone key to the payments bucket. + err := payments.Put(paymentsBucketTombstone, []byte("1")) + if err != nil { + return fmt.Errorf("failed to set tombstone: %w", err) + } + + return nil + }, func() {}) +} + +// GetPaymentsBucketTombstone checks if the tombstone key exists in the payments +// bucket. It returns true if the tombstone is present and false otherwise. +func GetPaymentsBucketTombstone(db kvdb.Backend) (bool, error) { + var tombstoneExists bool + + err := kvdb.View(db, func(tx kvdb.RTx) error { + // Access the top-level payments bucket. + payments := tx.ReadBucket(paymentsRootBucket) + if payments == nil { + tombstoneExists = false + return nil + } + + // Check if the tombstone key exists. + tombstone := payments.Get(paymentsBucketTombstone) + tombstoneExists = tombstone != nil + + return nil + }, func() {}) + if err != nil { + return false, err + } + + return tombstoneExists, nil +} diff --git a/payments/db/log.go b/payments/db/log.go index 8a77dbcec7f..c8892341da7 100644 --- a/payments/db/log.go +++ b/payments/db/log.go @@ -3,6 +3,7 @@ package paymentsdb import ( "github.com/btcsuite/btclog/v2" "github.com/lightningnetwork/lnd/build" + paymentsmig1 "github.com/lightningnetwork/lnd/payments/db/migration1" ) // log is a logger that is initialized with no output filters. This @@ -29,4 +30,5 @@ func DisableLog() { // using btclog. 
func UseLogger(logger btclog.Logger) { log = logger + paymentsmig1.UseLogger(logger) } diff --git a/sqldb/migrations_dev.go b/sqldb/migrations_dev.go index 4158cb94903..e130fd52116 100644 --- a/sqldb/migrations_dev.go +++ b/sqldb/migrations_dev.go @@ -8,4 +8,18 @@ var migrationAdditions = []MigrationConfig{ Version: 11, SchemaVersion: 9, }, + { + Name: "000010_payment_duplicates", + Version: 12, + SchemaVersion: 10, + }, + { + Name: "kv_payments_migration", + Version: 13, + SchemaVersion: 10, + // A migration function may be attached to this + // migration to migrate KV payments to the native SQL + // schema. This is optional and can be disabled by the + // user if necessary. + }, } From 727fcb567daa219a31b5635bde69b2e8e2f5c559 Mon Sep 17 00:00:00 2001 From: ziggie Date: Sun, 11 Jan 2026 00:29:56 +0100 Subject: [PATCH 08/10] mod: update new direct dependency via go mod tidy --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 35a47c70253..a9b57fdc483 100644 --- a/go.mod +++ b/go.mod @@ -141,7 +141,7 @@ require ( github.com/opencontainers/runc v1.1.14 // indirect github.com/ory/dockertest/v3 v3.10.0 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.0 github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.26.0 // indirect github.com/prometheus/procfs v0.6.0 // indirect From 509c47105b6044cf4756ea8d5def76ea71af3bd5 Mon Sep 17 00:00:00 2001 From: ziggie Date: Sun, 11 Jan 2026 20:15:00 +0100 Subject: [PATCH 09/10] payments: allow skipping migration validation Add a config flag to skip in-migration validation for the KV->SQL payments migration. This is added as an option in case bigger payment databases don't require strict validation but instead prefer speed. This commit wires the option through the config, documents it in the sample config, and disables batch/count validation when requested. 
--- config_builder.go | 3 + lncfg/db.go | 7 +- payments/db/migration1/sql_migration.go | 86 ++++++++++++++----------- payments/db/migration1/sql_store.go | 4 ++ sample-lnd.conf | 5 ++ 5 files changed, 67 insertions(+), 38 deletions(-) diff --git a/config_builder.go b/config_builder.go index c435e0fb5f3..9d5dbfa3ea8 100644 --- a/config_builder.go +++ b/config_builder.go @@ -1159,6 +1159,7 @@ func (d *DefaultDatabaseBuilder) BuildDatabase( return nil } + //nolint:ll paymentMig := func(tx *sqlc.Queries) error { err := paymentsmig1.MigratePaymentsKVToSQL( ctx, @@ -1166,6 +1167,8 @@ func (d *DefaultDatabaseBuilder) BuildDatabase( paymentsmig1sqlc.New(tx.GetTx()), &paymentsmig1.SQLStoreConfig{ QueryCfg: queryCfg, + SkipMigrationValidation: cfg.DB. + SkipPaymentsMigrationValidation, }, ) if err != nil { diff --git a/lncfg/db.go b/lncfg/db.go index 5bd4d2e19bb..38cd6f6b2c0 100644 --- a/lncfg/db.go +++ b/lncfg/db.go @@ -90,6 +90,8 @@ type DB struct { SkipNativeSQLMigration bool `long:"skip-native-sql-migration" description:"If set to true, the KV to native SQL migration will be skipped. Note that this option is intended for users who experience non-resolvable migration errors. Enabling after there is a non-resolvable migration error that resulted in an incomplete migration will cause that partial migration to be abandoned and ignored and an empty database will be used instead. Since invoices are currently the only native SQL database used, our channels will still work but the invoice history will be forgotten. This option has no effect if native SQL is not in use (db.use-native-sql=false)."` + SkipPaymentsMigrationValidation bool `long:"skip-payments-migration-validation" description:"If set to true, the KV to native SQL payments migration will skip its in-migration validation step. This can be used if validation is too slow for large databases."` + NoGraphCache bool `long:"no-graph-cache" description:"Don't use the in-memory graph cache for path finding. 
Much slower but uses less RAM. Can only be used with a bolt database backend."` PruneRevocation bool `long:"prune-revocation" description:"Run the optional migration that prunes the revocation logs to save disk space."` @@ -130,8 +132,9 @@ func DefaultDB() *DB { BusyTimeout: defaultSqliteBusyTimeout, QueryConfig: *sqldb.DefaultSQLiteConfig(), }, - UseNativeSQL: false, - SkipNativeSQLMigration: false, + UseNativeSQL: false, + SkipNativeSQLMigration: false, + SkipPaymentsMigrationValidation: false, } } diff --git a/payments/db/migration1/sql_migration.go b/payments/db/migration1/sql_migration.go index 8fad6822039..3f5cff3e8fa 100644 --- a/payments/db/migration1/sql_migration.go +++ b/payments/db/migration1/sql_migration.go @@ -39,12 +39,21 @@ type MigrationStats struct { func MigratePaymentsKVToSQL(ctx context.Context, kvBackend kvdb.Backend, sqlDB SQLQueries, cfg *SQLStoreConfig) error { - if cfg == nil || cfg.QueryCfg == nil { - return fmt.Errorf("missing SQL store config for validation") + if cfg == nil { + return fmt.Errorf("missing SQL store config for migration") } - if cfg.QueryCfg.MaxBatchSize == 0 { - return fmt.Errorf("invalid max batch size for validation") + validateMigration := !cfg.SkipMigrationValidation + if validateMigration { + if cfg.QueryCfg == nil { + return fmt.Errorf("missing SQL store config for " + + "validation") + } + + if cfg.QueryCfg.MaxBatchSize == 0 { + return fmt.Errorf("invalid max batch size for " + + "validation") + } } stats := &MigrationStats{} @@ -178,38 +187,41 @@ func MigratePaymentsKVToSQL(ctx context.Context, kvBackend kvdb.Backend, } } - // Add the payment to the validation batch. - validationBatch = append( - validationBatch, migratedPaymentRef{ - Hash: paymentHash, - PaymentID: paymentID, - }, - ) - if uint32(len(validationBatch)) >= - cfg.QueryCfg.MaxBatchSize { - - err := validateMigratedPaymentBatch( - ctx, kvBackend, sqlDB, - cfg, - validationBatch, + if validateMigration { + // Add the payment to the validation batch. 
+ validationBatch = append( + validationBatch, migratedPaymentRef{ + Hash: paymentHash, + PaymentID: paymentID, + }, ) - if err != nil { - return err - } + if uint32(len(validationBatch)) >= + cfg.QueryCfg.MaxBatchSize { - validatedPayments += int64( - len(validationBatch), - ) + err := validateMigratedPaymentBatch( + ctx, kvBackend, sqlDB, + cfg, + validationBatch, + ) + if err != nil { + return err + } - // Log validation progress periodically. - validationInterval.Do(func() { - log.Infof("Validated %d/%d payments", - validatedPayments, - stats.TotalPayments, + validatedPayments += int64( + len(validationBatch), ) - }) - validationBatch = validationBatch[:0] + // Log validation progress periodically. + validationInterval.Do(func() { + log.Infof("Validated %d/%d "+ + "payments", + validatedPayments, + stats.TotalPayments, + ) + }) + + validationBatch = validationBatch[:0] + } } return nil @@ -221,7 +233,7 @@ func MigratePaymentsKVToSQL(ctx context.Context, kvBackend kvdb.Backend, } // Validate any remaining payments in the batch. - if len(validationBatch) > 0 { + if validateMigration && len(validationBatch) > 0 { if err := validateMigratedPaymentBatch( ctx, kvBackend, sqlDB, cfg, validationBatch, ); err != nil { @@ -234,10 +246,12 @@ func MigratePaymentsKVToSQL(ctx context.Context, kvBackend kvdb.Backend, } // Validate the total number of payments as an additional sanity check. 
- if err := validatePaymentCounts( - ctx, sqlDB, stats.TotalPayments, - ); err != nil { - return err + if validateMigration { + if err := validatePaymentCounts( + ctx, sqlDB, stats.TotalPayments, + ); err != nil { + return err + } } stats.MigrationDuration = time.Since(startTime) diff --git a/payments/db/migration1/sql_store.go b/payments/db/migration1/sql_store.go index 0725bfe59ba..2cf84a688ed 100644 --- a/payments/db/migration1/sql_store.go +++ b/payments/db/migration1/sql_store.go @@ -125,6 +125,10 @@ var _ DB = (*SQLStore)(nil) type SQLStoreConfig struct { // QueryConfig holds configuration values for SQL queries. QueryCfg *sqldb.QueryConfig + + // SkipMigrationValidation disables in-migration validation checks. + // This is intended for large databases where validation is too slow. + SkipMigrationValidation bool } // NewSQLStore creates a new SQLStore instance given an open diff --git a/sample-lnd.conf b/sample-lnd.conf index 830d42ea151..687177171a6 100644 --- a/sample-lnd.conf +++ b/sample-lnd.conf @@ -1528,6 +1528,11 @@ ; is not in use (db.use-native-sql=false). ; db.skip-native-sql-migration=false +; If set to true, the KV to native SQL payments migration will skip its +; in-migration validation step. This can be used if validation is too slow for +; large databases. +; db.skip-payments-migration-validation=false + [etcd] ; Etcd database host. Supports multiple hosts separated by a comma. 
From 915f28267ef79433f73355ceb99403db880baaf5 Mon Sep 17 00:00:00 2001
From: ziggie
Date: Sun, 11 Jan 2026 00:10:37 +0100
Subject: [PATCH 10/10] docs: add release-notes

---
 docs/release-notes/release-notes-0.21.0.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/docs/release-notes/release-notes-0.21.0.md b/docs/release-notes/release-notes-0.21.0.md
index 90a51b2cd8b..89f316c9fbd 100644
--- a/docs/release-notes/release-notes-0.21.0.md
+++ b/docs/release-notes/release-notes-0.21.0.md
@@ -157,6 +157,10 @@ db functions
 Part 2](https://github.com/lightningnetwork/lnd/pull/10308)
   * [Finalize SQL implementation for payments
     db](https://github.com/lightningnetwork/lnd/pull/10373)
+  * [Add the KV-to-SQL payment
+    migration](https://github.com/lightningnetwork/lnd/pull/10485) with
+    comprehensive tests; wiring it into the payment flow is gated behind
+    the "test_native_sql" build tag.

 ## Code Health