diff --git a/contribs/gnobro/go.mod b/contribs/gnobro/go.mod index 436e7968553..3765875629a 100644 --- a/contribs/gnobro/go.mod +++ b/contribs/gnobro/go.mod @@ -60,6 +60,7 @@ require ( github.com/emicklei/dot v1.6.2 // indirect github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -85,6 +86,7 @@ require ( github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect github.com/muesli/cancelreader v0.2.2 // indirect github.com/muesli/termenv v0.16.0 // indirect + github.com/olahol/melody v1.4.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/peterbourgon/ff/v3 v3.4.0 // indirect diff --git a/contribs/gnobro/go.sum b/contribs/gnobro/go.sum index f1a2876ae25..8f4c1be0cf4 100644 --- a/contribs/gnobro/go.sum +++ b/contribs/gnobro/go.sum @@ -140,6 +140,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= @@ -239,6 +241,8 @@ github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= diff --git a/contribs/gnodev/accounts.go b/contribs/gnodev/accounts.go index 46cf408b060..3481a775cf6 100644 --- a/contribs/gnodev/accounts.go +++ b/contribs/gnodev/accounts.go @@ -13,7 +13,6 @@ import ( "github.com/gnolang/gno/gno.land/pkg/gnoland" "github.com/gnolang/gno/gno.land/pkg/gnoland/ugnot" "github.com/gnolang/gno/tm2/pkg/amino" - "github.com/gnolang/gno/tm2/pkg/bft/rpc/client" "github.com/gnolang/gno/tm2/pkg/std" ) @@ -107,7 +106,7 @@ func generateBalances(bk *address.Book, cfg *AppConfig) (gnoland.Balances, error return blsFile, nil } -func logAccounts(ctx context.Context, logger *slog.Logger, book *address.Book, _ *dev.Node) error { +func logAccounts(ctx context.Context, logger *slog.Logger, book *address.Book, n *dev.Node) error { var tab strings.Builder tabw := tabwriter.NewWriter(&tab, 0, 0, 2, ' ', tabwriter.TabIndent) @@ -117,7 +116,7 @@ func logAccounts(ctx 
context.Context, logger *slog.Logger, book *address.Book, _ for _, entry := range entries { address := entry.Address.String() - qres, err := client.NewLocal().ABCIQuery(ctx, "auth/accounts/"+address, []byte{}) + qres, err := n.Client().ABCIQuery(ctx, "auth/accounts/"+address, []byte{}) if err != nil { return fmt.Errorf("unable to query account %q: %w", address, err) } diff --git a/contribs/gnodev/go.mod b/contribs/gnodev/go.mod index c1850d1ff7b..8cce943a905 100644 --- a/contribs/gnodev/go.mod +++ b/contribs/gnodev/go.mod @@ -46,6 +46,7 @@ require ( github.com/dlclark/regexp2 v1.11.4 // indirect github.com/emicklei/dot v1.6.2 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -64,6 +65,7 @@ require ( github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/olahol/melody v1.4.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/peterbourgon/ff/v3 v3.4.0 // indirect github.com/pkg/errors v0.9.1 // indirect diff --git a/contribs/gnodev/go.sum b/contribs/gnodev/go.sum index 7a983abcfab..e556f94bf4a 100644 --- a/contribs/gnodev/go.sum +++ b/contribs/gnodev/go.sum @@ -110,6 +110,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= @@ -191,6 +193,8 @@ github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/contribs/gnodev/pkg/dev/node.go b/contribs/gnodev/pkg/dev/node.go index b0dd4a22e98..8ba0622ed9e 100644 --- a/contribs/gnodev/pkg/dev/node.go +++ b/contribs/gnodev/pkg/dev/node.go @@ -149,10 +149,15 @@ func NewDevNode(ctx context.Context, cfg *NodeConfig, pkgpaths ...string) (*Node pkgsModifier[qpath.Path] = qpath } + rpcClient, err := client.NewHTTPClient(cfg.TMConfig.RPC.ListenAddress) + if err != nil { + return nil, fmt.Errorf("unable to initialize RPC client: %w", err) + } + devnode := &Node{ loader: cfg.Loader, config: cfg, - client: 
client.NewLocal(), + client: rpcClient, emitter: cfg.Emitter, logger: cfg.Logger, startTime: startTime, diff --git a/contribs/gnogenesis/go.mod b/contribs/gnogenesis/go.mod index 35560ca4ac9..38e8b55091f 100644 --- a/contribs/gnogenesis/go.mod +++ b/contribs/gnogenesis/go.mod @@ -29,6 +29,7 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/emicklei/dot v1.6.2 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gofrs/flock v0.12.1 // indirect @@ -45,6 +46,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/olahol/melody v1.4.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/peterbourgon/ff/v3 v3.4.0 // indirect github.com/pkg/errors v0.9.1 // indirect diff --git a/contribs/gnogenesis/go.sum b/contribs/gnogenesis/go.sum index 4a62c7e0277..eaa3bbc3bd4 100644 --- a/contribs/gnogenesis/go.sum +++ b/contribs/gnogenesis/go.sum @@ -80,6 +80,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -142,6 +144,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/contribs/gnohealth/go.mod b/contribs/gnohealth/go.mod index 243008a367e..41a3d41f7e0 100644 --- a/contribs/gnohealth/go.mod +++ b/contribs/gnohealth/go.mod @@ -19,6 +19,7 @@ require ( github.com/cosmos/ics23/go v0.11.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/go-cmp v0.7.0 // indirect @@ -26,6 +27,7 @@ require ( github.com/gorilla/websocket v1.5.3 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/olahol/melody v1.4.0 // indirect 
github.com/peterbourgon/ff/v3 v3.4.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.23.0 // indirect diff --git a/contribs/gnohealth/go.sum b/contribs/gnohealth/go.sum index abe1d921e72..f6a372e931b 100644 --- a/contribs/gnohealth/go.sum +++ b/contribs/gnohealth/go.sum @@ -69,6 +69,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -117,6 +119,8 @@ github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QT github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/contribs/gnokeykc/go.mod b/contribs/gnokeykc/go.mod index 0d35985058a..a42c220f28c 100644 --- a/contribs/gnokeykc/go.mod +++ b/contribs/gnokeykc/go.mod @@ -27,6 +27,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/emicklei/dot v1.6.2 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect @@ -39,6 +40,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/olahol/melody v1.4.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/peterbourgon/ff/v3 v3.4.0 // indirect github.com/pkg/errors v0.9.1 // indirect diff --git a/contribs/gnokeykc/go.sum b/contribs/gnokeykc/go.sum index cc01ad5cf47..3c63a13778c 100644 --- a/contribs/gnokeykc/go.sum +++ b/contribs/gnokeykc/go.sum @@ -81,6 +81,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod 
h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -140,6 +142,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/contribs/gnomigrate/go.mod b/contribs/gnomigrate/go.mod index 2a7b6bfc91a..27163d9ec6f 100644 --- a/contribs/gnomigrate/go.mod +++ b/contribs/gnomigrate/go.mod @@ -30,6 +30,7 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/emicklei/dot v1.6.2 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gofrs/flock v0.12.1 // indirect @@ -46,6 +47,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/olahol/melody v1.4.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/peterbourgon/ff/v3 v3.4.0 // indirect github.com/pkg/errors v0.9.1 // indirect diff --git a/contribs/gnomigrate/go.sum b/contribs/gnomigrate/go.sum index ef729bdf524..6ea28429b23 100644 --- a/contribs/gnomigrate/go.sum +++ b/contribs/gnomigrate/go.sum @@ -81,6 +81,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -143,6 +145,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo 
v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/contribs/tx-archive/go.mod b/contribs/tx-archive/go.mod index 7fadbd287f4..9b04ffdbde0 100644 --- a/contribs/tx-archive/go.mod +++ b/contribs/tx-archive/go.mod @@ -33,6 +33,7 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/emicklei/dot v1.6.2 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gofrs/flock v0.12.1 // indirect @@ -49,6 +50,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/olahol/melody v1.4.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect diff --git a/contribs/tx-archive/go.sum b/contribs/tx-archive/go.sum index 5a19e44339e..4e3c5fb7e92 100644 --- a/contribs/tx-archive/go.sum +++ b/contribs/tx-archive/go.sum @@ -81,6 +81,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -144,6 +146,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/go.mod b/go.mod index 5037dd1db7b..a58677e6d20 100644 --- a/go.mod +++ b/go.mod @@ -18,12 +18,15 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/emicklei/dot v1.6.2 github.com/fortytw2/leaktest v1.3.0 + github.com/go-chi/chi/v5 v5.2.3 github.com/gofrs/flock v0.12.1 github.com/golang/mock v1.6.0 github.com/google/gofuzz v1.2.0 + github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.3 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/libp2p/go-buffer-pool v0.1.0 + github.com/olahol/melody v1.4.0 github.com/pelletier/go-toml v1.9.5 github.com/peterbourgon/ff/v3 
v3.4.0 github.com/pmezard/go-difflib v1.0.0 @@ -76,7 +79,6 @@ require ( github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/go-cmp v0.6.0 // indirect - github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/klauspost/compress v1.16.0 // indirect github.com/kr/pretty v0.3.1 // indirect diff --git a/go.sum b/go.sum index ab39e34afbb..774d4a0693a 100644 --- a/go.sum +++ b/go.sum @@ -94,6 +94,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -158,6 +160,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/misc/autocounterd/go.mod b/misc/autocounterd/go.mod index 3b1562b37ab..e9838b1133e 100644 --- a/misc/autocounterd/go.mod +++ b/misc/autocounterd/go.mod @@ -15,6 +15,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/emicklei/dot v1.6.2 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gofrs/flock v0.12.1 // indirect @@ -26,6 +27,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/olahol/melody v1.4.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/peterbourgon/ff/v3 v3.4.0 // indirect github.com/pkg/errors v0.9.1 // indirect diff --git a/misc/autocounterd/go.sum b/misc/autocounterd/go.sum index c34609d099f..e93fec85f9a 100644 --- a/misc/autocounterd/go.sum +++ b/misc/autocounterd/go.sum @@ -77,6 +77,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-chi/chi/v5 
v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -134,6 +136,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/misc/loop/go.mod b/misc/loop/go.mod index 380edbef266..76a0b9a0495 100644 --- a/misc/loop/go.mod +++ b/misc/loop/go.mod @@ -40,6 +40,7 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/getsentry/sentry-go v0.35.0 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gofrs/flock v0.12.1 // indirect @@ -57,6 +58,7 @@ require ( github.com/moby/term v0.5.0 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/olahol/melody v1.4.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect diff --git a/misc/loop/go.sum b/misc/loop/go.sum index c16d1a479a7..e3dadedb750 100644 --- a/misc/loop/go.sum +++ b/misc/loop/go.sum @@ -99,6 +99,8 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/getsentry/sentry-go v0.35.0 h1:+FJNlnjJsZMG3g0/rmmP7GiKjQoUF5EXfEtBwtPtkzY= github.com/getsentry/sentry-go v0.35.0/go.mod h1:C55omcY9ChRQIUcVcGcs+Zdy4ZpQGvNJ7JYHIoSWOtE= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -165,6 +167,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= 
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/misc/stress-test/stress-test-many-posts/go.mod b/misc/stress-test/stress-test-many-posts/go.mod index dd26f99c422..228c7724c72 100644 --- a/misc/stress-test/stress-test-many-posts/go.mod +++ b/misc/stress-test/stress-test-many-posts/go.mod @@ -23,6 +23,7 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/emicklei/dot v1.6.2 // indirect github.com/gnolang/gno v0.1.2-0.20240826090356-651f5aac3706 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gofrs/flock v0.12.1 // indirect @@ -34,6 +35,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/olahol/melody v1.4.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect diff --git a/misc/stress-test/stress-test-many-posts/go.sum b/misc/stress-test/stress-test-many-posts/go.sum index 319505bff4a..195f106c296 100644 --- a/misc/stress-test/stress-test-many-posts/go.sum +++ b/misc/stress-test/stress-test-many-posts/go.sum @@ -86,6 +86,8 @@ github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/gnolang/gnonative/v4 v4.2.2 h1:MxhXQBapoWM42llE5IrU6IM743AKKXQAimFMEdrJIUI= github.com/gnolang/gnonative/v4 v4.2.2/go.mod h1:78NvbayMU0oV1yYLfQEQpVfLlLFWlADIuFhBgYhZPSk= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -146,6 +148,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olahol/melody v1.4.0 h1:Pa5SdeZL/zXPi1tJuMAPDbl4n3gQOThSL6G1p4qZ4SI= +github.com/olahol/melody v1.4.0/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= diff --git a/tm2/pkg/bft/config/config.go b/tm2/pkg/bft/config/config.go index 29734a69a72..d8b3e6ac7d0 100644 --- a/tm2/pkg/bft/config/config.go +++ b/tm2/pkg/bft/config/config.go @@ -9,11 +9,11 @@ import ( "time" "dario.cat/mergo" + rpc "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/config" abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" cns "github.com/gnolang/gno/tm2/pkg/bft/consensus/config" mem 
"github.com/gnolang/gno/tm2/pkg/bft/mempool/config" - rpc "github.com/gnolang/gno/tm2/pkg/bft/rpc/config" eventstore "github.com/gnolang/gno/tm2/pkg/bft/state/eventstore/types" "github.com/gnolang/gno/tm2/pkg/db" "github.com/gnolang/gno/tm2/pkg/errors" @@ -48,7 +48,7 @@ type Config struct { BaseConfig `toml:",squash"` // Options for services - RPC *rpc.RPCConfig `json:"rpc" toml:"rpc" comment:"##### rpc server configuration options #####"` + RPC *rpc.Config `json:"rpc" toml:"rpc" comment:"##### rpc server configuration options #####"` P2P *p2p.P2PConfig `json:"p2p" toml:"p2p" comment:"##### peer to peer configuration options #####"` Mempool *mem.MempoolConfig `json:"mempool" toml:"mempool" comment:"##### mempool configuration options #####"` Consensus *cns.ConsensusConfig `json:"consensus" toml:"consensus" comment:"##### consensus configuration options #####"` @@ -61,7 +61,7 @@ type Config struct { func DefaultConfig() *Config { return &Config{ BaseConfig: DefaultBaseConfig(), - RPC: rpc.DefaultRPCConfig(), + RPC: rpc.DefaultConfig(), P2P: p2p.DefaultP2PConfig(), Mempool: mem.DefaultMempoolConfig(), Consensus: cns.DefaultConsensusConfig(), @@ -178,7 +178,7 @@ func testP2PConfig() *p2p.P2PConfig { func TestConfig() *Config { return &Config{ BaseConfig: testBaseConfig(), - RPC: rpc.TestRPCConfig(), + RPC: rpc.DefaultConfig(), P2P: testP2PConfig(), Mempool: mem.TestMempoolConfig(), Consensus: cns.TestConsensusConfig(), @@ -191,7 +191,6 @@ func TestConfig() *Config { // SetRootDir sets the RootDir for all Config structs func (cfg *Config) SetRootDir(root string) *Config { cfg.BaseConfig.RootDir = root - cfg.RPC.RootDir = root cfg.P2P.RootDir = root cfg.Mempool.RootDir = root cfg.Consensus.RootDir = root @@ -229,9 +228,6 @@ func (cfg *Config) ValidateBasic() error { if err := cfg.BaseConfig.ValidateBasic(); err != nil { return err } - if err := cfg.RPC.ValidateBasic(); err != nil { - return errors.Wrap(err, "Error in [rpc] section") - } if err := cfg.P2P.ValidateBasic(); err != nil { return errors.Wrap(err, "Error in [p2p] section") } diff --git a/tm2/pkg/bft/consensus/wal_generator.go b/tm2/pkg/bft/consensus/wal_generator.go index 79c6e63c6a1..2cd3960e289 100644 --- a/tm2/pkg/bft/consensus/wal_generator.go +++ b/tm2/pkg/bft/consensus/wal_generator.go @@ -18,11 +18,10 @@ func randPort() int { return base + random.RandIntn(spread) } -func makeAddrs() (string, string, string) { +func makeAddrs() (string, string) { start := randPort() - return fmt.Sprintf("tcp://0.0.0.0:%d", start), - fmt.Sprintf("tcp://0.0.0.0:%d", start+1), - fmt.Sprintf("tcp://0.0.0.0:%d", start+2) + return fmt.Sprintf("0.0.0.0:%d", start), + fmt.Sprintf("0.0.0.0:%d", start+1) } // getConfig returns a config and genesis file for test cases @@ -32,10 +31,9 @@ func getConfig(t *testing.T) (*cfg.Config, string) { c, genesisFile := cfg.ResetTestRoot(t.Name()) // and we use random ports to run in parallel - tm, rpc, grpc := makeAddrs() + tm, rpc := makeAddrs() c.P2P.ListenAddress = tm c.RPC.ListenAddress = rpc - c.RPC.GRPCListenAddress = grpc return c, genesisFile } diff --git a/tm2/pkg/bft/node/node.go b/tm2/pkg/bft/node/node.go index 7f1fb8ad132..41064040e49 100644 --- a/tm2/pkg/bft/node/node.go +++ b/tm2/pkg/bft/node/node.go @@ -6,20 +6,20 @@ package node import ( "fmt" "log/slog" - "net" "net/http" "strings" "sync" "time" - "github.com/rs/cors" - "github.com/gnolang/gno/tm2/pkg/bft/appconn" "github.com/gnolang/gno/tm2/pkg/bft/privval" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + 
"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server" "github.com/gnolang/gno/tm2/pkg/bft/state/eventstore/file" "github.com/gnolang/gno/tm2/pkg/p2p/conn" "github.com/gnolang/gno/tm2/pkg/p2p/discovery" p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types" + "github.com/go-chi/chi/v5" "github.com/gnolang/gno/tm2/pkg/amino" bc "github.com/gnolang/gno/tm2/pkg/bft/blockchain" @@ -29,7 +29,6 @@ import ( "github.com/gnolang/gno/tm2/pkg/bft/proxy" rpccore "github.com/gnolang/gno/tm2/pkg/bft/rpc/core" _ "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpcserver "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server" sm "github.com/gnolang/gno/tm2/pkg/bft/state" "github.com/gnolang/gno/tm2/pkg/bft/state/eventstore" "github.com/gnolang/gno/tm2/pkg/bft/state/eventstore/null" @@ -175,7 +174,7 @@ type Node struct { consensusState *cs.ConsensusState // latest consensus state consensusReactor *cs.ConsensusReactor // for participating in the consensus proxyApp appconn.AppConns // connection to the application - rpcListeners []net.Listener // rpc servers + rpcServer *rpccore.Server // the node's RPC server (TM) txEventStore eventstore.TxEventStore eventStoreService *eventstore.Service firstBlockSignal <-chan struct{} @@ -573,24 +572,31 @@ func (n *Node) OnStart() error { time.Sleep(genTime.Sub(now)) } - // Set up the GLOBAL variables in rpc/core which refer to this node. - // This is done separately from startRPC(), as the values in rpc/core are used, - // for instance, to set up Local clients (rpc/client) which work without - // a network connection. - n.configureRPC() - if n.config.RPC.Unsafe { - rpccore.AddUnsafeRoutes() - } - rpccore.Start() - // Start the RPC server before the P2P server // so we can eg. receive txs for the first block if n.config.RPC.ListenAddress != "" { - listeners, err := n.startRPC() - if err != nil { - return err + // Initialize the JSON-RPC pipeline + rpcServer := server.NewJSONRPC(server.WithLogger(n.Logger.With("json-rpc"))) + + // Setup the handlers with the RPC + rpccore.SetupABCI(rpcServer, n.proxyApp.Query()) + rpccore.SetupBlocks(rpcServer, n.blockStore, n.stateDB) + rpccore.SetupConsensus(rpcServer, n.consensusState, n.stateDB, n.sw) + rpccore.SetupHealth(rpcServer) + rpccore.SetupMempool(rpcServer, n.mempool, n.evsw) + rpccore.SetupNet(rpcServer, n.sw, n, n.genesisDoc) + rpccore.SetupTx(rpcServer, n.blockStore, n.stateDB) + rpccore.SetupStatus(rpcServer, n.buildStatus) + + // Register the mux routes + mux := rpcServer.SetupRoutes(chi.NewMux()) + + // Initialize and start the server + n.rpcServer = rpccore.New(mux, n.config.RPC, n.Logger.With("rpc-server")) + + if err := n.rpcServer.Start(); err != nil { + return fmt.Errorf("unable to start RPC server: %w", err) } - n.rpcListeners = listeners } // Start the transport. 
@@ -669,12 +675,9 @@ func (n *Node) OnStop() { n.isListening = false - // finally stop the listeners / external services - for _, l := range n.rpcListeners { - n.Logger.Info("Closing rpc listener", "listener", l) - if err := l.Close(); err != nil { - n.Logger.Error("Error closing listener", "listener", l, "err", err) - } + // Stop the RPC server + if err := n.rpcServer.Stop(); err != nil { + n.Logger.Error("unable to gracefully stop RPC server", "err", err) + } } @@ -683,118 +686,81 @@ func (n *Node) Ready() <-chan struct{} { return n.firstBlockSignal } -// configureRPC sets all variables in rpccore so they will serve -// rpc calls from this node -func (n *Node) configureRPC() { - rpccore.SetStateDB(n.stateDB) - rpccore.SetBlockStore(n.blockStore) - rpccore.SetConsensusState(n.consensusState) - rpccore.SetMempool(n.mempool) - rpccore.SetP2PPeers(n.sw) - rpccore.SetP2PTransport(n) - rpccore.SetPubKey(n.privValidator.PubKey()) - rpccore.SetGenesisDoc(n.genesisDoc) - rpccore.SetProxyAppQuery(n.proxyApp.Query()) - rpccore.SetGetFastSync(n.consensusReactor.FastSync) - rpccore.SetLogger(n.Logger.With("module", "rpc")) - rpccore.SetEventSwitch(n.evsw) - rpccore.SetConfig(*n.config.RPC) -} +// buildStatus builds the node's current status information +func (n *Node) buildStatus() (*ctypes.ResultStatus, error) { + pubKey := n.PrivValidator().PubKey() -func (n *Node) startRPC() (listeners []net.Listener, err error) { - defer func() { - if err != nil { - // Close all the created listeners on any error, instead of - // leaking them: https://github.com/gnolang/gno/issues/3639 - for _, ln := range listeners { - ln.Close() + validatorAtHeight := func(height int64) *types.Validator { + privValAddress := pubKey.Address() + + // If we're still at height h, search in the current validator set. + lastBlockHeight, vals := n.consensusState.GetValidators() + if lastBlockHeight == height { + for _, val := range vals { + if val.Address == privValAddress { + return val + } } } - }() - - listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ") - - config := rpcserver.DefaultConfig() - config.MaxBodyBytes = n.config.RPC.MaxBodyBytes - config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes - config.MaxOpenConnections = n.config.RPC.MaxOpenConnections - // If necessary adjust global WriteTimeout to ensure it's greater than - // TimeoutBroadcastTxCommit. - // See https://github.com/gnolang/gno/tm2/pkg/bft/issues/3435 - if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { - config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second - } - // we may expose the rpc over both a unix and tcp socket - var rebuildAddresses bool - listeners = make([]net.Listener, 0, len(listenAddrs)) - for _, listenAddr := range listenAddrs { - mux := http.NewServeMux() - rpcLogger := n.Logger.With("module", "rpc-server") - wmLogger := rpcLogger.With("protocol", "websocket") - wm := rpcserver.NewWebsocketManager(rpccore.Routes, - rpcserver.OnDisconnect(func(remoteAddr string) { - // any cleanup...
- // (we used to unsubscribe from all event subscriptions) - }), - rpcserver.ReadLimit(config.MaxBodyBytes), - ) - wm.SetLogger(wmLogger) - mux.HandleFunc("/websocket", wm.WebsocketHandler) - rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger) - if strings.HasPrefix(listenAddr, "tcp://") && strings.HasSuffix(listenAddr, ":0") { - rebuildAddresses = true - } - listener, err := rpcserver.Listen( - listenAddr, - config, - ) - if err != nil { - return nil, err + // If we've moved to the next height, retrieve the validator set from DB. + if lastBlockHeight > height { + vals, err := sm.LoadValidators(n.stateDB, height) + if err != nil { + return nil // should not happen + } + _, val := vals.GetByAddress(privValAddress) + return val } - var rootHandler http.Handler = mux - if n.config.RPC.IsCorsEnabled() { - corsMiddleware := cors.New(cors.Options{ - AllowedOrigins: n.config.RPC.CORSAllowedOrigins, - AllowedMethods: n.config.RPC.CORSAllowedMethods, - AllowedHeaders: n.config.RPC.CORSAllowedHeaders, - }) - rootHandler = corsMiddleware.Handler(mux) - } - if n.config.RPC.IsTLSEnabled() { - go rpcserver.StartHTTPAndTLSServer( - listener, - rootHandler, - n.config.RPC.CertFile(), - n.config.RPC.KeyFile(), - rpcLogger, - config, - ) - } else { - go rpcserver.StartHTTPServer( - listener, - rootHandler, - rpcLogger, - config, - ) - } + return nil + } + + var latestHeight int64 - listeners = append(listeners, listener) + if n.consensusReactor.FastSync() { + latestHeight = n.blockStore.Height() + } else { + latestHeight = n.consensusState.GetLastHeight() } - if rebuildAddresses { - n.config.RPC.ListenAddress = joinListenerAddresses(listeners) + + var ( + latestBlockMeta *types.BlockMeta + latestBlockHash []byte + latestAppHash []byte + latestBlockTimeNano int64 + ) + if latestHeight != 0 { + latestBlockMeta = n.blockStore.LoadBlockMeta(latestHeight) + latestBlockHash = latestBlockMeta.BlockID.Hash + latestAppHash = latestBlockMeta.Header.AppHash + latestBlockTimeNano = latestBlockMeta.Header.Time.UnixNano() } - return listeners, nil -} + latestBlockTime := time.Unix(0, latestBlockTimeNano) -func joinListenerAddresses(ll []net.Listener) string { - sl := make([]string, len(ll)) - for i, l := range ll { - sl[i] = l.Addr().Network() + "://" + l.Addr().String() + var votingPower int64 + if val := validatorAtHeight(latestHeight); val != nil { + votingPower = val.VotingPower } - return strings.Join(sl, ",") + + result := &ctypes.ResultStatus{ + NodeInfo: n.NodeInfo(), + SyncInfo: ctypes.SyncInfo{ + LatestBlockHash: latestBlockHash, + LatestAppHash: latestAppHash, + LatestBlockHeight: latestHeight, + LatestBlockTime: latestBlockTime, + CatchingUp: n.consensusReactor.FastSync(), + }, + ValidatorInfo: ctypes.ValidatorInfo{ + Address: pubKey.Address(), + PubKey: pubKey, + VotingPower: votingPower, + }, + } + + return result, nil } // Switch returns the Node's Switch. 
diff --git a/tm2/pkg/bft/rpc/client/batch.go b/tm2/pkg/bft/rpc/client/batch.go index eddcb90d4fb..b8af3fbaa40 100644 --- a/tm2/pkg/bft/rpc/client/batch.go +++ b/tm2/pkg/bft/rpc/client/batch.go @@ -9,7 +9,7 @@ import ( "github.com/gnolang/gno/tm2/pkg/amino" ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" rpcclient "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" "github.com/gnolang/gno/tm2/pkg/bft/types" ) @@ -96,7 +96,7 @@ func (b *RPCBatch) Send(ctx context.Context) ([]any, error) { return results, errors.Join(errs...) } -func (b *RPCBatch) addRequest(request rpctypes.RPCRequest, result any) { +func (b *RPCBatch) addRequest(request *spec.BaseJSONRequest, result any) { b.mux.Lock() defer b.mux.Unlock() @@ -107,319 +107,252 @@ func (b *RPCBatch) addRequest(request rpctypes.RPCRequest, result any) { b.batch.AddRequest(request) } -func (b *RPCBatch) Status() error { +func (b *RPCBatch) Status() { // Prepare the RPC request - request, err := newRequest( + request := newRequest( statusMethod, - map[string]any{}, + nil, ) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } b.addRequest(request, &ctypes.ResultStatus{}) - - return nil } -func (b *RPCBatch) ABCIInfo() error { +func (b *RPCBatch) ABCIInfo() { // Prepare the RPC request - request, err := newRequest( + request := newRequest( abciInfoMethod, - map[string]any{}, + nil, ) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } b.addRequest(request, &ctypes.ResultABCIInfo{}) - - return nil } -func (b *RPCBatch) ABCIQuery(path string, data []byte) error { - return b.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) +func (b *RPCBatch) ABCIQuery(path string, data []byte) { + b.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) } -func (b *RPCBatch) ABCIQueryWithOptions(path string, data []byte, opts ABCIQueryOptions) error { +func (b *RPCBatch) ABCIQueryWithOptions(path string, data []byte, opts ABCIQueryOptions) { // Prepare the RPC request - request, err := newRequest( + request := newRequest( abciQueryMethod, - map[string]any{ - "path": path, - "data": data, - "height": opts.Height, - "prove": opts.Prove, + []any{ + path, + data, + opts.Height, + opts.Prove, }, ) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } b.addRequest(request, &ctypes.ResultABCIQuery{}) - - return nil } -func (b *RPCBatch) BroadcastTxCommit(tx types.Tx) error { +func (b *RPCBatch) BroadcastTxCommit(tx types.Tx) { // Prepare the RPC request - request, err := newRequest( + request := newRequest( broadcastTxCommitMethod, - map[string]any{"tx": tx}, + []any{ + tx, + }, ) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } b.addRequest(request, &ctypes.ResultBroadcastTxCommit{}) - - return nil } -func (b *RPCBatch) BroadcastTxAsync(tx types.Tx) error { - return b.broadcastTX(broadcastTxAsyncMethod, tx) +func (b *RPCBatch) BroadcastTxAsync(tx types.Tx) { + b.broadcastTX(broadcastTxAsyncMethod, tx) } -func (b *RPCBatch) BroadcastTxSync(tx types.Tx) error { - return b.broadcastTX(broadcastTxSyncMethod, tx) +func (b *RPCBatch) BroadcastTxSync(tx types.Tx) { + b.broadcastTX(broadcastTxSyncMethod, tx) } -func (b *RPCBatch) broadcastTX(route string, tx types.Tx) error { +func (b *RPCBatch) broadcastTX(route string, tx types.Tx) { // Prepare the RPC request - request, err := newRequest( + request := newRequest( route, - 
map[string]any{"tx": tx}, + []any{ + tx, + }, ) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } b.addRequest(request, &ctypes.ResultBroadcastTx{}) - - return nil } -func (b *RPCBatch) UnconfirmedTxs(limit int) error { +func (b *RPCBatch) UnconfirmedTxs(limit int) { // Prepare the RPC request - request, err := newRequest( + request := newRequest( unconfirmedTxsMethod, - map[string]any{"limit": limit}, + []any{ + limit, + }, ) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } b.addRequest(request, &ctypes.ResultUnconfirmedTxs{}) - - return nil } -func (b *RPCBatch) NumUnconfirmedTxs() error { +func (b *RPCBatch) NumUnconfirmedTxs() { // Prepare the RPC request - request, err := newRequest( + request := newRequest( numUnconfirmedTxsMethod, - map[string]any{}, + nil, ) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } b.addRequest(request, &ctypes.ResultUnconfirmedTxs{}) - - return nil } -func (b *RPCBatch) NetInfo() error { +func (b *RPCBatch) NetInfo() { // Prepare the RPC request - request, err := newRequest( + request := newRequest( netInfoMethod, - map[string]any{}, + nil, ) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } b.addRequest(request, &ctypes.ResultNetInfo{}) - - return nil } -func (b *RPCBatch) DumpConsensusState() error { +func (b *RPCBatch) DumpConsensusState() { // Prepare the RPC request - request, err := newRequest( + request := newRequest( dumpConsensusStateMethod, - map[string]any{}, + nil, ) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } b.addRequest(request, &ctypes.ResultDumpConsensusState{}) - - return nil } -func (b *RPCBatch) ConsensusState() error { +func (b *RPCBatch) ConsensusState() { // Prepare the RPC request - request, err := newRequest( + request := newRequest( consensusStateMethod, - map[string]any{}, + nil, ) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } b.addRequest(request, &ctypes.ResultConsensusState{}) - - return nil } -func (b *RPCBatch) ConsensusParams(height *int64) error { - params := map[string]any{} +func (b *RPCBatch) ConsensusParams(height *int64) { + var v int64 if height != nil { - params["height"] = height + v = *height } // Prepare the RPC request - request, err := newRequest( + request := newRequest( consensusParamsMethod, - params, + []any{ + v, + }, ) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } b.addRequest(request, &ctypes.ResultConsensusParams{}) - - return nil } -func (b *RPCBatch) Health() error { +func (b *RPCBatch) Health() { // Prepare the RPC request - request, err := newRequest( + request := newRequest( healthMethod, - map[string]any{}, + nil, ) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } b.addRequest(request, &ctypes.ResultHealth{}) - - return nil } -func (b *RPCBatch) BlockchainInfo(minHeight, maxHeight int64) error { +func (b *RPCBatch) BlockchainInfo(minHeight, maxHeight int64) { // Prepare the RPC request - request, err := newRequest( + request := newRequest( blockchainMethod, - map[string]any{ - "minHeight": minHeight, - "maxHeight": maxHeight, + []any{ + minHeight, + maxHeight, }, ) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } b.addRequest(request, &ctypes.ResultBlockchainInfo{}) - - return nil } -func (b *RPCBatch) Genesis() error { +func (b *RPCBatch) Genesis() { // Prepare the RPC request - request, err := newRequest(genesisMethod, 
map[string]any{}) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } + request := newRequest(genesisMethod, nil) b.addRequest(request, &ctypes.ResultGenesis{}) - - return nil } -func (b *RPCBatch) Block(height *int64) error { - params := map[string]any{} +func (b *RPCBatch) Block(height *int64) { + var v int64 if height != nil { - params["height"] = height + v = *height } // Prepare the RPC request - request, err := newRequest(blockMethod, params) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } + request := newRequest( + blockMethod, + []any{ + v, + }, + ) b.addRequest(request, &ctypes.ResultBlock{}) - - return nil } -func (b *RPCBatch) BlockResults(height *int64) error { - params := map[string]any{} +func (b *RPCBatch) BlockResults(height *int64) { + var v int64 if height != nil { - params["height"] = height + v = *height } // Prepare the RPC request - request, err := newRequest(blockResultsMethod, params) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } + request := newRequest( + blockResultsMethod, + []any{ + v, + }, + ) b.addRequest(request, &ctypes.ResultBlockResults{}) - - return nil } -func (b *RPCBatch) Commit(height *int64) error { - params := map[string]any{} +func (b *RPCBatch) Commit(height *int64) { + var v int64 if height != nil { - params["height"] = height + v = *height } // Prepare the RPC request - request, err := newRequest(commitMethod, params) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } + request := newRequest( + commitMethod, + []any{ + v, + }, + ) b.addRequest(request, &ctypes.ResultCommit{}) - - return nil } -func (b *RPCBatch) Tx(hash []byte) error { +func (b *RPCBatch) Tx(hash []byte) { // Prepare the RPC request - request, err := newRequest( + request := newRequest( txMethod, - map[string]any{ - "hash": hash, + []any{ + hash, }, ) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } b.addRequest(request, &ctypes.ResultTx{}) - - return nil } -func (b *RPCBatch) Validators(height *int64) error { - params := map[string]any{} +func (b *RPCBatch) Validators(height *int64) { + var v int64 if height != nil { - params["height"] = height + v = *height } // Prepare the RPC request - request, err := newRequest(validatorsMethod, params) - if err != nil { - return fmt.Errorf("unable to create request, %w", err) - } + request := newRequest( + validatorsMethod, + []any{ + v, + }, + ) b.addRequest(request, &ctypes.ResultValidators{}) - - return nil } diff --git a/tm2/pkg/bft/rpc/client/batch_test.go b/tm2/pkg/bft/rpc/client/batch_test.go index bbd034e7e59..5f0745ac3b3 100644 --- a/tm2/pkg/bft/rpc/client/batch_test.go +++ b/tm2/pkg/bft/rpc/client/batch_test.go @@ -8,7 +8,7 @@ import ( abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types" ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" bfttypes "github.com/gnolang/gno/tm2/pkg/bft/types" p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types" "github.com/stretchr/testify/assert" @@ -26,10 +26,10 @@ func generateMockBatchClient( t.Helper() return &mockClient{ - sendBatchFn: func(_ context.Context, requests types.RPCRequests) (types.RPCResponses, error) { + sendBatchFn: func(_ context.Context, requests spec.BaseJSONRequests) (spec.BaseJSONResponses, error) { require.Len(t, requests, expectedRequests) - 
responses := make(types.RPCResponses, len(requests)) + responses := make(spec.BaseJSONResponses, len(requests)) for index, request := range requests { require.Equal(t, "2.0", request.JSONRPC) @@ -39,11 +39,13 @@ func generateMockBatchClient( result, err := amino.MarshalJSON(commonResult) require.NoError(t, err) - response := types.RPCResponse{ - JSONRPC: "2.0", - ID: request.ID, - Result: result, - Error: nil, + response := &spec.BaseJSONResponse{ + Result: result, + Error: nil, + BaseJSON: spec.BaseJSON{ + JSONRPC: spec.JSONRPCVersion, + ID: request.ID, + }, } responses[index] = response @@ -66,7 +68,7 @@ func TestRPCBatch_Count(t *testing.T) { assert.Equal(t, 0, batch.Count()) // Add a dummy request - require.NoError(t, batch.Status()) + batch.Status() // Make sure the request is enqueued assert.Equal(t, 1, batch.Count()) @@ -81,7 +83,7 @@ func TestRPCBatch_Clear(t *testing.T) { ) // Add a dummy request - require.NoError(t, batch.Status()) + batch.Status() // Make sure the request is enqueued assert.Equal(t, 1, batch.Count()) @@ -129,7 +131,7 @@ func TestRPCBatch_Send(t *testing.T) { // Enqueue the requests for range numRequests { - require.NoError(t, batch.Status()) + batch.Status() } // Send the batch @@ -165,7 +167,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { }, }, func(batch *RPCBatch) { - require.NoError(t, batch.Status()) + batch.Status() }, func(result any) any { castResult, ok := result.(*ctypes.ResultStatus) @@ -182,7 +184,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { }, }, func(batch *RPCBatch) { - require.NoError(t, batch.ABCIInfo()) + batch.ABCIInfo() }, func(result any) any { castResult, ok := result.(*ctypes.ResultABCIInfo) @@ -199,7 +201,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { }, }, func(batch *RPCBatch) { - require.NoError(t, batch.ABCIQuery("path", []byte("dummy"))) + batch.ABCIQuery("path", []byte("dummy")) }, func(result any) any { castResult, ok := result.(*ctypes.ResultABCIQuery) @@ -214,7 +216,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { Hash: []byte("dummy"), }, func(batch *RPCBatch) { - require.NoError(t, batch.BroadcastTxCommit([]byte("dummy"))) + batch.BroadcastTxCommit([]byte("dummy")) }, func(result any) any { castResult, ok := result.(*ctypes.ResultBroadcastTxCommit) @@ -229,7 +231,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { Hash: []byte("dummy"), }, func(batch *RPCBatch) { - require.NoError(t, batch.BroadcastTxAsync([]byte("dummy"))) + batch.BroadcastTxAsync([]byte("dummy")) }, func(result any) any { castResult, ok := result.(*ctypes.ResultBroadcastTx) @@ -244,7 +246,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { Hash: []byte("dummy"), }, func(batch *RPCBatch) { - require.NoError(t, batch.BroadcastTxSync([]byte("dummy"))) + batch.BroadcastTxSync([]byte("dummy")) }, func(result any) any { castResult, ok := result.(*ctypes.ResultBroadcastTx) @@ -259,7 +261,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { Count: 10, }, func(batch *RPCBatch) { - require.NoError(t, batch.UnconfirmedTxs(0)) + batch.UnconfirmedTxs(0) }, func(result any) any { castResult, ok := result.(*ctypes.ResultUnconfirmedTxs) @@ -274,7 +276,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { Count: 10, }, func(batch *RPCBatch) { - require.NoError(t, batch.NumUnconfirmedTxs()) + batch.NumUnconfirmedTxs() }, func(result any) any { castResult, ok := result.(*ctypes.ResultUnconfirmedTxs) @@ -289,7 +291,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { NPeers: 10, }, func(batch *RPCBatch) { - require.NoError(t, batch.NetInfo()) + batch.NetInfo() }, func(result any) any { 
castResult, ok := result.(*ctypes.ResultNetInfo) @@ -306,7 +308,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { }, }, func(batch *RPCBatch) { - require.NoError(t, batch.DumpConsensusState()) + batch.DumpConsensusState() }, func(result any) any { castResult, ok := result.(*ctypes.ResultDumpConsensusState) @@ -323,7 +325,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { }, }, func(batch *RPCBatch) { - require.NoError(t, batch.ConsensusState()) + batch.ConsensusState() }, func(result any) any { castResult, ok := result.(*ctypes.ResultConsensusState) @@ -338,7 +340,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { BlockHeight: 10, }, func(batch *RPCBatch) { - require.NoError(t, batch.ConsensusParams(nil)) + batch.ConsensusParams(nil) }, func(result any) any { castResult, ok := result.(*ctypes.ResultConsensusParams) @@ -351,7 +353,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { healthMethod, &ctypes.ResultHealth{}, func(batch *RPCBatch) { - require.NoError(t, batch.Health()) + batch.Health() }, func(result any) any { castResult, ok := result.(*ctypes.ResultHealth) @@ -366,7 +368,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { LastHeight: 100, }, func(batch *RPCBatch) { - require.NoError(t, batch.BlockchainInfo(0, 0)) + batch.BlockchainInfo(0, 0) }, func(result any) any { castResult, ok := result.(*ctypes.ResultBlockchainInfo) @@ -383,7 +385,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { }, }, func(batch *RPCBatch) { - require.NoError(t, batch.Genesis()) + batch.Genesis() }, func(result any) any { castResult, ok := result.(*ctypes.ResultGenesis) @@ -402,7 +404,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { }, }, func(batch *RPCBatch) { - require.NoError(t, batch.Block(nil)) + batch.Block(nil) }, func(result any) any { castResult, ok := result.(*ctypes.ResultBlock) @@ -417,7 +419,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { Height: 10, }, func(batch *RPCBatch) { - require.NoError(t, batch.BlockResults(nil)) + batch.BlockResults(nil) }, func(result any) any { castResult, ok := result.(*ctypes.ResultBlockResults) @@ -432,7 +434,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { CanonicalCommit: true, }, func(batch *RPCBatch) { - require.NoError(t, batch.Commit(nil)) + batch.Commit(nil) }, func(result any) any { castResult, ok := result.(*ctypes.ResultCommit) @@ -448,7 +450,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { Height: 10, }, func(batch *RPCBatch) { - require.NoError(t, batch.Tx([]byte("tx hash"))) + batch.Tx([]byte("tx hash")) }, func(result any) any { castResult, ok := result.(*ctypes.ResultTx) @@ -463,7 +465,7 @@ func TestRPCBatch_Endpoints(t *testing.T) { BlockHeight: 10, }, func(batch *RPCBatch) { - require.NoError(t, batch.Validators(nil)) + batch.Validators(nil) }, func(result any) any { castResult, ok := result.(*ctypes.ResultValidators) diff --git a/tm2/pkg/bft/rpc/client/client.go b/tm2/pkg/bft/rpc/client/client.go index ba787086b9d..eea6f086a72 100644 --- a/tm2/pkg/bft/rpc/client/client.go +++ b/tm2/pkg/bft/rpc/client/client.go @@ -11,7 +11,7 @@ import ( "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client/batch" "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client/http" "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client/ws" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" "github.com/gnolang/gno/tm2/pkg/bft/types" "github.com/rs/xid" ) @@ -108,13 +108,18 @@ func (c *RPCClient) NewBatch() *RPCBatch { } func (c *RPCClient) Status(ctx context.Context, heightGte *int64) (*ctypes.ResultStatus, error) { + var 
v int64 + if heightGte != nil { + v = *heightGte + } + return sendRequestCommon[ctypes.ResultStatus]( ctx, c.requestTimeout, c.caller, statusMethod, - map[string]any{ - "heightGte": heightGte, + []any{ + v, }, ) } @@ -125,7 +130,7 @@ func (c *RPCClient) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error c.requestTimeout, c.caller, abciInfoMethod, - map[string]any{}, + nil, ) } @@ -139,11 +144,11 @@ func (c *RPCClient) ABCIQueryWithOptions(ctx context.Context, path string, data c.requestTimeout, c.caller, abciQueryMethod, - map[string]any{ - "path": path, - "data": data, - "height": opts.Height, - "prove": opts.Prove, + []any{ + path, + data, + opts.Height, + opts.Prove, }, ) } @@ -154,7 +159,9 @@ func (c *RPCClient) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes c.requestTimeout, c.caller, broadcastTxCommitMethod, - map[string]any{"tx": tx}, + []any{ + tx, + }, ) } @@ -172,7 +179,9 @@ func (c *RPCClient) broadcastTX(ctx context.Context, route string, tx types.Tx) c.requestTimeout, c.caller, route, - map[string]any{"tx": tx}, + []any{ + tx, + }, ) } @@ -182,7 +191,9 @@ func (c *RPCClient) UnconfirmedTxs(ctx context.Context, limit int) (*ctypes.Resu c.requestTimeout, c.caller, unconfirmedTxsMethod, - map[string]any{"limit": limit}, + []any{ + limit, + }, ) } @@ -192,7 +203,7 @@ func (c *RPCClient) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconf c.requestTimeout, c.caller, numUnconfirmedTxsMethod, - map[string]any{}, + nil, ) } @@ -202,7 +213,7 @@ func (c *RPCClient) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) c.requestTimeout, c.caller, netInfoMethod, - map[string]any{}, + nil, ) } @@ -212,7 +223,7 @@ func (c *RPCClient) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpC c.requestTimeout, c.caller, dumpConsensusStateMethod, - map[string]any{}, + nil, ) } @@ -222,14 +233,14 @@ func (c *RPCClient) ConsensusState(ctx context.Context) (*ctypes.ResultConsensus c.requestTimeout, c.caller, consensusStateMethod, - map[string]any{}, + nil, ) } func (c *RPCClient) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { - params := map[string]any{} + var v int64 if height != nil { - params["height"] = height + v = *height } return sendRequestCommon[ctypes.ResultConsensusParams]( @@ -237,7 +248,9 @@ func (c *RPCClient) ConsensusParams(ctx context.Context, height *int64) (*ctypes c.requestTimeout, c.caller, consensusParamsMethod, - params, + []any{ + v, + }, ) } @@ -247,7 +260,7 @@ func (c *RPCClient) Health(ctx context.Context) (*ctypes.ResultHealth, error) { c.requestTimeout, c.caller, healthMethod, - map[string]any{}, + nil, ) } @@ -257,9 +270,9 @@ func (c *RPCClient) BlockchainInfo(ctx context.Context, minHeight, maxHeight int c.requestTimeout, c.caller, blockchainMethod, - map[string]any{ - "minHeight": minHeight, - "maxHeight": maxHeight, + []any{ + minHeight, + maxHeight, }, ) } @@ -270,14 +283,14 @@ func (c *RPCClient) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) c.requestTimeout, c.caller, genesisMethod, - map[string]any{}, + nil, ) } func (c *RPCClient) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { - params := map[string]any{} + var v int64 if height != nil { - params["height"] = height + v = *height } return sendRequestCommon[ctypes.ResultBlock]( @@ -285,14 +298,16 @@ func (c *RPCClient) Block(ctx context.Context, height *int64) (*ctypes.ResultBlo c.requestTimeout, c.caller, blockMethod, - params, + []any{ + v, + }, ) } func (c *RPCClient) 
BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) { - params := map[string]any{} + var v int64 if height != nil { - params["height"] = height + v = *height } return sendRequestCommon[ctypes.ResultBlockResults]( @@ -300,14 +315,16 @@ func (c *RPCClient) BlockResults(ctx context.Context, height *int64) (*ctypes.Re c.requestTimeout, c.caller, blockResultsMethod, - params, + []any{ + v, + }, ) } func (c *RPCClient) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { - params := map[string]any{} + var v int64 if height != nil { - params["height"] = height + v = *height } return sendRequestCommon[ctypes.ResultCommit]( @@ -315,7 +332,9 @@ func (c *RPCClient) Commit(ctx context.Context, height *int64) (*ctypes.ResultCo c.requestTimeout, c.caller, commitMethod, - params, + []any{ + v, + }, ) } @@ -325,16 +344,16 @@ func (c *RPCClient) Tx(ctx context.Context, hash []byte) (*ctypes.ResultTx, erro c.requestTimeout, c.caller, txMethod, - map[string]any{ - "hash": hash, + []any{ + hash, }, ) } func (c *RPCClient) Validators(ctx context.Context, height *int64) (*ctypes.ResultValidators, error) { - params := map[string]any{} + var v int64 if height != nil { - params["height"] = height + v = *height } return sendRequestCommon[ctypes.ResultValidators]( @@ -342,16 +361,20 @@ func (c *RPCClient) Validators(ctx context.Context, height *int64) (*ctypes.Resu c.requestTimeout, c.caller, validatorsMethod, - params, + []any{ + v, + }, ) } // newRequest creates a new request based on the method // and given params -func newRequest(method string, params map[string]any) (rpctypes.RPCRequest, error) { - id := rpctypes.JSONRPCStringID(xid.New().String()) - - return rpctypes.MapToRequest(id, method, params) +func newRequest(method string, params []any) *spec.BaseJSONRequest { + return spec.NewJSONRequest( + spec.JSONRPCStringID(xid.New().String()), + method, + params, + ) } // sendRequestCommon is the common request creation, sending, and parsing middleware @@ -360,13 +383,10 @@ func sendRequestCommon[T any]( timeout time.Duration, caller rpcclient.Client, method string, - params map[string]any, + params []any, ) (*T, error) { // Prepare the RPC request - request, err := newRequest(method, params) - if err != nil { - return nil, err - } + request := newRequest(method, params) ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() diff --git a/tm2/pkg/bft/rpc/client/client_test.go b/tm2/pkg/bft/rpc/client/client_test.go index 63ede3e50f1..de0609fafb9 100644 --- a/tm2/pkg/bft/rpc/client/client_test.go +++ b/tm2/pkg/bft/rpc/client/client_test.go @@ -2,9 +2,6 @@ package client import ( "context" - "encoding/base64" - "encoding/json" - "fmt" "testing" "time" @@ -12,7 +9,7 @@ import ( abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types" ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" bfttypes "github.com/gnolang/gno/tm2/pkg/bft/types" p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types" "github.com/stretchr/testify/assert" @@ -23,7 +20,7 @@ import ( func generateMockRequestClient( t *testing.T, method string, - verifyParamsFn func(*testing.T, map[string]any), + verifyParamsFn func(*testing.T, []any), responseData any, ) *mockClient { t.Helper() @@ -31,29 +28,28 @@ func generateMockRequestClient( return &mockClient{ sendRequestFn: func( _ context.Context, - request 
types.RPCRequest, - ) (*types.RPCResponse, error) { + request *spec.BaseJSONRequest, + ) (*spec.BaseJSONResponse, error) { // Validate the request require.Equal(t, "2.0", request.JSONRPC) require.NotNil(t, request.ID) require.Equal(t, request.Method, method) // Validate the params - var params map[string]any - require.NoError(t, json.Unmarshal(request.Params, ¶ms)) - - verifyParamsFn(t, params) + verifyParamsFn(t, request.Params) // Prepare the result result, err := amino.MarshalJSON(responseData) require.NoError(t, err) // Prepare the response - response := &types.RPCResponse{ - JSONRPC: "2.0", - ID: request.ID, - Result: result, - Error: nil, + response := &spec.BaseJSONResponse{ + Result: result, // direct + Error: nil, + BaseJSON: spec.BaseJSON{ + JSONRPC: spec.JSONRPCVersion, + ID: request.ID, + }, } return response, nil @@ -65,7 +61,7 @@ func generateMockRequestClient( func generateMockRequestsClient( t *testing.T, method string, - verifyParamsFn func(*testing.T, map[string]any), + verifyParamsFn func(*testing.T, []any), responseData []any, ) *mockClient { t.Helper() @@ -73,9 +69,9 @@ func generateMockRequestsClient( return &mockClient{ sendBatchFn: func( _ context.Context, - requests types.RPCRequests, - ) (types.RPCResponses, error) { - responses := make(types.RPCResponses, 0, len(requests)) + requests spec.BaseJSONRequests, + ) (spec.BaseJSONResponses, error) { + responses := make(spec.BaseJSONResponses, 0, len(requests)) // Validate the requests for index, r := range requests { @@ -84,21 +80,20 @@ func generateMockRequestsClient( require.Equal(t, r.Method, method) // Validate the params - var params map[string]any - require.NoError(t, json.Unmarshal(r.Params, ¶ms)) - - verifyParamsFn(t, params) + verifyParamsFn(t, r.Params) // Prepare the result result, err := amino.MarshalJSON(responseData[index]) require.NoError(t, err) // Prepare the response - response := types.RPCResponse{ - JSONRPC: "2.0", - ID: r.ID, - Result: result, - Error: nil, + response := &spec.BaseJSONResponse{ + Result: result, // direct + Error: nil, + BaseJSON: spec.BaseJSON{ + JSONRPC: spec.JSONRPCVersion, + ID: r.ID, + }, } responses = append(responses, response) @@ -119,7 +114,7 @@ func TestRPCClient_Status(t *testing.T) { }, } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() assert.Len(t, params, 1) @@ -153,7 +148,7 @@ func TestRPCClient_ABCIInfo(t *testing.T) { }, } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() assert.Len(t, params, 0) @@ -191,13 +186,13 @@ func TestRPCClient_ABCIQuery(t *testing.T) { }, } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() - assert.Equal(t, path, params["path"]) - assert.Equal(t, base64.StdEncoding.EncodeToString(data), params["data"]) - assert.Equal(t, fmt.Sprintf("%d", opts.Height), params["height"]) - assert.Equal(t, opts.Prove, params["prove"]) + assert.Equal(t, path, params[0]) + assert.Equal(t, data, params[1]) + assert.Equal(t, opts.Height, params[2]) + assert.Equal(t, opts.Prove, params[3]) } mockClient = generateMockRequestClient( @@ -228,10 +223,10 @@ func TestRPCClient_BroadcastTxCommit(t *testing.T) { Hash: []byte("dummy"), } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() - assert.Equal(t, base64.StdEncoding.EncodeToString(tx), params["tx"]) + assert.Equal(t, bfttypes.Tx(tx), params[0]) } 
mockClient = generateMockRequestClient( @@ -262,10 +257,10 @@ func TestRPCClient_BroadcastTxAsync(t *testing.T) { Hash: []byte("dummy"), } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() - assert.Equal(t, base64.StdEncoding.EncodeToString(tx), params["tx"]) + assert.Equal(t, bfttypes.Tx(tx), params[0]) } mockClient = generateMockRequestClient( @@ -296,10 +291,10 @@ func TestRPCClient_BroadcastTxSync(t *testing.T) { Hash: []byte("dummy"), } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() - assert.Equal(t, base64.StdEncoding.EncodeToString(tx), params["tx"]) + assert.Equal(t, bfttypes.Tx(tx), params[0]) } mockClient = generateMockRequestClient( @@ -330,10 +325,10 @@ func TestRPCClient_UnconfirmedTxs(t *testing.T) { Count: 10, } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() - assert.Equal(t, fmt.Sprintf("%d", limit), params["limit"]) + assert.Equal(t, limit, params[0]) } mockClient = generateMockRequestClient( @@ -362,7 +357,7 @@ func TestRPCClient_NumUnconfirmedTxs(t *testing.T) { Count: 10, } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() assert.Len(t, params, 0) @@ -394,7 +389,7 @@ func TestRPCClient_NetInfo(t *testing.T) { NPeers: 10, } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() assert.Len(t, params, 0) @@ -428,7 +423,7 @@ func TestRPCClient_DumpConsensusState(t *testing.T) { }, } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() assert.Len(t, params, 0) @@ -462,7 +457,7 @@ func TestRPCClient_ConsensusState(t *testing.T) { }, } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() assert.Len(t, params, 0) @@ -496,10 +491,10 @@ func TestRPCClient_ConsensusParams(t *testing.T) { BlockHeight: blockHeight, } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() - assert.Equal(t, fmt.Sprintf("%d", blockHeight), params["height"]) + assert.Equal(t, blockHeight, params[0]) } mockClient = generateMockRequestClient( @@ -526,7 +521,7 @@ func TestRPCClient_Health(t *testing.T) { var ( expectedResult = &ctypes.ResultHealth{} - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() assert.Len(t, params, 0) @@ -561,11 +556,11 @@ func TestRPCClient_BlockchainInfo(t *testing.T) { LastHeight: 100, } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() - assert.Equal(t, fmt.Sprintf("%d", minHeight), params["minHeight"]) - assert.Equal(t, fmt.Sprintf("%d", maxHeight), params["maxHeight"]) + assert.Equal(t, minHeight, params[0]) + assert.Equal(t, maxHeight, params[1]) } mockClient = generateMockRequestClient( @@ -596,7 +591,7 @@ func TestRPCClient_Genesis(t *testing.T) { }, } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() assert.Len(t, params, 0) @@ -634,10 +629,10 @@ func TestRPCClient_Block(t *testing.T) { }, } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() - assert.Equal(t, fmt.Sprintf("%d", height), params["height"]) + 
assert.Equal(t, height, params[0]) } mockClient = generateMockRequestClient( @@ -668,10 +663,10 @@ func TestRPCClient_BlockResults(t *testing.T) { Height: height, } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() - assert.Equal(t, fmt.Sprintf("%d", height), params["height"]) + assert.Equal(t, height, params[0]) } mockClient = generateMockRequestClient( @@ -702,10 +697,10 @@ func TestRPCClient_Commit(t *testing.T) { CanonicalCommit: true, } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() - assert.Equal(t, fmt.Sprintf("%d", height), params["height"]) + assert.Equal(t, height, params[0]) } mockClient = generateMockRequestClient( @@ -737,10 +732,10 @@ func TestRPCClient_Tx(t *testing.T) { Height: 10, } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() - assert.Equal(t, base64.StdEncoding.EncodeToString(hash), params["hash"]) + assert.Equal(t, hash, params[0]) } mockClient = generateMockRequestClient( @@ -771,10 +766,10 @@ func TestRPCClient_Validators(t *testing.T) { BlockHeight: height, } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() - assert.Equal(t, fmt.Sprintf("%d", height), params["height"]) + assert.Equal(t, height, params[0]) } mockClient = generateMockRequestClient( @@ -827,7 +822,7 @@ func TestRPCClient_Batch(t *testing.T) { }, } - verifyFn = func(t *testing.T, params map[string]any) { + verifyFn = func(t *testing.T, params []any) { t.Helper() assert.Len(t, params, 0) @@ -847,9 +842,9 @@ func TestRPCClient_Batch(t *testing.T) { // Create the batch batch := c.NewBatch() - require.NoError(t, batch.Status()) - require.NoError(t, batch.Status()) - require.NoError(t, batch.Status()) + batch.Status() + batch.Status() + batch.Status() require.EqualValues(t, 3, batch.Count()) diff --git a/tm2/pkg/bft/rpc/client/e2e_test.go b/tm2/pkg/bft/rpc/client/e2e_test.go index d2d72304e04..9ea2249c4e9 100644 --- a/tm2/pkg/bft/rpc/client/e2e_test.go +++ b/tm2/pkg/bft/rpc/client/e2e_test.go @@ -12,7 +12,7 @@ import ( abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types" ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" bfttypes "github.com/gnolang/gno/tm2/pkg/bft/types" p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types" "github.com/gorilla/websocket" @@ -46,7 +46,7 @@ func defaultHTTPHandler( require.Equal(t, "application/json", r.Header.Get("content-type")) // Parse the message - var req types.RPCRequest + var req *spec.BaseJSONRequest require.NoError(t, json.NewDecoder(r.Body).Decode(&req)) // Basic request validation @@ -58,10 +58,13 @@ func defaultHTTPHandler( require.NoError(t, err) // Send a response back - response := types.RPCResponse{ - JSONRPC: "2.0", - ID: req.ID, - Result: result, + response := spec.BaseJSONResponse{ + Result: result, // direct + Error: nil, + BaseJSON: spec.BaseJSON{ + JSONRPC: spec.JSONRPCVersion, + ID: req.ID, + }, } // Marshal the response @@ -98,7 +101,7 @@ func defaultWSHandler( require.NoError(t, err) // Parse the message - var req types.RPCRequest + var req *spec.BaseJSONRequest require.NoError(t, json.Unmarshal(message, &req)) // Basic request validation @@ -110,10 +113,13 @@ func defaultWSHandler( require.NoError(t, 
err) // Send a response back - response := types.RPCResponse{ - JSONRPC: "2.0", - ID: req.ID, - Result: result, + response := spec.BaseJSONResponse{ + Result: result, // direct + Error: nil, + BaseJSON: spec.BaseJSON{ + JSONRPC: spec.JSONRPCVersion, + ID: req.ID, + }, } // Marshal the response diff --git a/tm2/pkg/bft/rpc/client/local.go b/tm2/pkg/bft/rpc/client/local.go deleted file mode 100644 index 275e0d7cf8a..00000000000 --- a/tm2/pkg/bft/rpc/client/local.go +++ /dev/null @@ -1,136 +0,0 @@ -package client - -import ( - "context" - "log/slog" - - "github.com/gnolang/gno/tm2/pkg/bft/rpc/core" - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" - "github.com/gnolang/gno/tm2/pkg/bft/types" - "github.com/gnolang/gno/tm2/pkg/log" -) - -// Local is a Client implementation that directly executes the rpc -// functions on a given node, without going through any network connection. -// -// As this connects directly to a Node instance, a Local client only works -// after the Node has been started. Note that the way this works is (alas) -// through the use of singletons in rpc/core. As a consequence, you may only -// have one active node at a time, and Local can only connect to that specific -// node. Keep this in mind for parallel tests, or attempting to simulate a -// network. -// -// This implementation is useful for: -// -// - Running tests against a node in-process without the overhead -// of going through an http server -// - Communication between an ABCI app and Tendermint core when they -// are compiled in process. -// -// For real clients, you probably want to use the [HTTP] client. For more -// powerful control during testing, you probably want the "client/mock" package. -type Local struct { - Logger *slog.Logger - ctx *rpctypes.Context -} - -// NewLocal configures a client that calls the Node directly through rpc/core, -// without requiring a network connection. See [Local]. -func NewLocal() *Local { - return &Local{ - Logger: log.NewNoopLogger(), - ctx: &rpctypes.Context{}, - } -} - -var _ Client = (*Local)(nil) - -// SetLogger allows to set a logger on the client. 
-func (c *Local) SetLogger(l *slog.Logger) { - c.Logger = l -} - -func (c *Local) Status(_ context.Context, heightGte *int64) (*ctypes.ResultStatus, error) { - return core.Status(c.ctx, heightGte) -} - -func (c *Local) ABCIInfo(_ context.Context) (*ctypes.ResultABCIInfo, error) { - return core.ABCIInfo(c.ctx) -} - -func (c *Local) ABCIQuery(ctx context.Context, path string, data []byte) (*ctypes.ResultABCIQuery, error) { - return c.ABCIQueryWithOptions(ctx, path, data, DefaultABCIQueryOptions) -} - -func (c *Local) ABCIQueryWithOptions(_ context.Context, path string, data []byte, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - return core.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove) -} - -func (c *Local) BroadcastTxCommit(_ context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - return core.BroadcastTxCommit(c.ctx, tx) -} - -func (c *Local) BroadcastTxAsync(_ context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return core.BroadcastTxAsync(c.ctx, tx) -} - -func (c *Local) BroadcastTxSync(_ context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return core.BroadcastTxSync(c.ctx, tx) -} - -func (c *Local) UnconfirmedTxs(_ context.Context, limit int) (*ctypes.ResultUnconfirmedTxs, error) { - return core.UnconfirmedTxs(c.ctx, limit) -} - -func (c *Local) NumUnconfirmedTxs(_ context.Context) (*ctypes.ResultUnconfirmedTxs, error) { - return core.NumUnconfirmedTxs(c.ctx) -} - -func (c *Local) NetInfo(_ context.Context) (*ctypes.ResultNetInfo, error) { - return core.NetInfo(c.ctx) -} - -func (c *Local) DumpConsensusState(_ context.Context) (*ctypes.ResultDumpConsensusState, error) { - return core.DumpConsensusState(c.ctx) -} - -func (c *Local) ConsensusState(_ context.Context) (*ctypes.ResultConsensusState, error) { - return core.ConsensusState(c.ctx) -} - -func (c *Local) ConsensusParams(_ context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { - return core.ConsensusParams(c.ctx, height) -} - -func (c *Local) Health(_ context.Context) (*ctypes.ResultHealth, error) { - return core.Health(c.ctx) -} - -func (c *Local) BlockchainInfo(_ context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - return core.BlockchainInfo(c.ctx, minHeight, maxHeight) -} - -func (c *Local) Genesis(_ context.Context) (*ctypes.ResultGenesis, error) { - return core.Genesis(c.ctx) -} - -func (c *Local) Block(_ context.Context, height *int64) (*ctypes.ResultBlock, error) { - return core.Block(c.ctx, height) -} - -func (c *Local) BlockResults(_ context.Context, height *int64) (*ctypes.ResultBlockResults, error) { - return core.BlockResults(c.ctx, height) -} - -func (c *Local) Commit(_ context.Context, height *int64) (*ctypes.ResultCommit, error) { - return core.Commit(c.ctx, height) -} - -func (c *Local) Validators(_ context.Context, height *int64) (*ctypes.ResultValidators, error) { - return core.Validators(c.ctx, height) -} - -func (c *Local) Tx(_ context.Context, hash []byte) (*ctypes.ResultTx, error) { - return core.Tx(c.ctx, hash) -} diff --git a/tm2/pkg/bft/rpc/client/mock_test.go b/tm2/pkg/bft/rpc/client/mock_test.go index bc2d92367bc..baf03bb9679 100644 --- a/tm2/pkg/bft/rpc/client/mock_test.go +++ b/tm2/pkg/bft/rpc/client/mock_test.go @@ -3,12 +3,12 @@ package client import ( "context" - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" ) type ( - sendRequestDelegate func(context.Context, types.RPCRequest) (*types.RPCResponse, error) - 
sendBatchDelegate func(context.Context, types.RPCRequests) (types.RPCResponses, error) + sendRequestDelegate func(context.Context, *spec.BaseJSONRequest) (*spec.BaseJSONResponse, error) + sendBatchDelegate func(context.Context, spec.BaseJSONRequests) (spec.BaseJSONResponses, error) closeDelegate func() error ) @@ -18,7 +18,7 @@ type mockClient struct { closeFn closeDelegate } -func (m *mockClient) SendRequest(ctx context.Context, request types.RPCRequest) (*types.RPCResponse, error) { +func (m *mockClient) SendRequest(ctx context.Context, request *spec.BaseJSONRequest) (*spec.BaseJSONResponse, error) { if m.sendRequestFn != nil { return m.sendRequestFn(ctx, request) } @@ -26,7 +26,7 @@ func (m *mockClient) SendRequest(ctx context.Context, request types.RPCRequest) return nil, nil } -func (m *mockClient) SendBatch(ctx context.Context, requests types.RPCRequests) (types.RPCResponses, error) { +func (m *mockClient) SendBatch(ctx context.Context, requests spec.BaseJSONRequests) (spec.BaseJSONResponses, error) { if m.sendBatchFn != nil { return m.sendBatchFn(ctx, requests) } diff --git a/tm2/pkg/bft/rpc/config/config.go b/tm2/pkg/bft/rpc/config/config.go deleted file mode 100644 index fe527450178..00000000000 --- a/tm2/pkg/bft/rpc/config/config.go +++ /dev/null @@ -1,174 +0,0 @@ -package config - -import ( - "errors" - "net/http" - "path/filepath" - "time" -) - -// ----------------------------------------------------------------------------- -// RPCConfig - -const ( - defaultConfigDir = "config" -) - -// RPCConfig defines the configuration options for the Tendermint RPC server -type RPCConfig struct { - RootDir string `json:"home" toml:"home"` - - // TCP or UNIX socket address for the RPC server to listen on - ListenAddress string `json:"laddr" toml:"laddr" comment:"TCP or UNIX socket address for the RPC server to listen on"` - - // A list of origins a cross-domain request can be executed from. - // If the special '*' value is present in the list, all origins will be allowed. - // An origin may contain a wildcard (*) to replace 0 or more characters (i.e.: http://*.domain.com). - // Only one wildcard can be used per origin. - CORSAllowedOrigins []string `json:"cors_allowed_origins" toml:"cors_allowed_origins" comment:"A list of origins a cross-domain request can be executed from\n Default value '[]' disables cors support\n Use '[\"*\"]' to allow any origin"` - - // A list of methods the client is allowed to use with cross-domain requests. - CORSAllowedMethods []string `json:"cors_allowed_methods" toml:"cors_allowed_methods" comment:"A list of methods the client is allowed to use with cross-domain requests"` - - // A list of non simple headers the client is allowed to use with cross-domain requests. - CORSAllowedHeaders []string `json:"cors_allowed_headers" toml:"cors_allowed_headers" comment:"A list of non simple headers the client is allowed to use with cross-domain requests"` - - // TCP or UNIX socket address for the gRPC server to listen on - // NOTE: This server only supports /broadcast_tx_commit - GRPCListenAddress string `json:"grpc_laddr" toml:"grpc_laddr" comment:"TCP or UNIX socket address for the gRPC server to listen on\n NOTE: This server only supports /broadcast_tx_commit"` - - // Maximum number of simultaneous connections. - // Does not include RPC (HTTP&WebSocket) connections. See max_open_connections - // If you want to accept a larger number than the default, make sure - // you increase your OS limits. - // 0 - unlimited. 
- GRPCMaxOpenConnections int `json:"grpc_max_open_connections" toml:"grpc_max_open_connections" comment:"Maximum number of simultaneous connections.\n Does not include RPC (HTTP&WebSocket) connections. See max_open_connections\n If you want to accept a larger number than the default, make sure\n you increase your OS limits.\n 0 - unlimited.\n Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}\n 1024 - 40 - 10 - 50 = 924 = ~900"` - - // Activate unsafe RPC commands like /dial_persistent_peers and /unsafe_flush_mempool - Unsafe bool `json:"unsafe" toml:"unsafe" comment:"Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool"` - - // Maximum number of simultaneous connections (including WebSocket). - // Does not include gRPC connections. See grpc_max_open_connections - // If you want to accept a larger number than the default, make sure - // you increase your OS limits. - // 0 - unlimited. - // Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} - // 1024 - 40 - 10 - 50 = 924 = ~900 - MaxOpenConnections int `json:"max_open_connections" toml:"max_open_connections" comment:"Maximum number of simultaneous connections (including WebSocket).\n Does not include gRPC connections. See grpc_max_open_connections\n If you want to accept a larger number than the default, make sure\n you increase your OS limits.\n 0 - unlimited.\n Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}\n 1024 - 40 - 10 - 50 = 924 = ~900"` - - // How long to wait for a tx to be committed during /broadcast_tx_commit - // WARNING: Using a value larger than 10s will result in increasing the - // global HTTP write timeout, which applies to all connections and endpoints. - // See https://github.com/gnolang/gno/tm2/pkg/bft/issues/3435 - TimeoutBroadcastTxCommit time.Duration `json:"timeout_broadcast_tx_commit" toml:"timeout_broadcast_tx_commit" comment:"How long to wait for a tx to be committed during /broadcast_tx_commit.\n WARNING: Using a value larger than 10s will result in increasing the\n global HTTP write timeout, which applies to all connections and endpoints.\n See https://github.com/tendermint/classic/issues/3435"` - - // Maximum size of request body, in bytes - MaxBodyBytes int64 `json:"max_body_bytes" toml:"max_body_bytes" comment:"Maximum size of request body, in bytes"` - - // Maximum size of request header, in bytes - MaxHeaderBytes int `json:"max_header_bytes" toml:"max_header_bytes" comment:"Maximum size of request header, in bytes"` - - // The path to a file containing certificate that is used to create the HTTPS server. - // Might be either absolute path or path related to tendermint's config directory. - // - // If the certificate is signed by a certificate authority, - // the certFile should be the concatenation of the server's certificate, any intermediates, - // and the CA's certificate. - // - // NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run. 
- TLSCertFile string `json:"tls_cert_file" toml:"tls_cert_file" comment:"The path to a file containing certificate that is used to create the HTTPS server.\n Might be either absolute path or path related to tendermint's config directory.\n If the certificate is signed by a certificate authority,\n the certFile should be the concatenation of the server's certificate, any intermediates,\n and the CA's certificate.\n NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run."` - - // The path to a file containing matching private key that is used to create the HTTPS server. - // Might be either absolute path or path related to tendermint's config directory. - // - // NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run. - TLSKeyFile string `json:"tls_key_file" toml:"tls_key_file" comment:"The path to a file containing matching private key that is used to create the HTTPS server.\n Might be either absolute path or path related to tendermint's config directory.\n NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run."` -} - -// DefaultRPCConfig returns a default configuration for the RPC server -func DefaultRPCConfig() *RPCConfig { - return &RPCConfig{ - ListenAddress: "tcp://127.0.0.1:26657", - CORSAllowedOrigins: []string{"*"}, - CORSAllowedMethods: []string{http.MethodHead, http.MethodGet, http.MethodPost, http.MethodOptions}, - CORSAllowedHeaders: []string{"Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"}, - GRPCListenAddress: "", - GRPCMaxOpenConnections: 900, - - Unsafe: false, - MaxOpenConnections: 900, - - TimeoutBroadcastTxCommit: 10 * time.Second, - - MaxBodyBytes: int64(1000000), // 1MB - MaxHeaderBytes: 1 << 20, // same as the net/http default - - TLSCertFile: "", - TLSKeyFile: "", - } -} - -// TestRPCConfig returns a configuration for testing the RPC server -func TestRPCConfig() *RPCConfig { - cfg := DefaultRPCConfig() - cfg.ListenAddress = "tcp://0.0.0.0:26657" - cfg.GRPCListenAddress = "tcp://0.0.0.0:26658" - cfg.Unsafe = true - return cfg -} - -// ValidateBasic performs basic validation (checking param bounds, etc.) and -// returns an error if any check fails. -func (cfg *RPCConfig) ValidateBasic() error { - if cfg.GRPCMaxOpenConnections < 0 { - return errors.New("grpc_max_open_connections can't be negative") - } - if cfg.MaxOpenConnections < 0 { - return errors.New("max_open_connections can't be negative") - } - if cfg.TimeoutBroadcastTxCommit < 0 { - return errors.New("timeout_broadcast_tx_commit can't be negative") - } - if cfg.MaxBodyBytes < 0 { - return errors.New("max_body_bytes can't be negative") - } - if cfg.MaxHeaderBytes < 0 { - return errors.New("max_header_bytes can't be negative") - } - return nil -} - -// IsCorsEnabled returns true if cross-origin resource sharing is enabled. -// XXX review. 
-func (cfg *RPCConfig) IsCorsEnabled() bool { - return len(cfg.CORSAllowedOrigins) != 0 -} - -func (cfg RPCConfig) KeyFile() string { - path := cfg.TLSKeyFile - if filepath.IsAbs(path) { - return path - } - return join(cfg.RootDir, filepath.Join(defaultConfigDir, path)) -} - -func (cfg RPCConfig) CertFile() string { - path := cfg.TLSCertFile - if filepath.IsAbs(path) { - return path - } - return join(cfg.RootDir, filepath.Join(defaultConfigDir, path)) -} - -func (cfg RPCConfig) IsTLSEnabled() bool { - return cfg.TLSCertFile != "" && cfg.TLSKeyFile != "" -} - -// helper function to make config creation independent of root dir -func join(root, path string) string { - if filepath.IsAbs(path) { - return path - } - - return filepath.Join(root, path) -} diff --git a/tm2/pkg/bft/rpc/core/README.md b/tm2/pkg/bft/rpc/core/README.md deleted file mode 100644 index d767c5f7169..00000000000 --- a/tm2/pkg/bft/rpc/core/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Tendermint RPC - -## Pagination - -Requests that return multiple items will be paginated to 30 items by default. -You can specify further pages with the ?page parameter. You can also set a -custom page size up to 100 with the ?per_page parameter. diff --git a/tm2/pkg/bft/rpc/core/abci.go b/tm2/pkg/bft/rpc/core/abci.go deleted file mode 100644 index aef90052f58..00000000000 --- a/tm2/pkg/bft/rpc/core/abci.go +++ /dev/null @@ -1,112 +0,0 @@ -package core - -import ( - abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" -) - -// Query the application for some information. -// -// ```shell -// curl 'localhost:26657/abci_query?path=""&data="abcd"&prove=false' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// result, err := client.ABCIQuery("", "abcd", true) -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": { -// "response": { -// "log": "exists", -// "height": "0", -// "proof": "010114FED0DAD959F36091AD761C922ABA3CBF1D8349990101020103011406AA2262E2F448242DF2C2607C3CDC705313EE3B0001149D16177BC71E445476174622EA559715C293740C", -// "value": "61626364", -// "key": "61626364", -// "index": "-1", -// "code": "0" -// } -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -// -// ### Query Parameters -// -// | Parameter | Type | Default | Required | Description | -// |-----------+--------+---------+----------+------------------------------------------------| -// | path | string | false | false | Path to the data ("/a/b/c") | -// | data | []byte | false | true | Data | -// | height | int64 | 0 | false | Height (0 means latest) | -// | prove | bool | false | false | Includes proof if true | -func ABCIQuery(ctx *rpctypes.Context, path string, data []byte, height int64, prove bool) (*ctypes.ResultABCIQuery, error) { - resQuery, err := proxyAppQuery.QuerySync(abci.RequestQuery{ - Path: path, - Data: data, - Height: height, - Prove: prove, - }) - if err != nil { - return nil, err - } - logger.Info("ABCIQuery", "path", path, "data", data, "result", resQuery) - return &ctypes.ResultABCIQuery{Response: resQuery}, nil -} - -// Get some info about the application. 
-// -// ```shell -// curl 'localhost:26657/abci_info' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// info, err := client.ABCIInfo() -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": { -// "response": { -// "data": "{\"size\":3}" -// } -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -func ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { - resInfo, err := proxyAppQuery.InfoSync(abci.RequestInfo{}) - if err != nil { - return nil, err - } - return &ctypes.ResultABCIInfo{Response: resInfo}, nil -} diff --git a/tm2/pkg/bft/rpc/core/abci/abci.go b/tm2/pkg/bft/rpc/core/abci/abci.go new file mode 100644 index 00000000000..d8ea630c703 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/abci/abci.go @@ -0,0 +1,87 @@ +package abci + +import ( + abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" + "github.com/gnolang/gno/tm2/pkg/bft/appconn" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/params" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" +) + +// Handler is the ABCI RPC handler +type Handler struct { + proxyAppQuery appconn.Query +} + +// NewHandler creates a new instance of the ABCI RPC handler +func NewHandler(proxyAppQuery appconn.Query) *Handler { + return &Handler{ + proxyAppQuery: proxyAppQuery, + } +} + +// QueryHandler queries the application (synchronously) for some information +// +// Params: +// - path string (optional, default "") +// - data []byte (required) +// - height int64 (optional, default 0) +// - prove bool (optional, default false) +func (h *Handler) QueryHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + const ( + idxPath = 0 + idxData = 1 + idxHeight = 2 + idxProve = 3 + ) + + path, err := params.AsString(p, idxPath) + if err != nil { + return nil, err + } + + data, err := params.AsBytes(p, idxData, true) + if err != nil { + return nil, err + } + + height, err := params.AsInt64(p, idxHeight) + if err != nil { + return nil, err + } + + prove, err := params.AsBool(p, idxProve) + if err != nil { + return nil, err + } + + resQuery, queryErr := h.proxyAppQuery.QuerySync(abci.RequestQuery{ + Path: path, + Data: data, + Height: height, + Prove: prove, + }) + if queryErr != nil { + return nil, spec.GenerateResponseError(queryErr) + } + + return &ctypes.ResultABCIQuery{Response: resQuery}, nil +} + +// InfoHandler gets some info about the application. 
+// +// No params +func (h *Handler) InfoHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + // Make sure there are no params + if len(p) > 0 { + return nil, spec.GenerateInvalidParamError(1) + } + + resInfo, err := h.proxyAppQuery.InfoSync(abci.RequestInfo{}) + if err != nil { + return nil, spec.GenerateResponseError(err) + } + + return &ctypes.ResultABCIInfo{Response: resInfo}, nil +} diff --git a/tm2/pkg/bft/rpc/core/abci/abci_test.go b/tm2/pkg/bft/rpc/core/abci/abci_test.go new file mode 100644 index 00000000000..83b6501710a --- /dev/null +++ b/tm2/pkg/bft/rpc/core/abci/abci_test.go @@ -0,0 +1,239 @@ +package abci + +import ( + "errors" + "testing" + + abciTypes "github.com/gnolang/gno/tm2/pkg/bft/abci/types" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHandler_QueryHandler(t *testing.T) { + t.Parallel() + + t.Run("Missing data param", func(t *testing.T) { + t.Parallel() + + var ( + mockQuery = &mockQuery{ + querySyncFn: func(_ abciTypes.RequestQuery) (abciTypes.ResponseQuery, error) { + t.FailNow() + + return abciTypes.ResponseQuery{}, nil + }, + } + + params = []any{ + "some/path", + } + ) + + h := NewHandler(mockQuery) + + res, err := h.QueryHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Query sync error", func(t *testing.T) { + t.Parallel() + + var ( + queryErr = errors.New("app query error") + params = []any{ + "some/path", + []byte("data"), + } + + mockQuery = &mockQuery{ + querySyncFn: func(_ abciTypes.RequestQuery) (abciTypes.ResponseQuery, error) { + return abciTypes.ResponseQuery{}, queryErr + }, + } + ) + + h := NewHandler(mockQuery) + + res, err := h.QueryHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + assert.Contains(t, err.Message, queryErr.Error()) + }) + + t.Run("Valid query", func(t *testing.T) { + t.Parallel() + + var ( + height = int64(10) + expectedResponse = abciTypes.ResponseQuery{ + Height: height, + } + + params = []any{ + "some/path", // path + []byte("payload"), // data + height, // height + true, // prove + } + + expectedRequest = abciTypes.RequestQuery{ + Path: "some/path", + Data: []byte("payload"), + Height: 10, + Prove: true, + } + + mockQuery = &mockQuery{ + querySyncFn: func(req abciTypes.RequestQuery) (abciTypes.ResponseQuery, error) { + assert.Equal(t, expectedRequest, req) + + return expectedResponse, nil + }, + } + ) + + h := NewHandler(mockQuery) + + res, err := h.QueryHandler(nil, params) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultABCIQuery) + require.True(t, ok) + + assert.Equal(t, expectedResponse, result.Response) + }) + + t.Run("Valid query with defaults", func(t *testing.T) { + t.Parallel() + + var ( + params = []any{ + // path="", height=0, prove=false defaults + nil, + []byte("data-only"), + } + expectedRequest = abciTypes.RequestQuery{ + Path: "", + Data: []byte("data-only"), + Height: 0, + Prove: false, + } + expectedResponse = abciTypes.ResponseQuery{} + + mockQuery = &mockQuery{ + querySyncFn: func(req abciTypes.RequestQuery) (abciTypes.ResponseQuery, error) { + assert.Equal(t, expectedRequest, req) + + return expectedResponse, nil + }, + } + ) + + h := NewHandler(mockQuery) + + res, err := h.QueryHandler(nil, params) + require.Nil(t, err) + 
require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultABCIQuery) + require.True(t, ok) + + assert.Equal(t, expectedResponse, result.Response) + }) +} + +func TestHandler_InfoHandler(t *testing.T) { + t.Parallel() + + t.Run("Params not allowed", func(t *testing.T) { + t.Parallel() + + var ( + params = []any{"unexpected"} + + mockQuery = &mockQuery{ + infoSyncFn: func(_ abciTypes.RequestInfo) (abciTypes.ResponseInfo, error) { + t.FailNow() + + return abciTypes.ResponseInfo{}, nil + }, + } + ) + + h := NewHandler(mockQuery) + + res, err := h.InfoHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Info error", func(t *testing.T) { + t.Parallel() + + var ( + infoErr = errors.New("info failed") + params = []any(nil) + + mockQuery = &mockQuery{ + infoSyncFn: func(req abciTypes.RequestInfo) (abciTypes.ResponseInfo, error) { + // The request should always be empty + assert.Equal(t, abciTypes.RequestInfo{}, req) + + return abciTypes.ResponseInfo{}, infoErr + }, + } + ) + + h := NewHandler(mockQuery) + + res, err := h.InfoHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + assert.Contains(t, err.Message, infoErr.Error()) + }) + + t.Run("Valid info", func(t *testing.T) { + t.Parallel() + + var ( + expectedResponse = abciTypes.ResponseInfo{ + ResponseBase: abciTypes.ResponseBase{ + Data: []byte("some-info"), + }, + ABCIVersion: "v1.2.3", + } + + mockQuery = &mockQuery{ + infoSyncFn: func(req abciTypes.RequestInfo) (abciTypes.ResponseInfo, error) { + // The request should always be empty + assert.Equal(t, abciTypes.RequestInfo{}, req) + + return expectedResponse, nil + }, + } + ) + + h := NewHandler(mockQuery) + + res, err := h.InfoHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultABCIInfo) + require.True(t, ok) + + assert.Equal(t, expectedResponse, result.Response) + }) +} diff --git a/tm2/pkg/bft/rpc/core/abci/mock_test.go b/tm2/pkg/bft/rpc/core/abci/mock_test.go new file mode 100644 index 00000000000..5e2660d0116 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/abci/mock_test.go @@ -0,0 +1,51 @@ +package abci + +import ( + abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" +) + +type ( + errorDelegate func() error + echoSyncDelegate func(string) (abci.ResponseEcho, error) + infoSyncDelegate func(abci.RequestInfo) (abci.ResponseInfo, error) + querySyncDelegate func(abci.RequestQuery) (abci.ResponseQuery, error) +) + +type mockQuery struct { + errorFn errorDelegate + echoSyncFn echoSyncDelegate + infoSyncFn infoSyncDelegate + querySyncFn querySyncDelegate +} + +func (m *mockQuery) Error() error { + if m.errorFn != nil { + return m.errorFn() + } + + return nil +} + +func (m *mockQuery) EchoSync(msg string) (abci.ResponseEcho, error) { + if m.echoSyncFn != nil { + return m.echoSyncFn(msg) + } + + return abci.ResponseEcho{}, nil +} + +func (m *mockQuery) InfoSync(info abci.RequestInfo) (abci.ResponseInfo, error) { + if m.infoSyncFn != nil { + return m.infoSyncFn(info) + } + + return abci.ResponseInfo{}, nil +} + +func (m *mockQuery) QuerySync(query abci.RequestQuery) (abci.ResponseQuery, error) { + if m.querySyncFn != nil { + return m.querySyncFn(query) + } + + return abci.ResponseQuery{}, nil +} diff --git a/tm2/pkg/bft/rpc/core/blocks.go b/tm2/pkg/bft/rpc/core/blocks.go deleted file mode 100644 index 9ca4e05a46f..00000000000 --- a/tm2/pkg/bft/rpc/core/blocks.go +++ /dev/null @@ -1,436 +0,0 @@ 
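For reviewers of the new handler layout: below is a minimal, hypothetical sketch of what a positional-parameter handler looks like when assembled from the helpers used by the abci package added above (params.AsString, spec.GenerateResponseError, and the (*metadata.Metadata, []any) handler signature). The EchoHandler name and its message parameter are invented for illustration and are not part of this change; the new blocks handlers later in this diff follow the same shape.

```go
package example

import (
	"errors"

	"github.com/gnolang/gno/tm2/pkg/bft/rpc/core/params"
	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata"
	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec"
)

// EchoHandler echoes back its single string parameter.
//
// Params:
// - message string (must be non-empty)
func EchoHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) {
	const idxMessage = 0

	// Decode the positional parameter, mirroring QueryHandler above
	message, err := params.AsString(p, idxMessage)
	if err != nil {
		return nil, err
	}

	if message == "" {
		// Application-level failures are wrapped into a JSON-RPC error,
		// just like the query and info handlers do
		return nil, spec.GenerateResponseError(errors.New("empty message"))
	}

	return message, nil
}
```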
-package core - -import ( - "fmt" - - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" - sm "github.com/gnolang/gno/tm2/pkg/bft/state" - "github.com/gnolang/gno/tm2/pkg/bft/types" -) - -// Get block headers for minHeight <= height <= maxHeight. -// Block headers are returned in descending order (highest first). -// -// ```shell -// curl 'localhost:26657/blockchain?minHeight=10&maxHeight=10' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// info, err := client.BlockchainInfo(10, 10) -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": { -// "block_metas": [ -// { -// "header": { -// "app_hash": "", -// "chain_id": "test-chain-6UTNIN", -// "height": "10", -// "time": "2017-05-29T15:05:53.877Z", -// "num_txs": "0", -// "last_block_id": { -// "parts": { -// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": "1" -// }, -// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" -// }, -// "last_commit_hash": "F31CC4282E50B3F2A58D763D233D76F26D26CABE", -// "data_hash": "", -// "validators_hash": "9365FC80F234C967BD233F5A3E2AB2F1E4B0E5AA" -// }, -// "block_id": { -// "parts": { -// "hash": "277A4DBEF91483A18B85F2F5677ABF9694DFA40F", -// "total": "1" -// }, -// "hash": "96B1D2F2D201BA4BC383EB8224139DB1294944E5" -// } -// } -// ], -// "last_height": "5493" -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -// -// -func BlockchainInfo(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - // maximum 20 block metas - const limit int64 = 20 - var err error - minHeight, maxHeight, err = filterMinMax(blockStore.Height(), minHeight, maxHeight, limit) - if err != nil { - return nil, err - } - logger.Debug("BlockchainInfoHandler", "maxHeight", maxHeight, "minHeight", minHeight) - - blockMetas := []*types.BlockMeta{} - for height := maxHeight; height >= minHeight; height-- { - blockMeta := blockStore.LoadBlockMeta(height) - blockMetas = append(blockMetas, blockMeta) - } - - return &ctypes.ResultBlockchainInfo{ - LastHeight: blockStore.Height(), - BlockMetas: blockMetas, - }, nil -} - -// error if either low or high are negative or low > high -// if low is 0 it defaults to 1, if high is 0 it defaults to height (block height). -// limit sets the maximum amounts of values included within [low,high] (inclusive), -// increasing low as necessary. -func filterMinMax(height, low, high, limit int64) (int64, int64, error) { - // filter negatives - if low < 0 || high < 0 { - return low, high, fmt.Errorf("heights must be non-negative") - } - - // adjust for default values - if low == 0 { - low = 1 - } - if high == 0 { - high = height - } - - // limit high to the height - high = min(height, high) - - // limit low to within `limit` of max - // so the total number of blocks returned will be `limit` - low = max(low, high-limit+1) - - if low > high { - return low, high, fmt.Errorf("min height %d can't be greater than max height %d", low, high) - } - return low, high, nil -} - -// Get block at a given height. -// If no height is provided, it will fetch the latest block. 
-// -// ```shell -// curl 'localhost:26657/block?height=10' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// info, err := client.Block(10) -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": { -// "block": { -// "last_commit": { -// "precommits": [ -// { -// "signature": { -// "data": "12C0D8893B8A38224488DC1DE6270DF76BB1A5E9DB1C68577706A6A97C6EC34FFD12339183D5CA8BC2F46148773823DE905B7F6F5862FD564038BB7AE03BF50D", -// "type": "ed25519" -// }, -// "block_id": { -// "parts": { -// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": "1" -// }, -// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" -// }, -// "type": "2", -// "round": "0", -// "height": "9", -// "validator_index": "0", -// "validator_address": "E89A51D60F68385E09E716D353373B11F8FACD62" -// } -// ], -// "blockID": { -// "parts": { -// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": "1" -// }, -// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" -// } -// }, -// "data": { -// "txs": [] -// }, -// "header": { -// "app_hash": "", -// "chain_id": "test-chain-6UTNIN", -// "height": "10", -// "time": "2017-05-29T15:05:53.877Z", -// "num_txs": "0", -// "last_block_id": { -// "parts": { -// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": "1" -// }, -// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" -// }, -// "last_commit_hash": "F31CC4282E50B3F2A58D763D233D76F26D26CABE", -// "data_hash": "", -// "validators_hash": "9365FC80F234C967BD233F5A3E2AB2F1E4B0E5AA" -// } -// }, -// "block_meta": { -// "header": { -// "app_hash": "", -// "chain_id": "test-chain-6UTNIN", -// "height": "10", -// "time": "2017-05-29T15:05:53.877Z", -// "num_txs": "0", -// "last_block_id": { -// "parts": { -// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": "1" -// }, -// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" -// }, -// "last_commit_hash": "F31CC4282E50B3F2A58D763D233D76F26D26CABE", -// "data_hash": "", -// "validators_hash": "9365FC80F234C967BD233F5A3E2AB2F1E4B0E5AA" -// }, -// "block_id": { -// "parts": { -// "hash": "277A4DBEF91483A18B85F2F5677ABF9694DFA40F", -// "total": "1" -// }, -// "hash": "96B1D2F2D201BA4BC383EB8224139DB1294944E5" -// } -// } -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -func Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { - storeHeight := blockStore.Height() - height, err := getHeight(storeHeight, heightPtr) - if err != nil { - return nil, err - } - - blockMeta := blockStore.LoadBlockMeta(height) - block := blockStore.LoadBlock(height) - return &ctypes.ResultBlock{BlockMeta: blockMeta, Block: block}, nil -} - -// Get block commit at a given height. -// If no height is provided, it will fetch the commit for the latest block. 
-// -// ```shell -// curl 'localhost:26657/commit?height=11' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// info, err := client.Commit(11) -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": { -// "canonical": true, -// "commit": { -// "precommits": [ -// { -// "signature": { -// "data": "00970429FEC652E9E21D106A90AE8C5413759A7488775CEF4A3F44DC46C7F9D941070E4FBE9ED54DF247FA3983359A0C3A238D61DE55C75C9116D72ABC9CF50F", -// "type": "ed25519" -// }, -// "block_id": { -// "parts": { -// "hash": "9E37CBF266BC044A779E09D81C456E653B89E006", -// "total": "1" -// }, -// "hash": "CC6E861E31CA4334E9888381B4A9137D1458AB6A" -// }, -// "type": "2", -// "round": "0", -// "height": "11", -// "validator_index": "0", -// "validator_address": "E89A51D60F68385E09E716D353373B11F8FACD62" -// } -// ], -// "blockID": { -// "parts": { -// "hash": "9E37CBF266BC044A779E09D81C456E653B89E006", -// "total": "1" -// }, -// "hash": "CC6E861E31CA4334E9888381B4A9137D1458AB6A" -// } -// }, -// "header": { -// "app_hash": "", -// "chain_id": "test-chain-6UTNIN", -// "height": "11", -// "time": "2017-05-29T15:05:54.893Z", -// "num_txs": "0", -// "last_block_id": { -// "parts": { -// "hash": "277A4DBEF91483A18B85F2F5677ABF9694DFA40F", -// "total": "1" -// }, -// "hash": "96B1D2F2D201BA4BC383EB8224139DB1294944E5" -// }, -// "last_commit_hash": "3CE0C9727CE524BA9CB7C91E28F08E2B94001087", -// "data_hash": "", -// "validators_hash": "9365FC80F234C967BD233F5A3E2AB2F1E4B0E5AA" -// } -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { - storeHeight := blockStore.Height() - height, err := getHeight(storeHeight, heightPtr) - if err != nil { - return nil, err - } - - header := blockStore.LoadBlockMeta(height).Header - - // If the next block has not been committed yet, - // use a non-canonical commit - if height == storeHeight { - commit := blockStore.LoadSeenCommit(height) - return ctypes.NewResultCommit(&header, commit, false), nil - } - - // Return the canonical commit (comes from the block at height+1) - commit := blockStore.LoadBlockCommit(height) - return ctypes.NewResultCommit(&header, commit, true), nil -} - -// BlockResults gets ABCIResults at a given height. -// If no height is provided, it will fetch results for the latest block. -// -// Results are for the height of the block containing the txs. 
-// Thus response.results.deliver_tx[5] is the results of executing -// getBlock(h).Txs[5] -// -// ```shell -// curl 'localhost:26657/block_results?height=10' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// info, err := client.BlockResults(10) -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "jsonrpc": "2.0", -// "id": "", -// "result": { -// "height": "39", -// "results": { -// "deliver_tx": [ -// { -// "tags": [ -// { -// "key": "YXBwLmNyZWF0b3I=", -// "value": "Q29zbW9zaGkgTmV0b3dva28=" -// } -// ] -// } -// ], -// "end_block": { -// "validator_updates": null -// }, -// "begin_block": {} -// } -// } -// } -// -// ``` -func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { - storeHeight := blockStore.Height() - height, err := getHeightWithMin(storeHeight, heightPtr, 0) - if err != nil { - return nil, err - } - - results, err := sm.LoadABCIResponses(stateDB, height) - if err != nil { - return nil, err - } - - res := &ctypes.ResultBlockResults{ - Height: height, - Results: results, - } - return res, nil -} - -func getHeight(currentHeight int64, heightPtr *int64) (int64, error) { - return getHeightWithMin(currentHeight, heightPtr, 1) -} - -func getHeightWithMin(currentHeight int64, heightPtr *int64, minVal int64) (int64, error) { - if heightPtr != nil { - height := *heightPtr - if height < minVal { - return 0, fmt.Errorf("height must be greater than or equal to %d", minVal) - } - if height > currentHeight { - return 0, fmt.Errorf("height must be less than or equal to the current blockchain height") - } - return height, nil - } - return currentHeight, nil -} diff --git a/tm2/pkg/bft/rpc/core/blocks/blocks.go b/tm2/pkg/bft/rpc/core/blocks/blocks.go new file mode 100644 index 00000000000..30d5178276f --- /dev/null +++ b/tm2/pkg/bft/rpc/core/blocks/blocks.go @@ -0,0 +1,230 @@ +package blocks + +import ( + "fmt" + + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/params" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/utils" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + "github.com/gnolang/gno/tm2/pkg/bft/state" + "github.com/gnolang/gno/tm2/pkg/bft/types" + dbm "github.com/gnolang/gno/tm2/pkg/db" +) + +// Handler is the blocks RPC handler +type Handler struct { + store state.BlockStore + stateDB dbm.DB +} + +// NewHandler creates a new instance of the blocks RPC handler +func NewHandler(store state.BlockStore, stateDB dbm.DB) *Handler { + return &Handler{ + store: store, + stateDB: stateDB, + } +} + +// BlockchainInfoHandler fetches block headers for a given range. 
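+// At most 20 block headers are returned per call.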
+// Block headers are returned in descending order (highest first) +// +// Params: +// - minHeight int64 (optional, default 1) +// - maxHeight int64 (optional, default latest height) +func (h *Handler) BlockchainInfoHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + const limit int64 = 20 + + const ( + idxMinHeight = 0 + idxMaxHeight = 1 + ) + + minHeight, err := params.AsInt64(p, idxMinHeight) + if err != nil { + return nil, err + } + + maxHeight, err := params.AsInt64(p, idxMaxHeight) + if err != nil { + return nil, err + } + + // Grab the latest height + storeHeight := h.store.Height() + + minHeight, maxHeight, filterErr := filterMinMax(storeHeight, minHeight, maxHeight, limit) + if filterErr != nil { + return nil, spec.GenerateResponseError(filterErr) + } + + blockMetas := make([]*types.BlockMeta, 0, maxHeight-minHeight+1) + for height := maxHeight; height >= minHeight; height-- { + blockMeta := h.store.LoadBlockMeta(height) + + if blockMeta == nil { + // This would be a huge problemo + continue + } + + blockMetas = append(blockMetas, blockMeta) + } + + return &ctypes.ResultBlockchainInfo{ + LastHeight: storeHeight, + BlockMetas: blockMetas, + }, nil +} + +// BlockHandler fetches the block at the given height. +// If no height is provided, it will fetch the latest block +// +// Params: +// - height int64 (optional, default latest height) +func (h *Handler) BlockHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + const idxHeight = 0 + + storeHeight := h.store.Height() + + height, err := params.AsInt64(p, idxHeight) + if err != nil { + return nil, err + } + + height, normalizeErr := utils.NormalizeHeight(storeHeight, height, 1) + if normalizeErr != nil { + return nil, spec.GenerateResponseError(normalizeErr) + } + + blockMeta := h.store.LoadBlockMeta(height) + if blockMeta == nil { + return nil, spec.GenerateResponseError( + fmt.Errorf("block meta not found for height %d", height), + ) + } + + block := h.store.LoadBlock(height) + if block == nil { + return nil, spec.GenerateResponseError( + fmt.Errorf("block not found for height %d", height), + ) + } + + return &ctypes.ResultBlock{ + BlockMeta: blockMeta, + Block: block, + }, nil +} + +// CommitHandler fetches the block commit for the given height. 
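+// For the latest height, the locally seen (non-canonical) commit is returned;
+// for any earlier height, the canonical commit stored with the following block is returned.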
+// If no height is provided, it will fetch the commit for the latest block +// +// Params: +// - height int64 (optional, default latest height) +func (h *Handler) CommitHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + const idxHeight = 0 + + storeHeight := h.store.Height() + + height, err := params.AsInt64(p, idxHeight) + if err != nil { + return nil, err + } + + height, normalizeErr := utils.NormalizeHeight(storeHeight, height, 1) + if normalizeErr != nil { + return nil, spec.GenerateResponseError(normalizeErr) + } + + blockMeta := h.store.LoadBlockMeta(height) + if blockMeta == nil { + return nil, spec.GenerateResponseError( + fmt.Errorf("block meta not found for height %d", height), + ) + } + + header := blockMeta.Header + + if height == storeHeight { + // latest, non-canonical commit + commit := h.store.LoadSeenCommit(height) + if commit == nil { + return nil, spec.GenerateResponseError( + fmt.Errorf("seen commit not found for height %d", height), + ) + } + + return ctypes.NewResultCommit(&header, commit, false), nil + } + + // canonical commit (from height+1) + commit := h.store.LoadBlockCommit(height) + if commit == nil { + return nil, spec.GenerateResponseError( + fmt.Errorf("canonical commit not found for height %d", height), + ) + } + + return ctypes.NewResultCommit(&header, commit, true), nil +} + +// BlockResultsHandler fetches the ABCIResults for the given height. +// If no height is provided, it will fetch results for the latest block +// +// Params: +// - height int64 (optional, default latest height) +func (h *Handler) BlockResultsHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + storeHeight := h.store.Height() + + height, err := params.AsInt64(p, 0) + if err != nil { + return nil, err + } + + height, normalizeErr := utils.NormalizeHeight(storeHeight, height, 0) + if normalizeErr != nil { + return nil, spec.GenerateResponseError(normalizeErr) + } + + results, loadErr := state.LoadABCIResponses(h.stateDB, height) + if loadErr != nil { + return nil, spec.GenerateResponseError(loadErr) + } + + return &ctypes.ResultBlockResults{ + Height: height, + Results: results, + }, nil +} + +// error if either low or high are negative or low > high +// if low is 0 it defaults to 1, if high is 0 it defaults to height (block height). +// limit sets the maximum amounts of values included within [low,high] (inclusive), +// increasing low as necessary. 
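+//
+// For example:
+//
+//	filterMinMax(10, 0, 0, 20)    // (1, 10): defaults applied for low and high
+//	filterMinMax(10, 5, 100, 20)  // (5, 10): high clamped to the current height
+//	filterMinMax(100, 1, 100, 20) // (81, 100): window limited to the 20 most recent heights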
+func filterMinMax(height, low, high, limit int64) (int64, int64, error) { + // filter negatives + if low < 0 || high < 0 { + return low, high, fmt.Errorf("heights must be non-negative") + } + + // adjust for default values + if low == 0 { + low = 1 + } + if high == 0 { + high = height + } + + // limit high to the height + high = min(height, high) + + // limit low to within `limit` of max + // so the total number of blocks returned will be `limit` + low = max(low, high-limit+1) + + if low > high { + return low, high, fmt.Errorf("min height %d can't be greater than max height %d", low, high) + } + return low, high, nil +} diff --git a/tm2/pkg/bft/rpc/core/blocks/blocks_test.go b/tm2/pkg/bft/rpc/core/blocks/blocks_test.go new file mode 100644 index 00000000000..b3a7305ac0f --- /dev/null +++ b/tm2/pkg/bft/rpc/core/blocks/blocks_test.go @@ -0,0 +1,815 @@ +package blocks + +import ( + "fmt" + "testing" + + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/mock" + "github.com/gnolang/gno/tm2/pkg/db/memdb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + sm "github.com/gnolang/gno/tm2/pkg/bft/state" + "github.com/gnolang/gno/tm2/pkg/bft/types" +) + +func TestHandler_BlockchainInfoHandler(t *testing.T) { + t.Parallel() + + t.Run("Invalid min height param", func(t *testing.T) { + t.Parallel() + + var ( + store = &mock.BlockStore{} + params = []any{"foo", int64(10)} + ) + + h := NewHandler(store, nil) + + res, err := h.BlockchainInfoHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Filter error negative heights", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + store = &mock.BlockStore{ + HeightFn: func() int64 { + return storeHeight + }, + } + params = []any{int64(-1), int64(5)} + ) + + h := NewHandler(store, nil) + + res, err := h.BlockchainInfoHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Valid range default (no params)", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 5 + + var ( + metas = map[int64]*types.BlockMeta{} + store = &mock.BlockStore{ + HeightFn: func() int64 { return storeHeight }, + LoadBlockMetaFn: func(h int64) *types.BlockMeta { + return metas[h] + }, + } + ) + + // Update meta range + for h := int64(1); h <= storeHeight; h++ { + metas[h] = &types.BlockMeta{ + Header: types.Header{Height: h}, + } + } + + h := NewHandler(store, nil) + + res, err := h.BlockchainInfoHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultBlockchainInfo) + require.True(t, ok) + + assert.Equal(t, storeHeight, result.LastHeight) + require.Len(t, result.BlockMetas, int(storeHeight)) + + expectedHeight := storeHeight + for i := 0; i < int(storeHeight); i++ { + assert.Equal(t, expectedHeight, result.BlockMetas[i].Header.Height) + + expectedHeight-- + } + }) + + t.Run("Valid range limited to 20", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 30 + + var ( + metas = map[int64]*types.BlockMeta{} + store = &mock.BlockStore{ + HeightFn: func() int64 { + return storeHeight + }, + LoadBlockMetaFn: func(h int64) *types.BlockMeta { + return metas[h] + }, + } + ) + + // Update the meta range + for h := int64(1); h <= storeHeight; h++ { + metas[h] = &types.BlockMeta{ + Header: 
types.Header{Height: h}, + } + } + + h := NewHandler(store, nil) + + res, err := h.BlockchainInfoHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultBlockchainInfo) + require.True(t, ok) + + require.Len(t, result.BlockMetas, 20) + + expectedHeight := storeHeight + for i := 0; i < 20; i++ { + assert.Equal(t, expectedHeight, result.BlockMetas[i].Header.Height) + + expectedHeight-- + } + }) +} + +func TestHandler_BlockHandler(t *testing.T) { + t.Parallel() + + t.Run("Invalid height param", func(t *testing.T) { + t.Parallel() + + var ( + store = &mock.BlockStore{} + params = []any{"foo"} + ) + + h := NewHandler(store, nil) + + res, err := h.BlockHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Height below minimum", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + store = &mock.BlockStore{ + HeightFn: func() int64 { return storeHeight }, + } + params = []any{int64(-1)} + ) + + h := NewHandler(store, nil) + + res, err := h.BlockHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Height above latest", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + store = &mock.BlockStore{ + HeightFn: func() int64 { return storeHeight }, + } + params = []any{storeHeight + 1} + ) + + h := NewHandler(store, nil) + + res, err := h.BlockHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Block meta missing", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + store = &mock.BlockStore{ + HeightFn: func() int64 { + return storeHeight + }, + LoadBlockMetaFn: func(_ int64) *types.BlockMeta { + return nil // explicit + }, + } + params = []any{storeHeight} + ) + + h := NewHandler(store, nil) + + res, err := h.BlockHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Block missing", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + store = &mock.BlockStore{ + HeightFn: func() int64 { return storeHeight }, + LoadBlockMetaFn: func(h int64) *types.BlockMeta { + if h == storeHeight { + return &types.BlockMeta{ + Header: types.Header{ + Height: h, + }, + } + } + + return nil + }, + LoadBlockFn: func(_ int64) *types.Block { + return nil // explicit + }, + } + params = []any{storeHeight} + ) + + h := NewHandler(store, nil) + + res, err := h.BlockHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Valid block latest by default", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + meta = &types.BlockMeta{ + Header: types.Header{ + Height: storeHeight, + }, + } + block = &types.Block{ + Header: types.Header{ + Height: storeHeight, + }, + } + + store = &mock.BlockStore{ + HeightFn: func() int64 { return storeHeight }, + LoadBlockMetaFn: func(h int64) *types.BlockMeta { + if h == storeHeight { + return meta + } + + return nil + }, + LoadBlockFn: func(h int64) *types.Block { + if h == storeHeight { + return block + } + + return nil + }, + } + ) + + h := NewHandler(store, nil) + + res, err := h.BlockHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultBlock) + 
require.True(t, ok) + + assert.Equal(t, meta, result.BlockMeta) + assert.Equal(t, block, result.Block) + }) + + t.Run("Valid block at explicit height", func(t *testing.T) { + t.Parallel() + + const ( + storeHeight int64 = 10 + blockHeight int64 = 7 + ) + + var ( + meta = &types.BlockMeta{ + Header: types.Header{ + Height: blockHeight, + }, + } + block = &types.Block{ + Header: types.Header{ + Height: blockHeight, + }, + } + + store = &mock.BlockStore{ + HeightFn: func() int64 { + return storeHeight + }, + LoadBlockMetaFn: func(h int64) *types.BlockMeta { + if h == blockHeight { + return meta + } + + return nil + }, + LoadBlockFn: func(h int64) *types.Block { + if h == blockHeight { + return block + } + + return nil + }, + } + ) + + h := NewHandler(store, nil) + + res, err := h.BlockHandler(nil, []any{blockHeight}) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultBlock) + require.True(t, ok) + + assert.Same(t, meta, result.BlockMeta) + assert.Same(t, block, result.Block) + }) +} + +func TestHandler_CommitHandler(t *testing.T) { + t.Parallel() + + t.Run("Invalid height param", func(t *testing.T) { + t.Parallel() + + var ( + store = &mock.BlockStore{} + h = NewHandler(store, nil) + params = []any{"foo"} + ) + + res, err := h.CommitHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Height below minimum", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + store = &mock.BlockStore{ + HeightFn: func() int64 { return storeHeight }, + } + params = []any{int64(-1)} + ) + + h := NewHandler(store, nil) + + res, err := h.CommitHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Block meta missing", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + store = &mock.BlockStore{ + HeightFn: func() int64 { + return storeHeight + }, + LoadBlockMetaFn: func(_ int64) *types.BlockMeta { + return nil // explicit + }, + } + params = []any{storeHeight} + ) + + h := NewHandler(store, nil) + + res, err := h.CommitHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Seen commit missing at latest", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + meta = &types.BlockMeta{ + Header: types.Header{ + Height: storeHeight, + }, + } + + store = &mock.BlockStore{ + HeightFn: func() int64 { + return storeHeight + }, + LoadBlockMetaFn: func(h int64) *types.BlockMeta { + if h == storeHeight { + return meta + } + + return nil + }, + LoadBlockCommitFn: func(_ int64) *types.Commit { + return nil // explicit + }, + } + params = []any{storeHeight} + ) + + h := NewHandler(store, nil) + + res, err := h.CommitHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Canonical commit missing for past height", func(t *testing.T) { + t.Parallel() + + const ( + storeHeight int64 = 10 + targetHeight int64 = 9 + ) + + var ( + meta = &types.BlockMeta{ + Header: types.Header{ + Height: storeHeight, + }, + } + + store = &mock.BlockStore{ + HeightFn: func() int64 { + return storeHeight + }, + LoadBlockMetaFn: func(h int64) *types.BlockMeta { + if h == targetHeight { + return meta + } + + return nil + }, + LoadBlockCommitFn: func(_ int64) *types.Commit { + return nil // explicit + }, + } + 
params = []any{targetHeight} + ) + + h := NewHandler(store, nil) + + res, err := h.CommitHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Non-canonical commit at latest height", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + meta = &types.BlockMeta{ + Header: types.Header{ + Height: storeHeight, + }, + } + commit = &types.Commit{} + + store = &mock.BlockStore{ + HeightFn: func() int64 { + return storeHeight + }, + LoadBlockMetaFn: func(h int64) *types.BlockMeta { + if h == storeHeight { + return meta + } + + return nil + }, + LoadSeenCommitFn: func(h int64) *types.Commit { + if h == storeHeight { + return commit + } + + return nil + }, + } + ) + + h := NewHandler(store, nil) + + res, err := h.CommitHandler(nil, []any{storeHeight}) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultCommit) + require.True(t, ok) + + assert.False(t, result.CanonicalCommit) + }) + + t.Run("Canonical commit at past height", func(t *testing.T) { + t.Parallel() + + const ( + storeHeight int64 = 10 + targetHeight int64 = 9 + ) + + store := &mock.BlockStore{ + HeightFn: func() int64 { + return storeHeight + }, + LoadBlockMetaFn: func(h int64) *types.BlockMeta { + if h == targetHeight { + return &types.BlockMeta{ + Header: types.Header{ + Height: h, + }, + } + } + + return nil + }, + LoadBlockCommitFn: func(h int64) *types.Commit { + if h == targetHeight { + return &types.Commit{} + } + + return nil + }, + } + + h := NewHandler(store, nil) + + res, err := h.CommitHandler(nil, []any{targetHeight}) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultCommit) + require.True(t, ok) + + assert.True(t, result.CanonicalCommit) + }) +} + +func TestHandler_BlockResultsHandler(t *testing.T) { + t.Parallel() + + t.Run("Invalid height param", func(t *testing.T) { + t.Parallel() + + var ( + store = &mock.BlockStore{} + stateDB = memdb.NewMemDB() + params = []any{"foo"} + ) + + h := NewHandler(store, stateDB) + + res, err := h.BlockResultsHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Height above latest", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + store = &mock.BlockStore{ + HeightFn: func() int64 { return storeHeight }, + } + stateDB = memdb.NewMemDB() + params = []any{storeHeight + 1} + ) + + h := NewHandler(store, stateDB) + + res, err := h.BlockResultsHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("ABCI response load error", func(t *testing.T) { + t.Parallel() + + const storeHeight int64 = 10 + + var ( + store = &mock.BlockStore{ + HeightFn: func() int64 { return storeHeight }, + } + stateDB = memdb.NewMemDB() + params = []any{storeHeight} + ) + + h := NewHandler(store, stateDB) + + res, err := h.BlockResultsHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Valid block results", func(t *testing.T) { + t.Parallel() + + const ( + storeHeight int64 = 10 + targetHeight int64 = 7 + ) + + var ( + expectedResponses = &sm.ABCIResponses{} + + store = &mock.BlockStore{ + HeightFn: func() int64 { + return storeHeight + }, + } + stateDB = memdb.NewMemDB() + ) + + h := NewHandler(store, stateDB) + + require.NotPanics(t, func() { + 
sm.SaveABCIResponses(stateDB, targetHeight, expectedResponses) + }) + + res, err := h.BlockResultsHandler(nil, []any{targetHeight}) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultBlockResults) + require.True(t, ok) + + assert.Equal(t, targetHeight, result.Height) + assert.NotNil(t, result.Results) + }) +} + +func TestFilterMinMax(t *testing.T) { + t.Parallel() + + t.Run("Negative heights", func(t *testing.T) { + t.Parallel() + + _, _, err := filterMinMax(10, -1, 5, 20) + require.Error(t, err) + + assert.Contains(t, err.Error(), "heights must be non-negative") + }) + + t.Run("Defaults within limit", func(t *testing.T) { + t.Parallel() + + low, high, err := filterMinMax(10, 0, 0, 20) + require.NoError(t, err) + + assert.Equal(t, int64(1), low) + assert.Equal(t, int64(10), high) + }) + + t.Run("Clamp high to current height", func(t *testing.T) { + t.Parallel() + + low, high, err := filterMinMax(10, 5, 100, 20) + require.NoError(t, err) + + assert.Equal(t, int64(5), low) + assert.Equal(t, int64(10), high) + }) + + t.Run("Limit window size", func(t *testing.T) { + t.Parallel() + + low, high, err := filterMinMax(100, 1, 100, 20) + require.NoError(t, err) + + assert.Equal(t, int64(81), low) + assert.Equal(t, int64(100), high) + }) + + t.Run("Low greater than high", func(t *testing.T) { + t.Parallel() + + low, high, err := filterMinMax(5, 10, 1, 20) + require.Error(t, err) + + assert.Greater(t, low, high) + assert.Contains(t, err.Error(), "min height") + }) +} + +func TestFilterMinMax_Legacy(t *testing.T) { + t.Parallel() + + cases := []struct { + minVal, maxVal int64 + height int64 + limit int64 + resultLength int64 + wantErr bool + }{ + // min > max + {0, 0, 0, 10, 0, true}, // min set to 1 + {0, 1, 0, 10, 0, true}, // max set to height (0) + {0, 0, 1, 10, 1, false}, // max set to height (1) + {2, 0, 1, 10, 0, true}, // max set to height (1) + {2, 1, 5, 10, 0, true}, + + // negative + {1, 10, 14, 10, 10, false}, // control + {-1, 10, 14, 10, 0, true}, + {1, -10, 14, 10, 0, true}, + {-9223372036854775808, -9223372036854775788, 100, 20, 0, true}, + + // check limit and height + {1, 1, 1, 10, 1, false}, + {1, 1, 5, 10, 1, false}, + {2, 2, 5, 10, 1, false}, + {1, 2, 5, 10, 2, false}, + {1, 5, 1, 10, 1, false}, + {1, 5, 10, 10, 5, false}, + {1, 15, 10, 10, 10, false}, + {1, 15, 15, 10, 10, false}, + {1, 15, 15, 20, 15, false}, + {1, 20, 15, 20, 15, false}, + {1, 20, 20, 20, 20, false}, + } + + for i, c := range cases { + caseString := fmt.Sprintf("test %d failed", i) + + minVal, maxVal, err := filterMinMax(c.height, c.minVal, c.maxVal, c.limit) + if c.wantErr { + require.Error(t, err, caseString) + } else { + require.NoError(t, err, caseString) + require.Equal(t, 1+maxVal-minVal, c.resultLength, caseString) + } + } +} diff --git a/tm2/pkg/bft/rpc/core/blocks_test.go b/tm2/pkg/bft/rpc/core/blocks_test.go deleted file mode 100644 index dd55784ada0..00000000000 --- a/tm2/pkg/bft/rpc/core/blocks_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package core - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestBlockchainInfo(t *testing.T) { - t.Parallel() - - cases := []struct { - minVal, maxVal int64 - height int64 - limit int64 - resultLength int64 - wantErr bool - }{ - // min > max - {0, 0, 0, 10, 0, true}, // min set to 1 - {0, 1, 0, 10, 0, true}, // max set to height (0) - {0, 0, 1, 10, 1, false}, // max set to height (1) - {2, 0, 1, 10, 0, true}, // max set to height (1) - {2, 1, 5, 10, 0, true}, - - // negative - {1, 10, 14, 10, 10, 
false}, // control - {-1, 10, 14, 10, 0, true}, - {1, -10, 14, 10, 0, true}, - {-9223372036854775808, -9223372036854775788, 100, 20, 0, true}, - - // check limit and height - {1, 1, 1, 10, 1, false}, - {1, 1, 5, 10, 1, false}, - {2, 2, 5, 10, 1, false}, - {1, 2, 5, 10, 2, false}, - {1, 5, 1, 10, 1, false}, - {1, 5, 10, 10, 5, false}, - {1, 15, 10, 10, 10, false}, - {1, 15, 15, 10, 10, false}, - {1, 15, 15, 20, 15, false}, - {1, 20, 15, 20, 15, false}, - {1, 20, 20, 20, 20, false}, - } - - for i, c := range cases { - caseString := fmt.Sprintf("test %d failed", i) - minVal, maxVal, err := filterMinMax(c.height, c.minVal, c.maxVal, c.limit) - if c.wantErr { - require.Error(t, err, caseString) - } else { - require.NoError(t, err, caseString) - require.Equal(t, 1+maxVal-minVal, c.resultLength, caseString) - } - } -} - -func TestGetHeight(t *testing.T) { - t.Parallel() - - cases := []struct { - currentHeight int64 - heightPtr *int64 - minVal int64 - res int64 - wantErr bool - }{ - // height >= min - {42, int64Ptr(0), 0, 0, false}, - {42, int64Ptr(1), 0, 1, false}, - - // height < min - {42, int64Ptr(0), 1, 0, true}, - - // nil height - {42, nil, 1, 42, false}, - } - - for i, c := range cases { - caseString := fmt.Sprintf("test %d failed", i) - res, err := getHeightWithMin(c.currentHeight, c.heightPtr, c.minVal) - if c.wantErr { - require.Error(t, err, caseString) - } else { - require.NoError(t, err, caseString) - require.Equal(t, res, c.res, caseString) - } - } -} - -func int64Ptr(v int64) *int64 { - return &v -} diff --git a/tm2/pkg/bft/rpc/core/consensus.go b/tm2/pkg/bft/rpc/core/consensus.go deleted file mode 100644 index e87e6b526d3..00000000000 --- a/tm2/pkg/bft/rpc/core/consensus.go +++ /dev/null @@ -1,360 +0,0 @@ -package core - -import ( - cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types" - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" - sm "github.com/gnolang/gno/tm2/pkg/bft/state" - "github.com/gnolang/gno/tm2/pkg/bft/types" -) - -// Get the validator set at the given block height. -// If no height is provided, it will fetch the current validator set. -// Note the validators are sorted by their address - this is the canonical -// order for the validators in the set as used in computing their Merkle root. -// -// ```shell -// curl 'localhost:26657/validators' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// state, err := client.Validators() -// ``` -// -// The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": { -// "validators": [ -// { -// "proposer_priority": "0", -// "voting_power": "10", -// "pub_key": { -// "data": "68DFDA7E50F82946E7E8546BED37944A422CD1B831E70DF66BA3B8430593944D", -// "type": "ed25519" -// }, -// "address": "E89A51D60F68385E09E716D353373B11F8FACD62" -// } -// ], -// "block_height": "5241" -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -func Validators(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultValidators, error) { - // The latest validator that we know is the - // NextValidator of the last block. 
- height := consensusState.GetState().LastBlockHeight + 1 - height, err := getHeight(height, heightPtr) - if err != nil { - return nil, err - } - - validators, err := sm.LoadValidators(stateDB, height) - if err != nil { - return nil, err - } - return &ctypes.ResultValidators{ - BlockHeight: height, - Validators: validators.Validators, - }, nil -} - -// DumpConsensusState dumps consensus state. -// UNSTABLE -// -// ```shell -// curl 'localhost:26657/dump_consensus_state' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// state, err := client.DumpConsensusState() -// ``` -// -// The above command returns JSON structured like this: -// -// ```json -// -// { -// "jsonrpc": "2.0", -// "id": "", -// "result": { -// "round_state": { -// "height": "7185", -// "round": "0", -// "step": "1", -// "start_time": "2018-05-12T13:57:28.440293621-07:00", -// "commit_time": "2018-05-12T13:57:27.440293621-07:00", -// "validators": { -// "validators": [ -// { -// "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244", -// "pub_key": { -// "type": "tendermint/PubKeyEd25519", -// "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" -// }, -// "voting_power": "10", -// "proposer_priority": "0" -// } -// ], -// "proposer": { -// "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244", -// "pub_key": { -// "type": "tendermint/PubKeyEd25519", -// "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" -// }, -// "voting_power": "10", -// "proposer_priority": "0" -// } -// }, -// "proposal": null, -// "proposal_block": null, -// "proposal_block_parts": null, -// "locked_round": "0", -// "locked_block": null, -// "locked_block_parts": null, -// "valid_round": "0", -// "valid_block": null, -// "valid_block_parts": null, -// "votes": [ -// { -// "round": "0", -// "prevotes": "_", -// "precommits": "_" -// } -// ], -// "commit_round": "-1", -// "last_commit": { -// "votes": [ -// "Vote{0:B5B3D40BE539 7184/00/2(Precommit) 14F946FA7EF0 /702B1B1A602A.../ @ 2018-05-12T20:57:27.342Z}" -// ], -// "votes_bit_array": "x", -// "peer_maj_23s": {} -// }, -// "last_validators": { -// "validators": [ -// { -// "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244", -// "pub_key": { -// "type": "tendermint/PubKeyEd25519", -// "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" -// }, -// "voting_power": "10", -// "proposer_priority": "0" -// } -// ], -// "proposer": { -// "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244", -// "pub_key": { -// "type": "tendermint/PubKeyEd25519", -// "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" -// }, -// "voting_power": "10", -// "proposer_priority": "0" -// } -// } -// }, -// "peers": [ -// { -// "node_address": "30ad1854af22506383c3f0e57fb3c7f90984c5e8@172.16.63.221:26656", -// "peer_state": { -// "round_state": { -// "height": "7185", -// "round": "0", -// "step": "1", -// "start_time": "2018-05-12T13:57:27.438039872-07:00", -// "proposal": false, -// "proposal_block_parts_header": { -// "total": "0", -// "hash": "" -// }, -// "proposal_block_parts": null, -// "proposal_pol_round": "-1", -// "proposal_pol": "_", -// "prevotes": "_", -// "precommits": "_", -// "last_commit_round": "0", -// "last_commit": "x", -// "catchup_commit_round": "-1", -// "catchup_commit": "_" -// }, -// "stats": { -// "last_vote_height": "7184", -// "votes": "255", -// "last_block_part_height": "7184", -// "block_parts": "255" -// } -// } -// } -// ] -// } -// } 
-// -// ``` -func DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { - // Get Peer consensus states. - peers := p2pPeers.Peers().List() - peerStates := make([]ctypes.PeerStateInfo, len(peers)) - for i, peer := range peers { - peerState, ok := peer.Get(types.PeerStateKey).(interface { - GetExposed() cstypes.PeerStateExposed - }) - if !ok { // peer does not have a state yet - continue - } - peerStateJSON, err := peerState.GetExposed().ToJSON() - if err != nil { - return nil, err - } - peerStates[i] = ctypes.PeerStateInfo{ - // Peer basic info. - NodeAddress: peer.SocketAddr().String(), - // Peer consensus state. - PeerState: peerStateJSON, - } - } - // Get self round state. - config := consensusState.GetConfigDeepCopy() - roundState := consensusState.GetRoundStateDeepCopy() - return &ctypes.ResultDumpConsensusState{ - Config: config, - RoundState: roundState, - Peers: peerStates, - }, nil -} - -// ConsensusState returns a concise summary of the consensus state. -// UNSTABLE -// -// ```shell -// curl 'localhost:26657/consensus_state' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// state, err := client.ConsensusState() -// ``` -// -// The above command returns JSON structured like this: -// -// ```json -// -// { -// "jsonrpc": "2.0", -// "id": "", -// "result": { -// "round_state": { -// "height/round/step": "9336/0/1", -// "start_time": "2018-05-14T10:25:45.72595357-04:00", -// "proposal_block_hash": "", -// "locked_block_hash": "", -// "valid_block_hash": "", -// "height_vote_set": [ -// { -// "round": "0", -// "prevotes": [ -// "nil-Vote" -// ], -// "prevotes_bit_array": "BA{1:_} 0/10 = 0.00", -// "precommits": [ -// "nil-Vote" -// ], -// "precommits_bit_array": "BA{1:_} 0/10 = 0.00" -// } -// ] -// } -// } -// } -// -// ``` -func ConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) { - // Get self round state. - rs := consensusState.GetRoundStateSimple() - return &ctypes.ResultConsensusState{RoundState: rs}, nil -} - -// Get the consensus parameters at the given block height. -// If no height is provided, it will fetch the current consensus params. 
-// -// ```shell -// curl 'localhost:26657/consensus_params' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// state, err := client.ConsensusParams() -// ``` -// -// The above command returns JSON structured like this: -// -// ```json -// -// { -// "jsonrpc": "2.0", -// "id": "", -// "result": { -// "block_height": "1", -// "consensus_params": { -// "block_size_params": { -// "max_txs_bytes": "22020096", -// "max_gas": "-1" -// }, -// "evidence_params": { -// "max_age": "100000" -// } -// } -// } -// } -// -// ``` -func ConsensusParams(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultConsensusParams, error) { - height := consensusState.GetState().LastBlockHeight + 1 - height, err := getHeight(height, heightPtr) - if err != nil { - return nil, err - } - - consensusparams, err := sm.LoadConsensusParams(stateDB, height) - if err != nil { - return nil, err - } - return &ctypes.ResultConsensusParams{ - BlockHeight: height, - ConsensusParams: consensusparams, - }, nil -} diff --git a/tm2/pkg/bft/rpc/core/consensus/consensus.go b/tm2/pkg/bft/rpc/core/consensus/consensus.go new file mode 100644 index 00000000000..2b46d1a3874 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/consensus/consensus.go @@ -0,0 +1,150 @@ +package consensus + +import ( + cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/params" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/utils" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + sm "github.com/gnolang/gno/tm2/pkg/bft/state" + "github.com/gnolang/gno/tm2/pkg/bft/types" + dbm "github.com/gnolang/gno/tm2/pkg/db" +) + +// Handler is the consensus RPC handler +type Handler struct { + consensusState Consensus + stateDB dbm.DB + peers ctypes.Peers +} + +// NewHandler creates a new instance of the consensus RPC handler +func NewHandler(consensusState Consensus, stateDB dbm.DB, peers ctypes.Peers) *Handler { + return &Handler{ + consensusState: consensusState, + stateDB: stateDB, + peers: peers, + } +} + +// ValidatorsHandler returns the validator set at the given height. +// If no height is provided, it will fetch the current validator set. 
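+// The current set corresponds to the NextValidators of the last committed block,
+// i.e. height LastBlockHeight+1.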
+// Note the validators are sorted by their address - this is the canonical +// order for the validators in the set as used in computing their Merkle root +// +// Params: +// - height int64 (optional, default latest height) +func (h *Handler) ValidatorsHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + const idxHeight = 0 + + heightVal, err := params.AsInt64(p, idxHeight) + if err != nil { + return nil, err + } + + latest := h.consensusState.GetState().LastBlockHeight + 1 + + height, normErr := utils.NormalizeHeight(latest, heightVal, 1) + if normErr != nil { + return nil, spec.GenerateResponseError(normErr) + } + + validators, loadErr := sm.LoadValidators(h.stateDB, height) + if loadErr != nil { + return nil, spec.GenerateResponseError(loadErr) + } + + return &ctypes.ResultValidators{ + BlockHeight: height, + Validators: validators.Validators, + }, nil +} + +// DumpConsensusStateHandler dumps the full consensus state (UNSTABLE) +// +// No params +func (h *Handler) DumpConsensusStateHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + if len(p) > 0 { + return nil, spec.GenerateInvalidParamError(1) + } + + var ( + peers = h.peers.Peers().List() + peerStates = make([]ctypes.PeerStateInfo, len(peers)) + ) + + for i, peer := range peers { + ps, ok := peer.Get(types.PeerStateKey).(interface { + GetExposed() cstypes.PeerStateExposed + }) + + if !ok { + continue + } + + psJSON, err := ps.GetExposed().ToJSON() + if err != nil { + return nil, spec.GenerateResponseError(err) + } + + peerStates[i] = ctypes.PeerStateInfo{ + NodeAddress: peer.SocketAddr().String(), + PeerState: psJSON, + } + } + + var ( + config = h.consensusState.GetConfigDeepCopy() + roundState = h.consensusState.GetRoundStateDeepCopy() + ) + + return &ctypes.ResultDumpConsensusState{ + Config: config, + RoundState: roundState, + Peers: peerStates, + }, nil +} + +// ConsensusStateHandler returns a concise summary of the consensus state (UNSTABLE) +// +// No params +func (h *Handler) ConsensusStateHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + if len(p) > 0 { + return nil, spec.GenerateInvalidParamError(1) + } + + return &ctypes.ResultConsensusState{ + RoundState: h.consensusState.GetRoundStateSimple(), + }, nil +} + +// ConsensusParamsHandler returns consensus params at a given height. 
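+// If no height is provided, it will fetch the current consensus params.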
+// +// Params: +// - height int64 (optional, default latest height) +func (h *Handler) ConsensusParamsHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + const idxHeight = 0 + + heightVal, err := params.AsInt64(p, idxHeight) + if err != nil { + return nil, err + } + + latest := h.consensusState.GetState().LastBlockHeight + 1 + + height, normErr := utils.NormalizeHeight(latest, heightVal, 1) + if normErr != nil { + return nil, spec.GenerateResponseError(normErr) + } + + consensusParams, loadErr := sm.LoadConsensusParams(h.stateDB, height) + if loadErr != nil { + return nil, spec.GenerateResponseError(loadErr) + } + + return &ctypes.ResultConsensusParams{ + BlockHeight: height, + ConsensusParams: consensusParams, + }, nil +} diff --git a/tm2/pkg/bft/rpc/core/consensus/consensus_test.go b/tm2/pkg/bft/rpc/core/consensus/consensus_test.go new file mode 100644 index 00000000000..9176661fedf --- /dev/null +++ b/tm2/pkg/bft/rpc/core/consensus/consensus_test.go @@ -0,0 +1,350 @@ +package consensus + +import ( + "testing" + + abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/mock" + "github.com/gnolang/gno/tm2/pkg/db/memdb" + "github.com/gnolang/gno/tm2/pkg/p2p" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cnscfg "github.com/gnolang/gno/tm2/pkg/bft/consensus/config" + cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + sm "github.com/gnolang/gno/tm2/pkg/bft/state" + "github.com/gnolang/gno/tm2/pkg/bft/types" +) + +func TestHandler_ValidatorsHandler(t *testing.T) { + t.Parallel() + + t.Run("Invalid height param", func(t *testing.T) { + t.Parallel() + + var ( + db = memdb.NewMemDB() + mockConsensus = &mockConsensus{ + getStateFn: func() sm.State { + return sm.State{ + LastBlockHeight: 10, + } + }, + } + params = []any{"not-an-int"} + ) + + h := NewHandler(mockConsensus, db, &mock.Peers{}) + + res, err := h.ValidatorsHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Height below minimum", func(t *testing.T) { + t.Parallel() + + var ( + db = memdb.NewMemDB() + mockConsensus = &mockConsensus{ + getStateFn: func() sm.State { + return sm.State{ + LastBlockHeight: 10, + } + }, + } + params = []any{int64(-1)} + ) + + h := NewHandler(mockConsensus, db, &mock.Peers{}) + + res, err := h.ValidatorsHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Validators not found", func(t *testing.T) { + t.Parallel() + + var ( + db = memdb.NewMemDB() + mockConsensus = &mockConsensus{ + getStateFn: func() sm.State { + return sm.State{ + LastBlockHeight: 0, + } + }, + } + ) + + h := NewHandler(mockConsensus, db, &mock.Peers{}) + + res, err := h.ValidatorsHandler(nil, nil) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Valid default height", func(t *testing.T) { + t.Parallel() + + var ( + db = memdb.NewMemDB() + + valSet = &types.ValidatorSet{} + consensusParams = abci.ConsensusParams{} + + st = sm.State{ + LastBlockHeight: 0, + Validators: valSet, + NextValidators: valSet, + LastHeightValidatorsChanged: 1, + ConsensusParams: consensusParams, + LastHeightConsensusParamsChanged: 1, + } + + mockConsensus = &mockConsensus{ + getStateFn: func() sm.State { 
+ return st + }, + } + ) + + // Seed the state + sm.SaveState(db, st) + + h := NewHandler(mockConsensus, db, &mock.Peers{}) + + res, err := h.ValidatorsHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultValidators) + require.True(t, ok) + + assert.Equal(t, int64(1), result.BlockHeight) + assert.Equal(t, valSet.Validators, result.Validators) + }) +} + +func TestHandler_DumpConsensusStateHandler(t *testing.T) { + t.Parallel() + + t.Run("Unexpected params", func(t *testing.T) { + t.Parallel() + + h := NewHandler(nil, nil, nil) + + res, err := h.DumpConsensusStateHandler(nil, []any{"extra"}) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Valid dump", func(t *testing.T) { + t.Parallel() + + var ( + cfg = &cnscfg.ConsensusConfig{} + rs = &cstypes.RoundState{} + + mockConsensus = &mockConsensus{ + getConfigDeepCopyFn: func() *cnscfg.ConsensusConfig { + return cfg + }, + getRoundStateDeepCopyFn: func() *cstypes.RoundState { + return rs + }, + } + + mockPeers = &mock.Peers{ + PeersFn: func() p2p.PeerSet { + return &mock.PeerSet{} + }, + } + ) + + h := NewHandler(mockConsensus, nil, mockPeers) + + res, err := h.DumpConsensusStateHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultDumpConsensusState) + require.True(t, ok) + + assert.Same(t, cfg, result.Config) + assert.Same(t, rs, result.RoundState) + assert.Len(t, result.Peers, 0) + }) +} + +func TestHandler_ConsensusStateHandler(t *testing.T) { + t.Parallel() + + t.Run("Unexpected params", func(t *testing.T) { + t.Parallel() + + h := NewHandler( + &mockConsensus{}, + memdb.NewMemDB(), + &mock.Peers{}, + ) + + res, err := h.ConsensusStateHandler(nil, []any{"extra"}) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Valid simple round state", func(t *testing.T) { + t.Parallel() + + var ( + simple = cstypes.RoundStateSimple{ + HeightRoundStep: "10/0/0", + } + + mockConsensus = &mockConsensus{ + getRoundStateSimpleFn: func() cstypes.RoundStateSimple { + return simple + }, + } + ) + + h := NewHandler(mockConsensus, memdb.NewMemDB(), &mock.Peers{}) + + res, err := h.ConsensusStateHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultConsensusState) + require.True(t, ok) + + assert.Equal(t, simple, result.RoundState) + }) +} + +func TestHandler_ConsensusParamsHandler(t *testing.T) { + t.Parallel() + + t.Run("Invalid height param", func(t *testing.T) { + t.Parallel() + + var ( + mockConsensus = &mockConsensus{ + getStateFn: func() sm.State { + return sm.State{ + LastBlockHeight: 10, + } + }, + } + db = memdb.NewMemDB() + params = []any{"not-an-int"} + ) + + h := NewHandler(mockConsensus, db, &mock.Peers{}) + + res, err := h.ConsensusParamsHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Height below minimum", func(t *testing.T) { + t.Parallel() + + var ( + mockConsensus = &mockConsensus{ + getStateFn: func() sm.State { + return sm.State{ + LastBlockHeight: 10, + } + }, + } + + db = memdb.NewMemDB() + params = []any{int64(-1)} + ) + + h := NewHandler(mockConsensus, db, &mock.Peers{}) + + res, err := h.ConsensusParamsHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Consensus params not 
found", func(t *testing.T) { + t.Parallel() + + var ( + mockConsensus = &mockConsensus{ + getStateFn: func() sm.State { + return sm.State{ + LastBlockHeight: 0, + } + }, + } + + db = memdb.NewMemDB() + ) + + h := NewHandler(mockConsensus, db, &mock.Peers{}) + + res, err := h.ConsensusParamsHandler(nil, nil) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Valid latest height", func(t *testing.T) { + t.Parallel() + + var ( + db = memdb.NewMemDB() + consensusParams = abci.ConsensusParams{} + + st = sm.State{ + LastBlockHeight: 0, + Validators: &types.ValidatorSet{}, + NextValidators: &types.ValidatorSet{}, + LastHeightValidatorsChanged: 1, + ConsensusParams: consensusParams, + LastHeightConsensusParamsChanged: 1, + } + + mockConsensus = &mockConsensus{ + getStateFn: func() sm.State { + return st + }, + } + ) + + sm.SaveState(db, st) + + h := NewHandler(mockConsensus, db, &mock.Peers{}) + + res, err := h.ConsensusParamsHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultConsensusParams) + require.True(t, ok) + + assert.Equal(t, int64(1), result.BlockHeight) + assert.Equal(t, consensusParams, result.ConsensusParams) + }) +} diff --git a/tm2/pkg/bft/rpc/core/consensus/mock_test.go b/tm2/pkg/bft/rpc/core/consensus/mock_test.go new file mode 100644 index 00000000000..ce3c5545f08 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/consensus/mock_test.go @@ -0,0 +1,53 @@ +package consensus + +import ( + cnscfg "github.com/gnolang/gno/tm2/pkg/bft/consensus/config" + cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types" + sm "github.com/gnolang/gno/tm2/pkg/bft/state" +) + +type ( + getConfigDeepCopyDelegate func() *cnscfg.ConsensusConfig + getStateDelegate func() sm.State + getRoundStateDeepCopyDelegate func() *cstypes.RoundState + getRoundStateSimpleDelegate func() cstypes.RoundStateSimple +) + +type mockConsensus struct { + getConfigDeepCopyFn getConfigDeepCopyDelegate + getStateFn getStateDelegate + getRoundStateDeepCopyFn getRoundStateDeepCopyDelegate + getRoundStateSimpleFn getRoundStateSimpleDelegate +} + +func (m *mockConsensus) GetConfigDeepCopy() *cnscfg.ConsensusConfig { + if m.getConfigDeepCopyFn != nil { + return m.getConfigDeepCopyFn() + } + + return nil +} + +func (m *mockConsensus) GetState() sm.State { + if m.getStateFn != nil { + return m.getStateFn() + } + + return sm.State{} +} + +func (m *mockConsensus) GetRoundStateDeepCopy() *cstypes.RoundState { + if m.getRoundStateDeepCopyFn != nil { + return m.getRoundStateDeepCopyFn() + } + + return nil +} + +func (m *mockConsensus) GetRoundStateSimple() cstypes.RoundStateSimple { + if m.getRoundStateSimpleFn != nil { + return m.getRoundStateSimpleFn() + } + + return cstypes.RoundStateSimple{} +} diff --git a/tm2/pkg/bft/rpc/core/consensus/types.go b/tm2/pkg/bft/rpc/core/consensus/types.go new file mode 100644 index 00000000000..344467ebd52 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/consensus/types.go @@ -0,0 +1,22 @@ +package consensus + +import ( + cnscfg "github.com/gnolang/gno/tm2/pkg/bft/consensus/config" + cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types" + sm "github.com/gnolang/gno/tm2/pkg/bft/state" +) + +// Consensus exposes read-only access to consensus state for RPC handlers +type Consensus interface { + // GetConfigDeepCopy returns a deep copy of the current consensus config + GetConfigDeepCopy() *cnscfg.ConsensusConfig + + // GetState returns a snapshot of the current consensus state + GetState() sm.State + + 
// GetRoundStateDeepCopy returns a deep copy of the full round state + GetRoundStateDeepCopy() *cstypes.RoundState + + // GetRoundStateSimple returns a concise summary of the round state + GetRoundStateSimple() cstypes.RoundStateSimple +} diff --git a/tm2/pkg/bft/rpc/core/dev.go b/tm2/pkg/bft/rpc/core/dev.go deleted file mode 100644 index a11c931b123..00000000000 --- a/tm2/pkg/bft/rpc/core/dev.go +++ /dev/null @@ -1,56 +0,0 @@ -package core - -import ( - "os" - "runtime/pprof" - - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" -) - -// UnsafeFlushMempool removes all transactions from the mempool. -func UnsafeFlushMempool(ctx *rpctypes.Context) (*ctypes.ResultUnsafeFlushMempool, error) { - mempool.Flush() - return &ctypes.ResultUnsafeFlushMempool{}, nil -} - -var profFile *os.File - -// UnsafeStartCPUProfiler starts a pprof profiler using the given filename. -func UnsafeStartCPUProfiler(ctx *rpctypes.Context, filename string) (*ctypes.ResultUnsafeProfile, error) { - var err error - profFile, err = os.Create(filename) - if err != nil { - return nil, err - } - err = pprof.StartCPUProfile(profFile) - if err != nil { - return nil, err - } - return &ctypes.ResultUnsafeProfile{}, nil -} - -// UnsafeStopCPUProfiler stops the running pprof profiler. -func UnsafeStopCPUProfiler(ctx *rpctypes.Context) (*ctypes.ResultUnsafeProfile, error) { - pprof.StopCPUProfile() - if err := profFile.Close(); err != nil { - return nil, err - } - return &ctypes.ResultUnsafeProfile{}, nil -} - -// UnsafeWriteHeapProfile dumps a heap profile to the given filename. -func UnsafeWriteHeapProfile(ctx *rpctypes.Context, filename string) (*ctypes.ResultUnsafeProfile, error) { - memProfFile, err := os.Create(filename) - if err != nil { - return nil, err - } - if err := pprof.WriteHeapProfile(memProfFile); err != nil { - return nil, err - } - if err := memProfFile.Close(); err != nil { - return nil, err - } - - return &ctypes.ResultUnsafeProfile{}, nil -} diff --git a/tm2/pkg/bft/rpc/core/doc.go b/tm2/pkg/bft/rpc/core/doc.go deleted file mode 100644 index 2cdbe51fbb1..00000000000 --- a/tm2/pkg/bft/rpc/core/doc.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -# Introduction - -Tendermint supports the following RPC protocols: - -* URI over HTTP -* JSONRPC over HTTP -* JSONRPC over websockets - -Tendermint RPC is built using our own RPC library which contains its own set of documentation and tests. -See it here: https://github.com/gnolang/gno/tm2/pkg/bft/tree/master/rpc/lib - -## Configuration - -RPC can be configured by tuning parameters under `[rpc]` table in the `$TMHOME/config/config.toml` file or by using the `--rpc.X` command-line flags. - -Default rpc listen address is `tcp://0.0.0.0:26657`. To set another address, set the `laddr` config parameter to desired value. -CORS (Cross-Origin Resource Sharing) can be enabled by setting `cors_allowed_origins`, `cors_allowed_methods`, `cors_allowed_headers` config parameters. - -## Arguments - -Arguments which expect strings or byte arrays may be passed as quoted strings, like `"abc"` or as `0x`-prefixed strings, like `0x616263`. - -## URI/HTTP - -```bash -curl 'localhost:26657/broadcast_tx_sync?tx="abc"' -``` - -> Response: - -```json - - { - "error": "", - "result": { - "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF", - "log": "", - "data": "", - "code": "0" - }, - "id": "", - "jsonrpc": "2.0" - } - -``` - -## JSONRPC/HTTP - -JSONRPC requests can be POST'd to the root RPC endpoint via HTTP (e.g. 
`http://localhost:26657/`). - -```json - - { - "method": "broadcast_tx_sync", - "jsonrpc": "2.0", - "params": [ "abc" ], - "id": "dontcare" - } - -``` - -## JSONRPC/websockets - -JSONRPC requests can be made via websocket. The websocket endpoint is at `/websocket`, e.g. `localhost:26657/websocket`. - -## More Examples - -See the various bash tests using curl in `test/`, and examples using the `Go` API in `rpc/client/`. - -## Get the list - -An HTTP Get request to the root RPC endpoint shows a list of available endpoints. - -```bash -curl 'localhost:26657' -``` - -> Response: - -```plain -Available endpoints: -/abci_info -/dump_consensus_state -/genesis -/net_info -/num_unconfirmed_txs -/status -/health -/unconfirmed_txs -/unsafe_flush_mempool -/unsafe_stop_cpu_profiler -/validators - -Endpoints that require arguments: -/abci_query?path=_&data=_&prove=_ -/block?height=_ -/blockchain?minHeight=_&maxHeight=_ -/broadcast_tx_async?tx=_ -/broadcast_tx_commit?tx=_ -/broadcast_tx_sync?tx=_ -/commit?height=_ -/dial_seeds?seeds=_ -/dial_persistent_peers?persistent_peers=_ -/tx?hash=_&prove=_ -/unsafe_start_cpu_profiler?filename=_ -/unsafe_write_heap_profile?filename=_ -``` - -# Endpoints -*/ -package core diff --git a/tm2/pkg/bft/rpc/core/doc_template.txt b/tm2/pkg/bft/rpc/core/doc_template.txt deleted file mode 100644 index 896d0c271f9..00000000000 --- a/tm2/pkg/bft/rpc/core/doc_template.txt +++ /dev/null @@ -1,8 +0,0 @@ -{{with .PDoc}} -{{comment_md .Doc}} -{{example_html $ ""}} - -{{range .Funcs}}{{$name_html := html .Name}}## [{{$name_html}}]({{posLink_url $ .Decl}}) -{{comment_md .Doc}}{{end}} -{{end}} ---- diff --git a/tm2/pkg/bft/rpc/core/health.go b/tm2/pkg/bft/rpc/core/health.go deleted file mode 100644 index f036ba9b896..00000000000 --- a/tm2/pkg/bft/rpc/core/health.go +++ /dev/null @@ -1,41 +0,0 @@ -package core - -import ( - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" -) - -// Get node health. Returns empty result (200 OK) on success, no response - in -// case of an error. -// -// ```shell -// curl 'localhost:26657/health' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// result, err := client.Health() -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": {}, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -func Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) { - return &ctypes.ResultHealth{}, nil -} diff --git a/tm2/pkg/bft/rpc/core/health/health.go b/tm2/pkg/bft/rpc/core/health/health.go new file mode 100644 index 00000000000..b26fcb549e0 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/health/health.go @@ -0,0 +1,19 @@ +package health + +import ( + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" +) + +// HealthHandler fetches the node health. 
+// Returns empty result (200 OK) on success, no response - in case of an error +// +// No params +func HealthHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + if len(p) > 0 { + return nil, spec.GenerateInvalidParamError(1) + } + + return &ctypes.ResultHealth{}, nil +} diff --git a/tm2/pkg/bft/rpc/core/health/health_test.go b/tm2/pkg/bft/rpc/core/health/health_test.go new file mode 100644 index 00000000000..628b79a3b12 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/health/health_test.go @@ -0,0 +1,37 @@ +package health + +import ( + "testing" + + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHandler_Health(t *testing.T) { + t.Parallel() + + t.Run("Unexpected params", func(t *testing.T) { + t.Parallel() + + res, err := HealthHandler(nil, []any{"extra"}) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Valid health status", func(t *testing.T) { + t.Parallel() + + res, err := HealthHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultHealth) + require.True(t, ok) + + assert.Equal(t, &ctypes.ResultHealth{}, result) + }) +} diff --git a/tm2/pkg/bft/rpc/core/mempool.go b/tm2/pkg/bft/rpc/core/mempool.go deleted file mode 100644 index ba3750574ce..00000000000 --- a/tm2/pkg/bft/rpc/core/mempool.go +++ /dev/null @@ -1,464 +0,0 @@ -package core - -import ( - "fmt" - "sync" - "time" - - abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" - "github.com/gnolang/gno/tm2/pkg/bft/types" - "github.com/gnolang/gno/tm2/pkg/errors" - "github.com/gnolang/gno/tm2/pkg/events" - "github.com/gnolang/gno/tm2/pkg/random" - "github.com/gnolang/gno/tm2/pkg/service" -) - -// ----------------------------------------------------------------------------- -// NOTE: tx should be signed, but this is only checked at the app level (not by Tendermint!) - -// Returns right away, with no response. Does not wait for CheckTx nor -// DeliverTx results. -// -// If you want to be sure that the transaction is included in a block, you can -// subscribe for the result using JSONRPC via a websocket. See -// https://tendermint.com/docs/app-dev/subscribing-to-events-via-websocket.html -// If you haven't received anything after a couple of blocks, resend it. If the -// same happens again, send it to some other node. A few reasons why it could -// happen: -// -// 1. malicious node can drop or pretend it had committed your tx -// 2. malicious proposer (not necessary the one you're communicating with) can -// drop transactions, which might become valid in the future -// (https://github.com/gnolang/gno/tm2/pkg/bft/issues/3322) -// 3. node can be offline -// -// Please refer to -// https://tendermint.com/docs/tendermint-core/using-tendermint.html#formatting -// for formatting/encoding rules. 
-// -// ```shell -// curl 'localhost:26657/broadcast_tx_async?tx="123"' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// result, err := client.BroadcastTxAsync("123") -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": { -// "hash": "E39AAB7A537ABAA237831742DCE1117F187C3C52", -// "log": "", -// "data": "", -// "code": "0" -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -// -// ### Query Parameters -// -// | Parameter | Type | Default | Required | Description | -// |-----------+------+---------+----------+-----------------| -// | tx | Tx | nil | true | The transaction | -func BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - err := mempool.CheckTx(tx, nil) - if err != nil { - return nil, err - } - return &ctypes.ResultBroadcastTx{Hash: tx.Hash()}, nil -} - -// Returns with the response from CheckTx. Does not wait for DeliverTx result. -// -// If you want to be sure that the transaction is included in a block, you can -// subscribe for the result using JSONRPC via a websocket. See -// https://tendermint.com/docs/app-dev/subscribing-to-events-via-websocket.html -// If you haven't received anything after a couple of blocks, resend it. If the -// same happens again, send it to some other node. A few reasons why it could -// happen: -// -// 1. malicious node can drop or pretend it had committed your tx -// 2. malicious proposer (not necessary the one you're communicating with) can -// drop transactions, which might become valid in the future -// (https://github.com/gnolang/gno/tm2/pkg/bft/issues/3322) -// -// Please refer to -// https://tendermint.com/docs/tendermint-core/using-tendermint.html#formatting -// for formatting/encoding rules. -// -// ```shell -// curl 'localhost:26657/broadcast_tx_sync?tx="456"' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// result, err := client.BroadcastTxSync("456") -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "jsonrpc": "2.0", -// "id": "", -// "result": { -// "code": "0", -// "data": "", -// "log": "", -// "hash": "0D33F2F03A5234F38706E43004489E061AC40A2E" -// }, -// "error": "" -// } -// -// ``` -// -// ### Query Parameters -// -// | Parameter | Type | Default | Required | Description | -// |-----------+------+---------+----------+-----------------| -// | tx | Tx | nil | true | The transaction | -func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - resCh := make(chan abci.Response, 1) - err := mempool.CheckTx(tx, func(res abci.Response) { - resCh <- res - }) - if err != nil { - return nil, err - } - res := <-resCh - r := res.(abci.ResponseCheckTx) - return &ctypes.ResultBroadcastTx{ - Error: r.Error, - Data: r.Data, - Log: r.Log, - Hash: tx.Hash(), - }, nil -} - -// Returns with the responses from CheckTx and DeliverTx. -// -// IMPORTANT: use only for testing and development. In production, use -// BroadcastTxSync or BroadcastTxAsync. You can subscribe for the transaction -// result using JSONRPC via a websocket. 
See -// https://tendermint.com/docs/app-dev/subscribing-to-events-via-websocket.html -// -// CONTRACT: only returns error if mempool.CheckTx() errs or if we timeout -// waiting for tx to commit. -// -// If CheckTx or DeliverTx fail, no error will be returned, but the returned result -// will contain a non-OK ABCI code. -// -// Please refer to -// https://tendermint.com/docs/tendermint-core/using-tendermint.html#formatting -// for formatting/encoding rules. -// -// ```shell -// curl 'localhost:26657/broadcast_tx_commit?tx="789"' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// result, err := client.BroadcastTxCommit("789") -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": { -// "height": "26682", -// "hash": "75CA0F856A4DA078FC4911580360E70CEFB2EBEE", -// "deliver_tx": { -// "log": "", -// "data": "", -// "code": "0" -// }, -// "check_tx": { -// "log": "", -// "data": "", -// "code": "0" -// } -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -// -// ### Query Parameters -// -// | Parameter | Type | Default | Required | Description | -// |-----------+------+---------+----------+-----------------| -// | tx | Tx | nil | true | The transaction | -func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - // Broadcast tx and wait for CheckTx result - checkTxResCh := make(chan abci.Response, 1) - err := mempool.CheckTx(tx, func(res abci.Response) { - checkTxResCh <- res - }) - if err != nil { - logger.Error("Error on broadcastTxCommit", "err", err) - return nil, fmt.Errorf("error on broadcastTxCommit: %w", err) - } - checkTxResMsg := <-checkTxResCh - checkTxRes := checkTxResMsg.(abci.ResponseCheckTx) - if checkTxRes.Error != nil { - return &ctypes.ResultBroadcastTxCommit{ - CheckTx: checkTxRes, - DeliverTx: abci.ResponseDeliverTx{}, - Hash: tx.Hash(), - }, nil - } - - // Wait for the tx to be included in a block or timeout. - txRes, err := gTxDispatcher.getTxResult(tx, nil) - if err != nil { - return nil, err - } - return &ctypes.ResultBroadcastTxCommit{ - CheckTx: checkTxRes, - DeliverTx: txRes.Response, - Hash: tx.Hash(), - Height: txRes.Height, - }, nil -} - -// Get unconfirmed transactions (maximum ?limit entries) including their number. 
-// -// ```shell -// curl 'localhost:26657/unconfirmed_txs' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// result, err := client.UnconfirmedTxs() -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "result" : { -// "txs" : [], -// "total_bytes" : "0", -// "n_txs" : "0", -// "total" : "0" -// }, -// "jsonrpc" : "2.0", -// "id" : "" -// } -// -// ``` -// -// ### Query Parameters -// -// | Parameter | Type | Default | Required | Description | -// |-----------+------+---------+----------+--------------------------------------| -// | limit | int | 30 | false | Maximum number of entries (max: 100) | -// ``` -func UnconfirmedTxs(ctx *rpctypes.Context, limit int) (*ctypes.ResultUnconfirmedTxs, error) { - // reuse per_page validator - limit = validatePerPage(limit) - - txs := mempool.ReapMaxTxs(limit) - return &ctypes.ResultUnconfirmedTxs{ - Count: len(txs), - Total: mempool.Size(), - TotalBytes: mempool.TxsBytes(), - Txs: txs, - }, nil -} - -// Get number of unconfirmed transactions. -// -// ```shell -// curl 'localhost:26657/num_unconfirmed_txs' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// if err != nil { -// // handle error -// } -// defer client.Stop() -// result, err := client.UnconfirmedTxs() -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "jsonrpc" : "2.0", -// "id" : "", -// "result" : { -// "n_txs" : "0", -// "total_bytes" : "0", -// "total" : "0" -// "txs" : null, -// } -// } -// -// ``` -func NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { - return &ctypes.ResultUnconfirmedTxs{ - Count: mempool.Size(), - Total: mempool.Size(), - TotalBytes: mempool.TxsBytes(), - }, nil -} - -// ---------------------------------------- -// txListener - -// NOTE: txDispatcher doesn't handle any throttling or resource management. -// The RPC websockets system is expected to throttle requests. 
-type txDispatcher struct { - service.BaseService - evsw events.EventSwitch - listenerID string - sub <-chan events.Event - - mtx sync.Mutex - waiters map[string]*txWaiter // string(types.Tx) -> *txWaiter -} - -func newTxDispatcher(evsw events.EventSwitch) *txDispatcher { - listenerID := fmt.Sprintf("txDispatcher#%v", random.RandStr(6)) - sub := events.SubscribeToEvent(evsw, listenerID, types.EventTx{}) - - td := &txDispatcher{ - evsw: evsw, - listenerID: listenerID, - sub: sub, - waiters: make(map[string]*txWaiter), - } - td.BaseService = *service.NewBaseService(nil, "txDispatcher", td) - err := td.Start() - if err != nil { - panic(err) - } - return td -} - -func (td *txDispatcher) OnStart() error { - go td.listenRoutine() - return nil -} - -func (td *txDispatcher) OnStop() { - td.evsw.RemoveListener(td.listenerID) -} - -func (td *txDispatcher) listenRoutine() { - for { - select { - case event, ok := <-td.sub: - if !ok { - td.Stop() - panic("txDispatcher subscription unexpectedly closed") - } - txEvent := event.(types.EventTx) - td.notifyTxEvent(txEvent) - case <-td.Quit(): - return - } - } -} - -func (td *txDispatcher) notifyTxEvent(txEvent types.EventTx) { - td.mtx.Lock() - defer td.mtx.Unlock() - - tx := txEvent.Result.Tx - waiter, ok := td.waiters[string(tx)] - if !ok { - return // nothing to do - } else { - waiter.txRes = txEvent.Result - close(waiter.waitCh) - } -} - -// blocking -// If the tx is already being waited on, returns the result from the original request. -// Upon result or timeout, the tx is forgotten from txDispatcher, and can be re-requested. -// If the tx times out, an error is returned. -// Quit can optionally be provided to terminate early (e.g. if the caller disconnects). -func (td *txDispatcher) getTxResult(tx types.Tx, quit chan struct{}) (types.TxResult, error) { - // Get or create waiter. - td.mtx.Lock() - waiter, ok := td.waiters[string(tx)] - if !ok { - waiter = newTxWaiter(tx) - td.waiters[string(tx)] = waiter - } - td.mtx.Unlock() - - select { - case <-waiter.waitCh: - return waiter.txRes, nil - case <-waiter.timeCh: - return types.TxResult{}, errors.New("request timeout") - case <-quit: - return types.TxResult{}, errors.New("caller quit") - } -} - -type txWaiter struct { - tx types.Tx - waitCh chan struct{} - timeCh <-chan time.Time - txRes types.TxResult -} - -func newTxWaiter(tx types.Tx) *txWaiter { - return &txWaiter{ - tx: tx, - waitCh: make(chan struct{}), - timeCh: time.After(config.TimeoutBroadcastTxCommit), - } -} diff --git a/tm2/pkg/bft/rpc/core/mempool/dispatcher.go b/tm2/pkg/bft/rpc/core/mempool/dispatcher.go new file mode 100644 index 00000000000..846fe28eb06 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/mempool/dispatcher.go @@ -0,0 +1,140 @@ +package mempool + +import ( + "errors" + "fmt" + "sync" + "time" + + "github.com/gnolang/gno/tm2/pkg/bft/types" + "github.com/gnolang/gno/tm2/pkg/events" + "github.com/gnolang/gno/tm2/pkg/random" + "github.com/gnolang/gno/tm2/pkg/service" +) + +// This code was moved over from the old Tendermint RPC implementation, and slightly cleaned up. 
+// If time allows, we should remove it altogether, and figure out a better mechanism for transaction waiting +type txDispatcher struct { + service.BaseService + + evsw events.EventSwitch + listenerID string + sub <-chan events.Event + + timeout time.Duration + + mtx sync.Mutex + waiters map[string]*txWaiter // string(tx) -> waiter shared by all callers +} + +func newTxDispatcher(evsw events.EventSwitch, timeout time.Duration) *txDispatcher { + listenerID := fmt.Sprintf("txDispatcher#%v", random.RandStr(6)) + sub := events.SubscribeToEvent(evsw, listenerID, types.EventTx{}) + + td := &txDispatcher{ + evsw: evsw, + listenerID: listenerID, + sub: sub, + timeout: timeout, + waiters: make(map[string]*txWaiter), + } + + td.BaseService = *service.NewBaseService(nil, "txDispatcher", td) + + if err := td.Start(); err != nil { + panic(err) + } + + return td +} + +func (td *txDispatcher) OnStart() error { + go td.listenRoutine() + return nil +} + +func (td *txDispatcher) OnStop() { + td.evsw.RemoveListener(td.listenerID) +} + +func (td *txDispatcher) listenRoutine() { + for { + select { + case event, ok := <-td.sub: + if !ok { + td.Stop() + panic("txDispatcher subscription unexpectedly closed") + } + + txEvent := event.(types.EventTx) + td.notifyTxEvent(txEvent) + + case <-td.Quit(): + return + } + } +} + +func (td *txDispatcher) notifyTxEvent(txEvent types.EventTx) { + key := string(txEvent.Result.Tx) + + td.mtx.Lock() + waiter, ok := td.waiters[key] + if !ok { + td.mtx.Unlock() + return + } + + delete(td.waiters, key) + + waiter.res = txEvent.Result + close(waiter.done) + + td.mtx.Unlock() +} + +// getTxResult blocks until: +// - the tx result arrives from events, OR +// - the dispatcher timeout expires, OR +// - the caller's quit channel fires (if non-nil). 
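A package-internal sketch of the intended call pattern, assuming a 10-second timeout as used by NewHandler in mempool.go below; evsw, tx, and quit are hypothetical inputs:

```go
package mempool

import (
	"time"

	"github.com/gnolang/gno/tm2/pkg/bft/types"
	"github.com/gnolang/gno/tm2/pkg/events"
)

// exampleWaitForCommit blocks until the EventTx result for tx arrives,
// the dispatcher timeout fires, or the caller closes quit.
func exampleWaitForCommit(evsw events.EventSwitch, tx types.Tx, quit chan struct{}) (types.TxResult, error) {
	td := newTxDispatcher(evsw, 10*time.Second)
	defer td.Stop()

	return td.getTxResult(tx, quit)
}
```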
+// +// All callers waiting on the same tx share the same waiter and get the same result +func (td *txDispatcher) getTxResult(tx types.Tx, quit chan struct{}) (types.TxResult, error) { + key := string(tx) + + td.mtx.Lock() + waiter, ok := td.waiters[key] + if !ok { + waiter = newTxWaiter() + td.waiters[key] = waiter + } + td.mtx.Unlock() + + timeout := time.After(td.timeout) + + select { + case <-waiter.done: + return waiter.res, nil + + case <-timeout: + td.mtx.Lock() + delete(td.waiters, key) + td.mtx.Unlock() + + return types.TxResult{}, errors.New("request timeout") + + case <-quit: + return types.TxResult{}, errors.New("caller quit") + } +} + +type txWaiter struct { + done chan struct{} + res types.TxResult +} + +func newTxWaiter() *txWaiter { + return &txWaiter{ + done: make(chan struct{}), + } +} diff --git a/tm2/pkg/bft/rpc/core/mempool/mempool.go b/tm2/pkg/bft/rpc/core/mempool/mempool.go new file mode 100644 index 00000000000..a834346ac0d --- /dev/null +++ b/tm2/pkg/bft/rpc/core/mempool/mempool.go @@ -0,0 +1,176 @@ +package mempool + +import ( + "fmt" + "time" + + abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" + coreparams "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/params" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/utils" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + "github.com/gnolang/gno/tm2/pkg/bft/types" + "github.com/gnolang/gno/tm2/pkg/events" +) + +// Handler is the mempool RPC handler +type Handler struct { + mempool Mempool + dispatcher *txDispatcher +} + +// NewHandler creates a new instance of the mempool RPC handler +func NewHandler( + mp Mempool, + evsw events.EventSwitch, +) *Handler { + return &Handler{ + mempool: mp, + dispatcher: newTxDispatcher(evsw, time.Second*10), + } +} + +// BroadcastTxAsyncHandler broadcasts the tx and returns right away, with no response. +// Does not wait for CheckTx nor DeliverTx results +// +// Params: +// - tx []byte (required) +func (h *Handler) BroadcastTxAsyncHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + const idxTx = 0 + + rawTx, err := coreparams.AsBytes(p, idxTx, true) + if err != nil { + return nil, err + } + + tx := types.Tx(rawTx) + + if checkErr := h.mempool.CheckTx(tx, nil); checkErr != nil { + return nil, spec.GenerateResponseError(checkErr) + } + + return &ctypes.ResultBroadcastTx{ + Hash: tx.Hash(), + }, nil +} + +// BroadcastTxSyncHandler broadcasts the tx and returns with the response from CheckTx. +// Does not wait for DeliverTx result +// +// Params: +// - tx []byte (required) +func (h *Handler) BroadcastTxSyncHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + const idxTx = 0 + + rawTx, err := coreparams.AsBytes(p, idxTx, true) + if err != nil { + return nil, err + } + + tx := types.Tx(rawTx) + + resCh := make(chan abci.Response, 1) + if checkErr := h.mempool.CheckTx(tx, func(res abci.Response) { + resCh <- res + }); checkErr != nil { + return nil, spec.GenerateResponseError(checkErr) + } + + res := <-resCh + r := res.(abci.ResponseCheckTx) + + return &ctypes.ResultBroadcastTx{ + Error: r.Error, + Data: r.Data, + Log: r.Log, + Hash: tx.Hash(), + }, nil +} + +// BroadcastTxCommitHandler broadcasts the tx and returns with the responses from CheckTx and DeliverTx. 
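A client-side sketch, assuming the node's RPC listens on localhost:26657 and that the GET form of the endpoint accepts the 0x-prefixed hex encoding handled by params.AsBytes in this patch:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
)

func main() {
	tx := []byte("my-raw-tx")

	url := fmt.Sprintf(
		"http://localhost:26657/broadcast_tx_commit?tx=0x%s",
		hex.EncodeToString(tx),
	)

	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // JSON-RPC response carrying check_tx and deliver_tx
}
```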
+// +// Params: +// - tx []byte (required) +func (h *Handler) BroadcastTxCommitHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + const idxTx = 0 + + rawTx, err := coreparams.AsBytes(p, idxTx, true) + if err != nil { + return nil, err + } + + tx := types.Tx(rawTx) + + checkTxResCh := make(chan abci.Response, 1) + if checkErr := h.mempool.CheckTx(tx, func(res abci.Response) { + checkTxResCh <- res + }); checkErr != nil { + return nil, spec.GenerateResponseError( + fmt.Errorf("error on BroadcastTxCommit: %w", checkErr), + ) + } + + checkTxResMsg := <-checkTxResCh + checkTxRes := checkTxResMsg.(abci.ResponseCheckTx) + + if checkTxRes.Error != nil { + return &ctypes.ResultBroadcastTxCommit{ + CheckTx: checkTxRes, + DeliverTx: abci.ResponseDeliverTx{}, + Hash: tx.Hash(), + }, nil + } + + txRes, txErr := h.dispatcher.getTxResult(tx, nil) + if txErr != nil { + return nil, spec.GenerateResponseError(txErr) + } + + return &ctypes.ResultBroadcastTxCommit{ + CheckTx: checkTxRes, + DeliverTx: txRes.Response, + Hash: tx.Hash(), + Height: txRes.Height, + }, nil +} + +// UnconfirmedTxsHandler fetches unconfirmed transactions (maximum ?limit entries) including their number. +// +// Params: +// - limit int64 (optional, default 30, max 100) +func (h *Handler) UnconfirmedTxsHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + const idxLimit = 0 + + limit64, err := coreparams.AsInt64(p, idxLimit) + if err != nil { + return nil, err + } + + var ( + limit = utils.ValidatePerPage(int(limit64)) + txs = h.mempool.ReapMaxTxs(limit) + ) + + return &ctypes.ResultUnconfirmedTxs{ + Count: len(txs), + Total: h.mempool.Size(), + TotalBytes: h.mempool.TxsBytes(), + Txs: txs, + }, nil +} + +// NumUnconfirmedTxsHandler fetches the number of unconfirmed transactions. 
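A small wiring sketch following the same shape as SetupMempool in register.go further below; mp and evsw stand in for a concrete mempool and event switch:

```go
package example

import (
	"fmt"

	"github.com/gnolang/gno/tm2/pkg/bft/rpc/core/mempool"
	ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types"
	"github.com/gnolang/gno/tm2/pkg/events"
)

// printMempoolStats builds the handler and calls the params-less endpoint.
func printMempoolStats(mp mempool.Mempool, evsw events.EventSwitch) error {
	h := mempool.NewHandler(mp, evsw)

	res, jsonErr := h.NumUnconfirmedTxsHandler(nil, nil)
	if jsonErr != nil {
		return fmt.Errorf("rpc error %v: %s", jsonErr.Code, jsonErr.Message)
	}

	info := res.(*ctypes.ResultUnconfirmedTxs)
	fmt.Println("pending txs:", info.Count, "total bytes:", info.TotalBytes)

	return nil
}
```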
+// +// No params +func (h *Handler) NumUnconfirmedTxsHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + if len(p) > 0 { + return nil, spec.GenerateInvalidParamError(1) + } + + return &ctypes.ResultUnconfirmedTxs{ + Count: h.mempool.Size(), + Total: h.mempool.Size(), + TotalBytes: h.mempool.TxsBytes(), + }, nil +} diff --git a/tm2/pkg/bft/rpc/core/mempool/mempool_test.go b/tm2/pkg/bft/rpc/core/mempool/mempool_test.go new file mode 100644 index 00000000000..05ccb3d0080 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/mempool/mempool_test.go @@ -0,0 +1,459 @@ +package mempool + +import ( + "errors" + "testing" + "time" + + abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + "github.com/gnolang/gno/tm2/pkg/bft/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHandler_BroadcastTxAsyncHandler(t *testing.T) { + t.Parallel() + + t.Run("Missing tx param", func(t *testing.T) { + t.Parallel() + + h := &Handler{ + mempool: &mockMempool{}, + } + + res, err := h.BroadcastTxAsyncHandler(nil, nil) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("CheckTx error", func(t *testing.T) { + t.Parallel() + + var ( + checkErr = errors.New("mempool error") + mp = &mockMempool{ + checkTxFn: func(tx types.Tx, cb func(abci.Response)) error { + return checkErr + }, + } + + h = &Handler{ + mempool: mp, + } + + txBytes = []byte("tx-bytes") + params = []any{txBytes} + ) + + res, err := h.BroadcastTxAsyncHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + assert.Contains(t, err.Message, checkErr.Error()) + }) + + t.Run("Valid broadcast", func(t *testing.T) { + t.Parallel() + + var ( + capturedTx types.Tx + mp = &mockMempool{ + checkTxFn: func(tx types.Tx, cb func(abci.Response)) error { + capturedTx = tx + return nil + }, + } + + h = &Handler{ + mempool: mp, + } + + txBytes = []byte("some-tx") + params = []any{txBytes} + ) + + res, err := h.BroadcastTxAsyncHandler(nil, params) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultBroadcastTx) + require.True(t, ok) + + expectedHash := types.Tx(txBytes).Hash() + assert.Equal(t, expectedHash, result.Hash) + assert.Equal(t, types.Tx(txBytes), capturedTx) + }) +} + +func TestHandler_BroadcastTxSyncHandler(t *testing.T) { + t.Parallel() + + t.Run("Missing tx param", func(t *testing.T) { + t.Parallel() + + h := &Handler{ + mempool: &mockMempool{}, + } + + res, err := h.BroadcastTxSyncHandler(nil, nil) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("CheckTx error", func(t *testing.T) { + t.Parallel() + + var ( + checkErr = errors.New("sync mempool error") + + mp = &mockMempool{ + checkTxFn: func(tx types.Tx, cb func(abci.Response)) error { + return checkErr + }, + } + + h = &Handler{ + mempool: mp, + } + + params = []any{[]byte("tx")} + ) + + res, err := h.BroadcastTxSyncHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + assert.Contains(t, err.Message, checkErr.Error()) + }) + + t.Run("Valid CheckTx response", func(t *testing.T) { + t.Parallel() + + var ( + txBytes = []byte("sync-tx") + tx = types.Tx(txBytes) + + checkResp = abci.ResponseCheckTx{ + ResponseBase: abci.ResponseBase{ + 
Data: []byte("data"), + Log: "log-message", + Error: nil, + }, + } + + mp = &mockMempool{ + checkTxFn: func(txArg types.Tx, cb func(abci.Response)) error { + assert.Equal(t, tx, txArg) + + cb(checkResp) + return nil + }, + } + + h = &Handler{ + mempool: mp, + } + + params = []any{txBytes} + ) + + res, err := h.BroadcastTxSyncHandler(nil, params) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultBroadcastTx) + require.True(t, ok) + + assert.Equal(t, checkResp.Error, result.Error) + assert.Equal(t, checkResp.Data, result.Data) + assert.Equal(t, checkResp.Log, result.Log) + assert.Equal(t, tx.Hash(), result.Hash) + }) +} + +func TestHandler_BroadcastTxCommitHandler(t *testing.T) { + t.Parallel() + + t.Run("Missing tx param", func(t *testing.T) { + t.Parallel() + + h := &Handler{ + mempool: &mockMempool{}, + } + + res, err := h.BroadcastTxCommitHandler(nil, nil) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("CheckTx call error", func(t *testing.T) { + t.Parallel() + + var ( + checkErr = errors.New("commit mempool error") + mp = &mockMempool{ + checkTxFn: func(tx types.Tx, cb func(abci.Response)) error { + return checkErr + }, + } + h = &Handler{ + mempool: mp, + } + + params = []any{[]byte("tx")} + ) + + res, err := h.BroadcastTxCommitHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + assert.Contains(t, err.Message, "error on BroadcastTxCommit") + assert.Contains(t, err.Message, checkErr.Error()) + }) + + t.Run("CheckTx response error", func(t *testing.T) { + t.Parallel() + + var ( + txBytes = []byte("commit-tx") + tx = types.Tx(txBytes) + + checkResp = abci.ResponseCheckTx{ + ResponseBase: abci.ResponseBase{ + Error: testABCIError{msg: "check failed"}, + Data: []byte("ignored"), + Log: "ignored", + }, + } + + mp = &mockMempool{ + checkTxFn: func(txArg types.Tx, cb func(abci.Response)) error { + assert.Equal(t, tx, txArg) + cb(checkResp) + return nil + }, + } + + h = &Handler{ + mempool: mp, + dispatcher: nil, // explicit + } + + params = []any{txBytes} + ) + + res, err := h.BroadcastTxCommitHandler(nil, params) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultBroadcastTxCommit) + require.True(t, ok) + + assert.Equal(t, checkResp, result.CheckTx) + assert.Equal(t, abci.ResponseDeliverTx{}, result.DeliverTx) + assert.Equal(t, tx.Hash(), result.Hash) + assert.Equal(t, int64(0), result.Height) + }) + + t.Run("Successful commit", func(t *testing.T) { + t.Parallel() + + var ( + txBytes = []byte("commit-success-tx") + tx = types.Tx(txBytes) + + checkResp = abci.ResponseCheckTx{ + ResponseBase: abci.ResponseBase{ + Error: nil, + Data: []byte("check-data"), + Log: "check-log", + }, + } + + expectedDeliver = abci.ResponseDeliverTx{ + ResponseBase: abci.ResponseBase{ + Data: []byte("deliver-data"), + Log: "deliver-log", + }, + } + expectedHeight = int64(42) + params = []any{txBytes} + ) + + waiter := newTxWaiter() + waiter.res = types.TxResult{ + Height: expectedHeight, + Response: expectedDeliver, + } + close(waiter.done) + + dispatcher := &txDispatcher{ + timeout: time.Minute, + waiters: map[string]*txWaiter{ + string(tx): waiter, + }, + } + + mp := &mockMempool{ + checkTxFn: func(txArg types.Tx, cb func(abci.Response)) error { + assert.Equal(t, tx, txArg) + cb(checkResp) + + return nil + }, + } + + h := &Handler{ + mempool: mp, + dispatcher: dispatcher, + } + + res, err := 
h.BroadcastTxCommitHandler(nil, params) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultBroadcastTxCommit) + require.True(t, ok) + + assert.Equal(t, checkResp, result.CheckTx) + assert.Equal(t, expectedDeliver, result.DeliverTx) + assert.Equal(t, expectedHeight, result.Height) + assert.Equal(t, tx.Hash(), result.Hash) + }) +} +func TestHandler_UnconfirmedTxsHandler(t *testing.T) { + t.Parallel() + + t.Run("Invalid limit param", func(t *testing.T) { + t.Parallel() + + var ( + h = &Handler{ + mempool: &mockMempool{}, + } + + params = []any{"not-an-int"} + ) + + res, err := h.UnconfirmedTxsHandler(nil, params) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Valid limit and mempool data", func(t *testing.T) { + t.Parallel() + + var ( + expectedTxs = []types.Tx{ + []byte("tx1"), + []byte("tx2"), + []byte("tx3"), + } + + mp = &mockMempool{ + reapMaxTxsFn: func(max int) []types.Tx { + assert.Equal(t, 10, max) + return expectedTxs + }, + sizeFn: func() int { + return 5 + }, + txsBytesFn: func() int64 { + return 123 + }, + } + + h = &Handler{ + mempool: mp, + } + + params = []any{int64(10)} + ) + + res, err := h.UnconfirmedTxsHandler(nil, params) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultUnconfirmedTxs) + require.True(t, ok) + + assert.Equal(t, len(expectedTxs), result.Count) + assert.Equal(t, 5, result.Total) + assert.Equal(t, int64(123), result.TotalBytes) + assert.Equal(t, expectedTxs, result.Txs) + }) +} + +func TestHandler_NumUnconfirmedTxsHandler(t *testing.T) { + t.Parallel() + + t.Run("Unexpected params", func(t *testing.T) { + t.Parallel() + + h := &Handler{ + mempool: &mockMempool{}, + } + + res, err := h.NumUnconfirmedTxsHandler(nil, []any{"extra"}) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Valid call", func(t *testing.T) { + t.Parallel() + + var ( + size = 7 + txsBytes = int64(456) + + mp = &mockMempool{ + sizeFn: func() int { + return size + }, + txsBytesFn: func() int64 { + return txsBytes + }, + } + + h = &Handler{ + mempool: mp, + } + ) + + res, err := h.NumUnconfirmedTxsHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultUnconfirmedTxs) + require.True(t, ok) + + assert.Equal(t, size, result.Count) + assert.Equal(t, size, result.Total) + assert.Equal(t, txsBytes, result.TotalBytes) + }) +} + +type testABCIError struct { + msg string +} + +func (e testABCIError) Error() string { + return e.msg +} + +func (e testABCIError) AssertABCIError() {} diff --git a/tm2/pkg/bft/rpc/core/mempool/mock_test.go b/tm2/pkg/bft/rpc/core/mempool/mock_test.go new file mode 100644 index 00000000000..8b79baa9f81 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/mempool/mock_test.go @@ -0,0 +1,52 @@ +package mempool + +import ( + abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" + "github.com/gnolang/gno/tm2/pkg/bft/types" +) + +type ( + checkTxDelegate func(tx types.Tx, cb func(abci.Response)) error + reapMaxTxsDelegate func(max int) types.Txs + sizeDelegate func() int + txsBytesDelegate func() int64 +) + +type mockMempool struct { + checkTxFn checkTxDelegate + reapMaxTxsFn reapMaxTxsDelegate + sizeFn sizeDelegate + txsBytesFn txsBytesDelegate +} + +func (m *mockMempool) CheckTx(tx types.Tx, cb func(abci.Response)) error { + if m.checkTxFn != nil { + return m.checkTxFn(tx, cb) + } + + return nil +} + +func (m *mockMempool) 
ReapMaxTxs(max int) []types.Tx { + if m.reapMaxTxsFn != nil { + return m.reapMaxTxsFn(max) + } + + return nil +} + +func (m *mockMempool) Size() int { + if m.sizeFn != nil { + return m.sizeFn() + } + + return 0 +} + +func (m *mockMempool) TxsBytes() int64 { + if m.txsBytesFn != nil { + return m.txsBytesFn() + } + + return 0 +} diff --git a/tm2/pkg/bft/rpc/core/mempool/types.go b/tm2/pkg/bft/rpc/core/mempool/types.go new file mode 100644 index 00000000000..0c916c027ff --- /dev/null +++ b/tm2/pkg/bft/rpc/core/mempool/types.go @@ -0,0 +1,22 @@ +package mempool + +import ( + abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" + "github.com/gnolang/gno/tm2/pkg/bft/types" +) + +// Mempool is the minimal mempool interface the RPC handler needs +type Mempool interface { + // CheckTx submits a transaction to the mempool. + // If cb is non-nil, it is called with the CheckTx ABCI response + CheckTx(tx types.Tx, cb func(abci.Response)) error + + // ReapMaxTxs returns up to max pending transactions from the mempool + ReapMaxTxs(max int) types.Txs + + // Size returns the number of transactions currently in the mempool + Size() int + + // TxsBytes returns the total size (in bytes) of all transactions in the mempool + TxsBytes() int64 +} diff --git a/tm2/pkg/bft/rpc/core/mock/block.go b/tm2/pkg/bft/rpc/core/mock/block.go new file mode 100644 index 00000000000..0f89cc7a0c1 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/mock/block.go @@ -0,0 +1,85 @@ +package mock + +import "github.com/gnolang/gno/tm2/pkg/bft/types" + +type ( + HeightDelegate func() int64 + LoadBlockMetaDelegate func(int64) *types.BlockMeta + LoadBlockDelegate func(int64) *types.Block + LoadSeenCommitDelegate func(int64) *types.Commit + LoadBlockCommitDelegate func(int64) *types.Commit + LoadBlockByHashDelegate func([]byte) *types.Block + LoadBlockPartDelegate func(int64, int) *types.Part + SaveBlockDelegate func(*types.Block, *types.PartSet, *types.Commit) +) + +type BlockStore struct { + HeightFn HeightDelegate + LoadBlockMetaFn LoadBlockMetaDelegate + LoadBlockFn LoadBlockDelegate + LoadSeenCommitFn LoadSeenCommitDelegate + LoadBlockCommitFn LoadBlockCommitDelegate + LoadBlockByHashFn LoadBlockByHashDelegate + LoadBlockPartFn LoadBlockPartDelegate + SaveBlockFn SaveBlockDelegate +} + +func (m *BlockStore) Height() int64 { + if m.HeightFn != nil { + return m.HeightFn() + } + + return 0 +} + +func (m *BlockStore) LoadBlockMeta(h int64) *types.BlockMeta { + if m.LoadBlockMetaFn != nil { + return m.LoadBlockMetaFn(h) + } + + return nil +} + +func (m *BlockStore) LoadBlock(h int64) *types.Block { + if m.LoadBlockFn != nil { + return m.LoadBlockFn(h) + } + + return nil +} + +func (m *BlockStore) LoadSeenCommit(h int64) *types.Commit { + if m.LoadSeenCommitFn != nil { + return m.LoadSeenCommitFn(h) + } + + return nil +} + +func (m *BlockStore) LoadBlockCommit(h int64) *types.Commit { + if m.LoadBlockCommitFn != nil { + return m.LoadBlockCommitFn(h) + } + + return nil +} + +func (m *BlockStore) LoadBlockByHash(hash []byte) *types.Block { + if m.LoadBlockByHashFn != nil { + return m.LoadBlockByHashFn(hash) + } + + return nil +} +func (m *BlockStore) LoadBlockPart(height int64, index int) *types.Part { + if m.LoadBlockPartFn != nil { + return m.LoadBlockPartFn(height, index) + } + + return nil +} +func (m *BlockStore) SaveBlock(block *types.Block, set *types.PartSet, commit *types.Commit) { + if m.SaveBlockFn != nil { + m.SaveBlockFn(block, set, commit) + } +} diff --git a/tm2/pkg/bft/rpc/core/mock/p2p.go b/tm2/pkg/bft/rpc/core/mock/p2p.go new file 
mode 100644 index 00000000000..640535f5339 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/mock/p2p.go @@ -0,0 +1,136 @@ +package mock + +import ( + "net" + + "github.com/gnolang/gno/tm2/pkg/p2p" + p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types" +) + +type ( + PeersDelegate func() p2p.PeerSet +) + +type Peers struct { + PeersFn PeersDelegate +} + +func (m *Peers) Peers() p2p.PeerSet { + if m.PeersFn != nil { + return m.PeersFn() + } + + return nil +} + +type ( + AddDelegate func(p2p.PeerConn) + RemoveDelegate func(p2pTypes.ID) bool + HasDelegate func(p2pTypes.ID) bool + HasIPDelegate func(net.IP) bool + GetPeerDelegate func(p2pTypes.ID) p2p.PeerConn + ListDelegate func() []p2p.PeerConn + NumInboundDelegate func() uint64 + NumOutboundDelegate func() uint64 +) + +type PeerSet struct { + AddFn AddDelegate + RemoveFn RemoveDelegate + HasFn HasDelegate + HasIPFn HasIPDelegate + GetFn GetPeerDelegate + ListFn ListDelegate + NumInboundFn NumInboundDelegate + NumOutboundFn NumOutboundDelegate +} + +func (m *PeerSet) Add(peer p2p.PeerConn) { + if m.AddFn != nil { + m.AddFn(peer) + } +} + +func (m *PeerSet) Remove(key p2pTypes.ID) bool { + if m.RemoveFn != nil { + m.RemoveFn(key) + } + + return false +} + +func (m *PeerSet) Has(key p2pTypes.ID) bool { + if m.HasFn != nil { + return m.HasFn(key) + } + + return false +} + +func (m *PeerSet) Get(key p2pTypes.ID) p2p.PeerConn { + if m.GetFn != nil { + return m.GetFn(key) + } + + return nil +} + +func (m *PeerSet) List() []p2p.PeerConn { + if m.ListFn != nil { + return m.ListFn() + } + + return nil +} + +func (m *PeerSet) NumInbound() uint64 { + if m.NumInboundFn != nil { + return m.NumInboundFn() + } + + return 0 +} + +func (m *PeerSet) NumOutbound() uint64 { + if m.NumOutboundFn != nil { + return m.NumOutboundFn() + } + + return 0 +} + +type ( + ListenersDelegate func() []string + IsListeningDelegate func() bool + NodeInfoDelegate func() p2pTypes.NodeInfo +) + +type Transport struct { + ListenersFn ListenersDelegate + IsListeningFn IsListeningDelegate + NodeInfoFn NodeInfoDelegate +} + +func (m *Transport) Listeners() []string { + if m.ListenersFn != nil { + return m.ListenersFn() + } + + return nil +} + +func (m *Transport) IsListening() bool { + if m.IsListeningFn != nil { + return m.IsListeningFn() + } + + return false +} + +func (m *Transport) NodeInfo() p2pTypes.NodeInfo { + if m.NodeInfoFn != nil { + return m.NodeInfoFn() + } + + return p2pTypes.NodeInfo{} +} diff --git a/tm2/pkg/bft/rpc/core/mock_test.go b/tm2/pkg/bft/rpc/core/mock_test.go deleted file mode 100644 index a6ffe948d00..00000000000 --- a/tm2/pkg/bft/rpc/core/mock_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package core - -import "github.com/gnolang/gno/tm2/pkg/bft/types" - -type ( - heightDelegate func() int64 - loadBlockMetaDelegate func(int64) *types.BlockMeta - loadBlockDelegate func(int64) *types.Block - loadBlockPartDelegate func(int64, int) *types.Part - loadBlockCommitDelegate func(int64) *types.Commit - loadSeenCommitDelegate func(int64) *types.Commit - - saveBlockDelegate func(*types.Block, *types.PartSet, *types.Commit) -) - -type mockBlockStore struct { - heightFn heightDelegate - loadBlockMetaFn loadBlockMetaDelegate - loadBlockFn loadBlockDelegate - loadBlockPartFn loadBlockPartDelegate - loadBlockCommitFn loadBlockCommitDelegate - loadSeenCommitFn loadSeenCommitDelegate - saveBlockFn saveBlockDelegate -} - -func (m *mockBlockStore) Height() int64 { - if m.heightFn != nil { - return m.heightFn() - } - - return 0 -} - -func (m *mockBlockStore) LoadBlockMeta(height int64) 
*types.BlockMeta { - if m.loadBlockMetaFn != nil { - return m.loadBlockMetaFn(height) - } - - return nil -} - -func (m *mockBlockStore) LoadBlock(height int64) *types.Block { - if m.loadBlockFn != nil { - return m.loadBlockFn(height) - } - - return nil -} - -func (m *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { - if m.loadBlockPartFn != nil { - return m.loadBlockPartFn(height, index) - } - - return nil -} - -func (m *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { - if m.loadBlockCommitFn != nil { - return m.loadBlockCommitFn(height) - } - - return nil -} - -func (m *mockBlockStore) LoadSeenCommit(height int64) *types.Commit { - if m.loadSeenCommitFn != nil { - return m.loadSeenCommitFn(height) - } - - return nil -} - -func (m *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { - if m.saveBlockFn != nil { - m.saveBlockFn(block, blockParts, seenCommit) - } -} diff --git a/tm2/pkg/bft/rpc/core/net.go b/tm2/pkg/bft/rpc/core/net.go deleted file mode 100644 index f8839b7d91f..00000000000 --- a/tm2/pkg/bft/rpc/core/net.go +++ /dev/null @@ -1,229 +0,0 @@ -package core - -import ( - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" -) - -// Get network info. -// -// ```shell -// curl 'localhost:26657/net_info' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// info, err := client.NetInfo() -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "jsonrpc": "2.0", -// "id": "", -// "result": { -// "listening": true, -// "listeners": [ -// "Listener(@)" -// ], -// "n_peers": "3", -// "peers": [ -// { -// "node_info": { -// "protocol_version": { -// "p2p": "7", -// "block": "8", -// "app": "1" -// }, -// "id": "93529da3435c090d02251a050342b6a488d4ab56", -// "listen_addr": "tcp://0.0.0.0:26656", -// "network": "chain-RFo6qC", -// "version": "0.30.0", -// "channels": "4020212223303800", -// "moniker": "fc89e4ed23f2", -// "other": { -// "tx_index": "on", -// "rpc_address": "tcp://0.0.0.0:26657" -// } -// }, -// "is_outbound": true, -// "connection_status": { -// "Duration": "3475230558", -// "SendMonitor": { -// "Active": true, -// "Start": "2019-02-14T12:40:47.52Z", -// "Duration": "3480000000", -// "Idle": "240000000", -// "Bytes": "4512", -// "Samples": "9", -// "InstRate": "1338", -// "CurRate": "2046", -// "AvgRate": "1297", -// "PeakRate": "6570", -// "BytesRem": "0", -// "TimeRem": "0", -// "Progress": 0 -// }, -// "RecvMonitor": { -// "Active": true, -// "Start": "2019-02-14T12:40:47.52Z", -// "Duration": "3480000000", -// "Idle": "280000000", -// "Bytes": "4489", -// "Samples": "10", -// "InstRate": "1821", -// "CurRate": "1663", -// "AvgRate": "1290", -// "PeakRate": "5512", -// "BytesRem": "0", -// "TimeRem": "0", -// "Progress": 0 -// }, -// "Channels": [ -// { -// "ID": 48, -// "SendQueueCapacity": "1", -// "SendQueueSize": "0", -// "Priority": "5", -// "RecentlySent": "0" -// }, -// { -// "ID": 64, -// "SendQueueCapacity": "1000", -// "SendQueueSize": "0", -// "Priority": "10", -// "RecentlySent": "14" -// }, -// { -// "ID": 32, -// "SendQueueCapacity": "100", -// "SendQueueSize": "0", -// "Priority": "5", -// "RecentlySent": "619" -// }, -// { -// "ID": 33, -// "SendQueueCapacity": "100", -// "SendQueueSize": "0", -// "Priority": "10", -// 
"RecentlySent": "1363" -// }, -// { -// "ID": 34, -// "SendQueueCapacity": "100", -// "SendQueueSize": "0", -// "Priority": "5", -// "RecentlySent": "2145" -// }, -// { -// "ID": 35, -// "SendQueueCapacity": "2", -// "SendQueueSize": "0", -// "Priority": "1", -// "RecentlySent": "0" -// }, -// { -// "ID": 56, -// "SendQueueCapacity": "1", -// "SendQueueSize": "0", -// "Priority": "5", -// "RecentlySent": "0" -// }, -// { -// "ID": 0, -// "SendQueueCapacity": "10", -// "SendQueueSize": "0", -// "Priority": "1", -// "RecentlySent": "10" -// } -// ] -// }, -// "remote_ip": "192.167.10.3" -// }, -// ... -// } -// -// ``` -func NetInfo(_ *rpctypes.Context) (*ctypes.ResultNetInfo, error) { - var ( - set = p2pPeers.Peers() - out, in = set.NumOutbound(), set.NumInbound() - ) - - peers := make([]ctypes.Peer, 0, out+in) - for _, peer := range set.List() { - nodeInfo := peer.NodeInfo() - peers = append(peers, ctypes.Peer{ - NodeInfo: nodeInfo, - IsOutbound: peer.IsOutbound(), - ConnectionStatus: peer.Status(), - RemoteIP: peer.RemoteIP().String(), - }) - } - - return &ctypes.ResultNetInfo{ - Listening: p2pTransport.IsListening(), - Listeners: p2pTransport.Listeners(), - NPeers: len(peers), - Peers: peers, - }, nil -} - -// Get genesis file. -// -// ```shell -// curl 'localhost:26657/genesis' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// genesis, err := client.Genesis() -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// -// { -// "error": "", -// "result": { -// "genesis": { -// "app_hash": "", -// "validators": [ -// { -// "name": "", -// "power": "10", -// "pub_key": { -// "data": "68DFDA7E50F82946E7E8546BED37944A422CD1B831E70DF66BA3B8430593944D", -// "type": "ed25519" -// } -// } -// ], -// "chain_id": "test-chain-6UTNIN", -// "genesis_time": "2017-05-29T15:05:41.671Z" -// } -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// -// ``` -func Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { - return &ctypes.ResultGenesis{Genesis: genDoc}, nil -} diff --git a/tm2/pkg/bft/rpc/core/net/net.go b/tm2/pkg/bft/rpc/core/net/net.go new file mode 100644 index 00000000000..dbfb5007bcc --- /dev/null +++ b/tm2/pkg/bft/rpc/core/net/net.go @@ -0,0 +1,71 @@ +package net + +import ( + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + "github.com/gnolang/gno/tm2/pkg/bft/types" +) + +// Handler is the net RPC handler +type Handler struct { + genesisDoc *types.GenesisDoc + + peers ctypes.Peers + transport ctypes.Transport +} + +// NewHandler creates a new instance of the net RPC handler +func NewHandler( + peers ctypes.Peers, + transport ctypes.Transport, + genesisDoc *types.GenesisDoc, +) *Handler { + return &Handler{ + peers: peers, + transport: transport, + genesisDoc: genesisDoc, + } +} + +// NetInfoHandler fetches the current network info +// +// No params +func (h *Handler) NetInfoHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + if len(p) > 0 { + return nil, spec.GenerateInvalidParamError(1) + } + + var ( + set = h.peers.Peers() + out, in = set.NumOutbound(), set.NumInbound() + ) + + peers := make([]ctypes.Peer, 0, out+in) + for _, peer := range set.List() { + peers = append(peers, ctypes.Peer{ + NodeInfo: peer.NodeInfo(), + IsOutbound: peer.IsOutbound(), + 
ConnectionStatus: peer.Status(), + RemoteIP: peer.RemoteIP().String(), + }) + } + + return &ctypes.ResultNetInfo{ + Listening: h.transport.IsListening(), + Listeners: h.transport.Listeners(), + NPeers: len(peers), + Peers: peers, + }, nil +} + +// GenesisHandler fetches the genesis document (genesis.json) +// +// No params +func (h *Handler) GenesisHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + if len(p) > 0 { + return nil, spec.GenerateInvalidParamError(1) + } + + return &ctypes.ResultGenesis{Genesis: h.genesisDoc}, nil +} diff --git a/tm2/pkg/bft/rpc/core/net/net_test.go b/tm2/pkg/bft/rpc/core/net/net_test.go new file mode 100644 index 00000000000..128bcc71646 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/net/net_test.go @@ -0,0 +1,108 @@ +package net + +import ( + "testing" + + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/mock" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + "github.com/gnolang/gno/tm2/pkg/bft/types" + "github.com/gnolang/gno/tm2/pkg/p2p" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHandler_NetInfo(t *testing.T) { + t.Parallel() + + t.Run("Unexpected params", func(t *testing.T) { + t.Parallel() + + h := &Handler{} + + res, err := h.NetInfoHandler(nil, []any{"extra"}) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Valid, empty peer set", func(t *testing.T) { + t.Parallel() + + var ( + mockPeerSet = &mock.PeerSet{} + + mockPeers = &mock.Peers{ + PeersFn: func() p2p.PeerSet { + return mockPeerSet + }, + } + + expectedListeners = []string{"tcp://0.0.0.0:26656"} + + mockTransport = &mock.Transport{ + ListenersFn: func() []string { + return expectedListeners + }, + IsListeningFn: func() bool { + return true + }, + } + ) + + h := &Handler{ + peers: mockPeers, + transport: mockTransport, + } + + res, err := h.NetInfoHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultNetInfo) + require.True(t, ok) + + assert.True(t, result.Listening) + assert.Equal(t, expectedListeners, result.Listeners) + assert.Equal(t, 0, result.NPeers) + assert.Len(t, result.Peers, 0) + }) +} + +func TestHandler_GenesisHandler(t *testing.T) { + t.Parallel() + + t.Run("Unexpected params", func(t *testing.T) { + t.Parallel() + + h := &Handler{} + + res, err := h.GenesisHandler(nil, []any{"extra"}) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Returns genesis doc", func(t *testing.T) { + t.Parallel() + + genDoc := &types.GenesisDoc{ + ChainID: "test-chain", + } + + h := &Handler{ + genesisDoc: genDoc, + } + + res, err := h.GenesisHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + result, ok := res.(*ctypes.ResultGenesis) + require.True(t, ok) + + assert.Equal(t, genDoc, result.Genesis) + }) +} diff --git a/tm2/pkg/bft/rpc/core/params/params.go b/tm2/pkg/bft/rpc/core/params/params.go new file mode 100644 index 00000000000..14507d57fac --- /dev/null +++ b/tm2/pkg/bft/rpc/core/params/params.go @@ -0,0 +1,180 @@ +package params + +import ( + "encoding/hex" + "encoding/json" + "strconv" + "strings" + + "github.com/gnolang/gno/tm2/pkg/amino" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" +) + +func get(params []any, idx int) any { + if idx < 0 || idx >= len(params) { + return nil + } + + return params[idx] +} + +func AsString(params []any, idx int) 
(string, *spec.BaseJSONError) { + raw := get(params, idx) + if raw == nil { + return "", nil + } + + switch v := raw.(type) { + case string: + // Query params are strings already + return v, nil + default: + // For JSON-RPC POSTs, go through Amino to preserve legacy behavior + b, err := json.Marshal(v) + if err != nil { + return "", spec.GenerateInvalidParamError(idx) + } + + var out string + if err = amino.UnmarshalJSON(b, &out); err != nil { + return "", spec.GenerateInvalidParamError(idx) + } + + return out, nil + } +} + +func AsBytes(params []any, idx int, required bool) ([]byte, *spec.BaseJSONError) { + raw := get(params, idx) + if raw == nil { + if required { + return nil, spec.GenerateInvalidParamError(idx) + } + + return nil, nil + } + + switch v := raw.(type) { + case string: + // HTTP GET compatibility, 0x-prefixed hex + if strings.HasPrefix(v, "0x") { + data, err := hex.DecodeString(v[2:]) + if err != nil { + return nil, spec.GenerateInvalidParamError(idx) + } + + return data, nil + } + + // For everything else, Amino semantics for []byte + b, err := amino.MarshalJSON(v) + if err != nil { + return nil, spec.GenerateInvalidParamError(idx) + } + + var out []byte + if err := amino.UnmarshalJSON(b, &out); err != nil { + return nil, spec.GenerateInvalidParamError(idx) + } + + return out, nil + + default: + // For JSON-RPC POSTs, the value is already decoded by encoding/json + b, err := json.Marshal(v) + if err != nil { + return nil, spec.GenerateInvalidParamError(idx) + } + + var out []byte + if err := amino.UnmarshalJSON(b, &out); err != nil { + return nil, spec.GenerateInvalidParamError(idx) + } + + return out, nil + } +} + +func AsInt64(params []any, idx int) (int64, *spec.BaseJSONError) { + raw := get(params, idx) + if raw == nil { + return 0, nil + } + + switch v := raw.(type) { + case int64: + return v, nil + + case int: + return int64(v), nil + + case float64: + // JSON numbers -> int64 (old Amino expected strings, but no client should rely on that distinction) + return int64(v), nil + + case string: + // HTTP GET: query param is always a string. 
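To make the coercion rules above concrete, a short sketch (import path as introduced in this patch):

```go
package main

import (
	"fmt"

	coreparams "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/params"
)

func main() {
	// JSON-RPC POST body: encoding/json decodes numbers to float64.
	fromJSON, _ := coreparams.AsInt64([]any{float64(30)}, 0)

	// HTTP GET: the same parameter arrives as a plain string.
	fromQuery, _ := coreparams.AsInt64([]any{"30"}, 0)

	// Missing optional parameter: zero value, no error.
	missing, _ := coreparams.AsInt64(nil, 0)

	fmt.Println(fromJSON, fromQuery, missing) // 30 30 0
}
```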
+ // Old Amino wrapped integer-looking strings in quotes and then used Amino decoding + if v == "" { + return 0, nil + } + + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return 0, spec.GenerateInvalidParamError(idx) + } + + return i, nil + + default: + // Fallback, json -> amino -> int64 + b, err := json.Marshal(v) + if err != nil { + return 0, spec.GenerateInvalidParamError(idx) + } + + var out int64 + if err := amino.UnmarshalJSON(b, &out); err != nil { + return 0, spec.GenerateInvalidParamError(idx) + } + + return out, nil + } +} + +func AsBool(params []any, idx int) (bool, *spec.BaseJSONError) { + raw := get(params, idx) + if raw == nil { + return false, nil + } + + switch v := raw.(type) { + case bool: + return v, nil + + case string: + // Accept "true"/"false" as HTTP query values + switch strings.ToLower(v) { + case "true": + return true, nil + case "false": + return false, nil + default: + return false, spec.GenerateInvalidParamError(idx) + } + + default: + // Fallback, json -> amino -> bool + b, err := json.Marshal(v) + if err != nil { + return false, spec.GenerateInvalidParamError(idx) + } + + var out bool + if err := amino.UnmarshalJSON(b, &out); err != nil { + return false, spec.GenerateInvalidParamError(idx) + } + + return out, nil + } +} diff --git a/tm2/pkg/bft/rpc/core/pipe.go b/tm2/pkg/bft/rpc/core/pipe.go deleted file mode 100644 index 085fc35da55..00000000000 --- a/tm2/pkg/bft/rpc/core/pipe.go +++ /dev/null @@ -1,162 +0,0 @@ -package core - -import ( - "fmt" - "log/slog" - - "github.com/gnolang/gno/tm2/pkg/bft/appconn" - cnscfg "github.com/gnolang/gno/tm2/pkg/bft/consensus/config" - cstypes "github.com/gnolang/gno/tm2/pkg/bft/consensus/types" - mempl "github.com/gnolang/gno/tm2/pkg/bft/mempool" - cfg "github.com/gnolang/gno/tm2/pkg/bft/rpc/config" - sm "github.com/gnolang/gno/tm2/pkg/bft/state" - "github.com/gnolang/gno/tm2/pkg/bft/types" - "github.com/gnolang/gno/tm2/pkg/crypto" - dbm "github.com/gnolang/gno/tm2/pkg/db" - "github.com/gnolang/gno/tm2/pkg/events" - "github.com/gnolang/gno/tm2/pkg/p2p" - p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types" -) - -const ( - // see README - defaultPerPage = 30 - maxPerPage = 100 -) - -// ---------------------------------------------- -// These interfaces are used by RPC and must be thread safe - -type Consensus interface { - GetConfigDeepCopy() *cnscfg.ConsensusConfig - GetState() sm.State - GetValidators() (int64, []*types.Validator) - GetLastHeight() int64 - GetRoundStateDeepCopy() *cstypes.RoundState - GetRoundStateSimple() cstypes.RoundStateSimple -} - -type transport interface { - Listeners() []string - IsListening() bool - NodeInfo() p2pTypes.NodeInfo -} - -type peers interface { - Peers() p2p.PeerSet -} - -// ---------------------------------------------- -// These package level globals come with setters -// that are expected to be called only once, on startup - -var ( - // external, thread safe interfaces - proxyAppQuery appconn.Query - - // interfaces defined in types and above - stateDB dbm.DB - blockStore sm.BlockStore - consensusState Consensus - p2pPeers peers - p2pTransport transport - - // objects - pubKey crypto.PubKey - genDoc *types.GenesisDoc // cache the genesis structure - evsw events.EventSwitch - gTxDispatcher *txDispatcher - mempool mempl.Mempool - getFastSync func() bool // avoids dependency on consensus pkg - - logger *slog.Logger - - config cfg.RPCConfig -) - -func SetStateDB(db dbm.DB) { - stateDB = db -} - -func SetBlockStore(bs sm.BlockStore) { - blockStore = bs -} - -func 
SetMempool(mem mempl.Mempool) { - mempool = mem -} - -func SetConsensusState(cs Consensus) { - consensusState = cs -} - -func SetP2PPeers(p peers) { - p2pPeers = p -} - -func SetP2PTransport(t transport) { - p2pTransport = t -} - -func SetPubKey(pk crypto.PubKey) { - pubKey = pk -} - -func SetGenesisDoc(doc *types.GenesisDoc) { - genDoc = doc -} - -func SetProxyAppQuery(appConn appconn.Query) { - proxyAppQuery = appConn -} - -func SetGetFastSync(v func() bool) { - getFastSync = v -} - -func SetLogger(l *slog.Logger) { - logger = l -} - -func SetEventSwitch(sw events.EventSwitch) { - evsw = sw - gTxDispatcher = newTxDispatcher(evsw) -} - -func Start() { - gTxDispatcher.Start() -} - -// SetConfig sets an RPCConfig. -func SetConfig(c cfg.RPCConfig) { - config = c -} - -func validatePage(page, perPage, totalCount int) (int, error) { - if perPage < 1 { - panic(fmt.Sprintf("zero or negative perPage: %d", perPage)) - } - - if page == 0 { - return 1, nil // default - } - - pages := ((totalCount - 1) / perPage) + 1 - if pages == 0 { - pages = 1 // one page (even if it's empty) - } - if page < 0 || page > pages { - return 1, fmt.Errorf("page should be within [0, %d] range, given %d", pages, page) - } - - return page, nil -} - -func validatePerPage(perPage int) int { - if perPage < 1 { - return defaultPerPage - } else if perPage > maxPerPage { - return maxPerPage - } - return perPage -} diff --git a/tm2/pkg/bft/rpc/core/pipe_test.go b/tm2/pkg/bft/rpc/core/pipe_test.go deleted file mode 100644 index 6136f66c9d8..00000000000 --- a/tm2/pkg/bft/rpc/core/pipe_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package core - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestPaginationPage(t *testing.T) { - t.Parallel() - - cases := []struct { - totalCount int - perPage int - page int - newPage int - expErr bool - }{ - {0, 10, 1, 1, false}, - - {0, 10, 0, 1, false}, - {0, 10, 1, 1, false}, - {0, 10, 2, 0, true}, - - {5, 10, -1, 0, true}, - {5, 10, 0, 1, false}, - {5, 10, 1, 1, false}, - {5, 10, 2, 0, true}, - {5, 10, 2, 0, true}, - - {5, 5, 1, 1, false}, - {5, 5, 2, 0, true}, - {5, 5, 3, 0, true}, - - {5, 3, 2, 2, false}, - {5, 3, 3, 0, true}, - - {5, 2, 2, 2, false}, - {5, 2, 3, 3, false}, - {5, 2, 4, 0, true}, - } - - for _, c := range cases { - p, err := validatePage(c.page, c.perPage, c.totalCount) - if c.expErr { - assert.Error(t, err) - continue - } - - assert.Equal(t, c.newPage, p, fmt.Sprintf("%v", c)) - } -} - -func TestPaginationPerPage(t *testing.T) { - t.Parallel() - - cases := []struct { - totalCount int - perPage int - newPerPage int - }{ - {5, 0, defaultPerPage}, - {5, 1, 1}, - {5, 2, 2}, - {5, defaultPerPage, defaultPerPage}, - {5, maxPerPage - 1, maxPerPage - 1}, - {5, maxPerPage, maxPerPage}, - {5, maxPerPage + 1, maxPerPage}, - } - - for _, c := range cases { - p := validatePerPage(c.perPage) - assert.Equal(t, c.newPerPage, p, fmt.Sprintf("%v", c)) - } -} diff --git a/tm2/pkg/bft/rpc/core/register.go b/tm2/pkg/bft/rpc/core/register.go new file mode 100644 index 00000000000..c8895d39b88 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/register.go @@ -0,0 +1,208 @@ +package core + +import ( + "github.com/gnolang/gno/tm2/pkg/bft/appconn" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/abci" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/blocks" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/consensus" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/health" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/mempool" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/net" + 
"github.com/gnolang/gno/tm2/pkg/bft/rpc/core/status" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/tx" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server" + "github.com/gnolang/gno/tm2/pkg/bft/state" + "github.com/gnolang/gno/tm2/pkg/bft/types" + dbm "github.com/gnolang/gno/tm2/pkg/db" + "github.com/gnolang/gno/tm2/pkg/events" +) + +// SetupABCI sets up the following endpoints: +// - abci_info +// - abci_query +func SetupABCI(server *server.JSONRPC, proxyAppConn appconn.Query) { + h := abci.NewHandler(proxyAppConn) + + server.RegisterHandler( + "abci_info", + h.InfoHandler, + ) + + server.RegisterHandler( + "abci_query", + h.QueryHandler, + "path", "data", "height", "prove", + ) +} + +// SetupBlocks sets up the following endpoints: +// - blockchain +// - block +// - commit +// - block_results +func SetupBlocks(server *server.JSONRPC, store state.BlockStore, stateDB dbm.DB) { + h := blocks.NewHandler(store, stateDB) + + server.RegisterHandler( + "blockchain", + h.BlockchainInfoHandler, + "minHeight", "maxHeight", + ) + + server.RegisterHandler( + "block", + h.BlockHandler, + "height", + ) + + server.RegisterHandler( + "commit", + h.CommitHandler, + "height", + ) + + server.RegisterHandler( + "block_results", + h.BlockResultsHandler, + "height", + ) +} + +// SetupConsensus sets up the following endpoints: +// - validators +// - dump_consensus_state +// - consensus_state +// - consensus_params +func SetupConsensus( + server *server.JSONRPC, + consensusState consensus.Consensus, + stateDB dbm.DB, + peers ctypes.Peers, +) { + h := consensus.NewHandler(consensusState, stateDB, peers) + + server.RegisterHandler( + "validators", + h.ValidatorsHandler, + "height", + ) + + server.RegisterHandler( + "dump_consensus_state", + h.DumpConsensusStateHandler, + ) + + server.RegisterHandler( + "consensus_state", + h.ConsensusStateHandler, + ) + + server.RegisterHandler( + "consensus_params", + h.ConsensusParamsHandler, + "height", + ) +} + +// SetupHealth sets up the following endpoints: +// - health +func SetupHealth(server *server.JSONRPC) { + server.RegisterHandler( + "health", + health.HealthHandler, + ) +} + +// SetupMempool sets up the following endpoints: +// - broadcast_tx_async +// - broadcast_tx_sync +// - broadcast_tx_commit +// - unconfirmed_txs +// - num_unconfirmed_txs +func SetupMempool( + server *server.JSONRPC, + mp mempool.Mempool, + evsw events.EventSwitch, +) { + h := mempool.NewHandler(mp, evsw) + + server.RegisterHandler( + "broadcast_tx_async", + h.BroadcastTxAsyncHandler, + "tx", + ) + + server.RegisterHandler( + "broadcast_tx_sync", + h.BroadcastTxSyncHandler, + "tx", + ) + + server.RegisterHandler( + "broadcast_tx_commit", + h.BroadcastTxCommitHandler, + "tx", + ) + + server.RegisterHandler( + "unconfirmed_txs", + h.UnconfirmedTxsHandler, + "limit", + ) + + server.RegisterHandler( + "num_unconfirmed_txs", + h.NumUnconfirmedTxsHandler, + ) +} + +// SetupNet sets up the following endpoints: +// - net_info +// - genesis +func SetupNet( + server *server.JSONRPC, + peers ctypes.Peers, + transport ctypes.Transport, + genesisDoc *types.GenesisDoc, +) { + h := net.NewHandler(peers, transport, genesisDoc) + + server.RegisterHandler( + "net_info", + h.NetInfoHandler, + ) + + server.RegisterHandler( + "genesis", + h.GenesisHandler, + ) +} + +// SetupTx sets up the following endpoints: +// - tx +func SetupTx( + server *server.JSONRPC, + blockStore state.BlockStore, + stateDB dbm.DB, +) { + h := tx.NewHandler(blockStore, stateDB) + 
+ server.RegisterHandler( + "tx", + h.TxHandler, + "hash", + ) +} + +// SetupStatus sets up the following endpoints: +// - status +func SetupStatus(server *server.JSONRPC, buildFn status.BuildStatusFn) { + h := status.NewHandler(buildFn) + + server.RegisterHandler( + "status", + h.StatusHandler, + "heightGte", + ) +} diff --git a/tm2/pkg/bft/rpc/core/routes.go b/tm2/pkg/bft/rpc/core/routes.go deleted file mode 100644 index aee6c2896d7..00000000000 --- a/tm2/pkg/bft/rpc/core/routes.go +++ /dev/null @@ -1,45 +0,0 @@ -package core - -import ( - rpc "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server" -) - -// TODO: better system than "unsafe" prefix -// NOTE: Amino is registered in rpc/core/types/codec.go. -var Routes = map[string]*rpc.RPCFunc{ - // info API - "health": rpc.NewRPCFunc(Health, ""), - "status": rpc.NewRPCFunc(Status, "heightGte"), - "net_info": rpc.NewRPCFunc(NetInfo, ""), - "blockchain": rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight"), - "genesis": rpc.NewRPCFunc(Genesis, ""), - "block": rpc.NewRPCFunc(Block, "height"), - "block_results": rpc.NewRPCFunc(BlockResults, "height"), - "commit": rpc.NewRPCFunc(Commit, "height"), - "tx": rpc.NewRPCFunc(Tx, "hash"), - "validators": rpc.NewRPCFunc(Validators, "height"), - "dump_consensus_state": rpc.NewRPCFunc(DumpConsensusState, ""), - "consensus_state": rpc.NewRPCFunc(ConsensusState, ""), - "consensus_params": rpc.NewRPCFunc(ConsensusParams, "height"), - "unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxs, "limit"), - "num_unconfirmed_txs": rpc.NewRPCFunc(NumUnconfirmedTxs, ""), - - // tx broadcast API - "broadcast_tx_commit": rpc.NewRPCFunc(BroadcastTxCommit, "tx"), - "broadcast_tx_sync": rpc.NewRPCFunc(BroadcastTxSync, "tx"), - "broadcast_tx_async": rpc.NewRPCFunc(BroadcastTxAsync, "tx"), - - // abci API - "abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,prove"), - "abci_info": rpc.NewRPCFunc(ABCIInfo, ""), -} - -func AddUnsafeRoutes() { - // control API - Routes["unsafe_flush_mempool"] = rpc.NewRPCFunc(UnsafeFlushMempool, "") - - // profiler API - Routes["unsafe_start_cpu_profiler"] = rpc.NewRPCFunc(UnsafeStartCPUProfiler, "filename") - Routes["unsafe_stop_cpu_profiler"] = rpc.NewRPCFunc(UnsafeStopCPUProfiler, "") - Routes["unsafe_write_heap_profile"] = rpc.NewRPCFunc(UnsafeWriteHeapProfile, "filename") -} diff --git a/tm2/pkg/bft/rpc/core/server.go b/tm2/pkg/bft/rpc/core/server.go new file mode 100644 index 00000000000..c772c513fa7 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/server.go @@ -0,0 +1,91 @@ +package core + +import ( + "context" + "errors" + "fmt" + "log/slog" + "net" + "net/http" + "sync" + "time" + + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/config" +) + +type Server struct { + h http.Handler + logger *slog.Logger + config *config.Config + + srv *http.Server + ln net.Listener + errCh chan error + stopOnce sync.Once +} + +func New(h http.Handler, config *config.Config, logger *slog.Logger) *Server { + return &Server{ + h: h, + config: config, + logger: logger, + errCh: make(chan error, 1), + } +} + +// Start starts the server asynchronously +func (s *Server) Start() error { + s.srv = &http.Server{ + Addr: s.config.ListenAddress, + Handler: s.h, + ReadHeaderTimeout: 60 * time.Second, + WriteTimeout: 60 * time.Second, + } + + ln, err := net.Listen("tcp", s.srv.Addr) + if err != nil { + return fmt.Errorf( + "unable to listen on address %s: %w", + s.srv.Addr, + err, + ) + } + + s.ln = ln + + s.logger.Info( + "RPC server started", + "address", ln.Addr().String(), + ) + + // Start serving async + go func() { 
+ if err := s.srv.Serve(ln); err != nil && !errors.Is(err, http.ErrServerClosed) { + s.errCh <- err + } + + close(s.errCh) + }() + + return nil +} + +// Stop gracefully stops the server +func (s *Server) Stop() error { + var shutdownErr error + + s.stopOnce.Do(func() { + s.logger.Info("RPC server shutting down") + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + if s.srv != nil { + shutdownErr = s.srv.Shutdown(ctx) + } + + s.logger.Info("RPC server shut down") + }) + + return shutdownErr +} diff --git a/tm2/pkg/bft/rpc/core/status.go b/tm2/pkg/bft/rpc/core/status.go deleted file mode 100644 index 6851fd8b8c1..00000000000 --- a/tm2/pkg/bft/rpc/core/status.go +++ /dev/null @@ -1,160 +0,0 @@ -package core - -import ( - "fmt" - "time" - - ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" - sm "github.com/gnolang/gno/tm2/pkg/bft/state" - "github.com/gnolang/gno/tm2/pkg/bft/types" -) - -// Get Tendermint status including node info, pubkey, latest block -// hash, app hash, block height and time. -// -// ```shell -// curl 'localhost:26657/status' -// ``` -// -// Additionally, it has an optional `heightGte` parameter than will return a `409` if the latest chain height is less than it. -// This parameter is useful for readyness probes. -// -// ```shell -// curl 'localhost:26657/status?heightGte=1' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Start() -// -// if err != nil { -// // handle error -// } -// -// defer client.Stop() -// result, err := client.Status() -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// { -// "jsonrpc": "2.0", -// "id": "", -// -// "result": { -// "node_info": { -// "protocol_version": { -// "p2p": "4", -// "block": "7", -// "app": "0" -// }, -// "id": "53729852020041b956e86685e24394e0bee4373f", -// "listen_addr": "10.0.2.15:26656", -// "network": "test-chain-Y1OHx6", -// "version": "0.24.0-2ce1abc2", -// "channels": "4020212223303800", -// "moniker": "ubuntu-xenial", -// "other": { -// "tx_index": "on", -// "rpc_addr": "tcp://0.0.0.0:26657" -// } -// }, -// "sync_info": { -// "latest_block_hash": "F51538DA498299F4C57AC8162AAFA0254CE08286", -// "latest_app_hash": "0000000000000000", -// "latest_block_height": "18", -// "latest_block_time": "2018-09-17T11:42:19.149920551Z", -// "catching_up": false -// }, -// "validator_info": { -// "address": "D9F56456D7C5793815D0E9AF07C3A355D0FC64FD", -// "pub_key": { -// "type": "tendermint/PubKeyEd25519", -// "value": "wVxKNtEsJmR4vvh651LrVoRguPs+6yJJ9Bz174gw9DM=" -// }, -// "voting_power": "10" -// } -// } -// } -// -// ``` -func Status(ctx *rpctypes.Context, heightGtePtr *int64) (*ctypes.ResultStatus, error) { - var latestHeight int64 - if getFastSync() { - latestHeight = blockStore.Height() - } else { - latestHeight = consensusState.GetLastHeight() - } - - if heightGtePtr != nil && latestHeight < *heightGtePtr { - // Using `409 Conflict` since it's spec states: - // > 409 responses may be used for implementation-specific purposes - return nil, rpctypes.NewHTTPStatusError(409, fmt.Sprintf("latest height is %d, which is less than %d", latestHeight, *heightGtePtr)) - } - - var ( - latestBlockMeta *types.BlockMeta - latestBlockHash []byte - latestAppHash []byte - latestBlockTimeNano int64 - ) - if latestHeight != 0 { - latestBlockMeta = blockStore.LoadBlockMeta(latestHeight) - latestBlockHash = latestBlockMeta.BlockID.Hash - 
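The Server above is a thin lifecycle wrapper around net/http: Start binds the listener and serves in a goroutine, Stop drains it with a 30-second grace period. A minimal sketch of driving it, assuming any http.Handler (in the node this would be the JSON-RPC routes) and a local listen address chosen only for the example:

package main

import (
	"log/slog"
	"net/http"
	"os"
	"os/signal"

	"github.com/gnolang/gno/tm2/pkg/bft/rpc/core"
	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/config"
)

func main() {
	// Any http.Handler can be served; a stub is enough to show the lifecycle
	h := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	cfg := config.DefaultConfig()
	cfg.ListenAddress = "127.0.0.1:26657" // bind locally for the example

	srv := core.New(h, cfg, slog.New(slog.NewTextHandler(os.Stdout, nil)))

	// Start is non-blocking: Serve runs in its own goroutine
	if err := srv.Start(); err != nil {
		panic(err)
	}

	// Wait for an interrupt, then shut down gracefully
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt)
	<-stop

	_ = srv.Stop()
}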
latestAppHash = latestBlockMeta.Header.AppHash - latestBlockTimeNano = latestBlockMeta.Header.Time.UnixNano() - } - - latestBlockTime := time.Unix(0, latestBlockTimeNano) - - var votingPower int64 - if val := validatorAtHeight(latestHeight); val != nil { - votingPower = val.VotingPower - } - - result := &ctypes.ResultStatus{ - NodeInfo: p2pTransport.NodeInfo(), - SyncInfo: ctypes.SyncInfo{ - LatestBlockHash: latestBlockHash, - LatestAppHash: latestAppHash, - LatestBlockHeight: latestHeight, - LatestBlockTime: latestBlockTime, - CatchingUp: getFastSync(), - }, - ValidatorInfo: ctypes.ValidatorInfo{ - Address: pubKey.Address(), - PubKey: pubKey, - VotingPower: votingPower, - }, - } - - return result, nil -} - -func validatorAtHeight(h int64) *types.Validator { - privValAddress := pubKey.Address() - - // If we're still at height h, search in the current validator set. - lastBlockHeight, vals := consensusState.GetValidators() - if lastBlockHeight == h { - for _, val := range vals { - if val.Address == privValAddress { - return val - } - } - } - - // If we've moved to the next height, retrieve the validator set from DB. - if lastBlockHeight > h { - vals, err := sm.LoadValidators(stateDB, h) - if err != nil { - return nil // should not happen - } - _, val := vals.GetByAddress(privValAddress) - return val - } - - return nil -} diff --git a/tm2/pkg/bft/rpc/core/status/status.go b/tm2/pkg/bft/rpc/core/status/status.go new file mode 100644 index 00000000000..bb439882d51 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/status/status.go @@ -0,0 +1,58 @@ +package status + +import ( + "fmt" + + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/params" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" +) + +type BuildStatusFn func() (*ctypes.ResultStatus, error) + +// Handler is the status RPC handler +type Handler struct { + buildFn BuildStatusFn +} + +// NewHandler creates a new instance of the status RPC handler +func NewHandler(buildFn BuildStatusFn) *Handler { + return &Handler{ + buildFn: buildFn, + } +} + +// StatusHandler fetches the Tendermint status, including node info, pubkey, latest block +// hash, app hash, block height and time. 
+// +// Params: +// - heightGte (optional, defaults to 0) +func (h *Handler) StatusHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + const idxHeightGte = 0 + + heightGte, err := params.AsInt64(p, idxHeightGte) + if err != nil { + return nil, err + } + + res, buildErr := h.buildFn() + if buildErr != nil { + return nil, spec.GenerateResponseError(buildErr) + } + + latestHeight := res.SyncInfo.LatestBlockHeight + + if heightGte > 0 && latestHeight < heightGte { + return nil, spec.NewJSONError( + fmt.Sprintf( + "latest height is %d, which is less than %d", + latestHeight, + heightGte, + ), + spec.InvalidRequestErrorCode, + ) + } + + return res, nil +} diff --git a/tm2/pkg/bft/rpc/core/status/status_test.go b/tm2/pkg/bft/rpc/core/status/status_test.go new file mode 100644 index 00000000000..c9bf2d17f33 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/status/status_test.go @@ -0,0 +1,112 @@ +package status + +import ( + "errors" + "testing" + + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHandler_StatusHandler(t *testing.T) { + t.Parallel() + + t.Run("Invalid GTE param", func(t *testing.T) { + t.Parallel() + + h := NewHandler(func() (*ctypes.ResultStatus, error) { + t.FailNow() + + return nil, nil + }) + + res, err := h.StatusHandler(nil, []any{"not-an-int"}) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Build status error", func(t *testing.T) { + t.Parallel() + + buildErr := errors.New("build failed") + + h := NewHandler(func() (*ctypes.ResultStatus, error) { + return nil, buildErr + }) + + res, err := h.StatusHandler(nil, nil) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + assert.Contains(t, err.Message, buildErr.Error()) + }) + + t.Run("heightGte not satisfied", func(t *testing.T) { + t.Parallel() + + h := NewHandler(func() (*ctypes.ResultStatus, error) { + return &ctypes.ResultStatus{ + SyncInfo: ctypes.SyncInfo{ + LatestBlockHeight: 5, + }, + }, nil + }) + + res, err := h.StatusHandler(nil, []any{int64(10)}) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidRequestErrorCode, err.Code) + }) + + t.Run("Valid status, no heightGte", func(t *testing.T) { + t.Parallel() + + expected := &ctypes.ResultStatus{ + SyncInfo: ctypes.SyncInfo{ + LatestBlockHeight: 12, + }, + } + + h := NewHandler(func() (*ctypes.ResultStatus, error) { + return expected, nil + }) + + res, err := h.StatusHandler(nil, nil) + require.Nil(t, err) + require.NotNil(t, res) + + out, ok := res.(*ctypes.ResultStatus) + require.True(t, ok) + + assert.Equal(t, expected, out) + }) + + t.Run("Valid status, heightGte satisfied", func(t *testing.T) { + t.Parallel() + + expected := &ctypes.ResultStatus{ + SyncInfo: ctypes.SyncInfo{ + LatestBlockHeight: 10, + }, + } + + h := NewHandler(func() (*ctypes.ResultStatus, error) { + return expected, nil + }) + + res, err := h.StatusHandler(nil, []any{int64(10)}) + require.Nil(t, err) + require.NotNil(t, res) + + out, ok := res.(*ctypes.ResultStatus) + require.True(t, ok) + + assert.Equal(t, expected, out) + }) +} diff --git a/tm2/pkg/bft/rpc/core/tx.go b/tm2/pkg/bft/rpc/core/tx.go deleted file mode 100644 index 255e33ca499..00000000000 --- a/tm2/pkg/bft/rpc/core/tx.go +++ /dev/null @@ -1,66 +0,0 @@ -package core - -import ( - "fmt" - - ctypes 
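The status endpoint no longer reads consensus or block-store globals; the node injects a BuildStatusFn and the handler only applies the heightGte check on top of whatever that callback returns. A sketch of the wiring (the canned result and the function name are placeholders for what a node would actually assemble):

package example

import (
	"github.com/gnolang/gno/tm2/pkg/bft/rpc/core"
	ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types"
	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server"
)

// wireStatus registers the "status" endpoint on an existing JSON-RPC server.
// A real node would build the full status (node info, sync info, validator
// info) inside the callback; a fixed sync height is enough to show the shape
func wireStatus(srv *server.JSONRPC) {
	core.SetupStatus(srv, func() (*ctypes.ResultStatus, error) {
		return &ctypes.ResultStatus{
			SyncInfo: ctypes.SyncInfo{
				LatestBlockHeight: 42,
			},
		}, nil
	})
}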
"github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" - rpctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" - sm "github.com/gnolang/gno/tm2/pkg/bft/state" -) - -// Tx allows you to query the transaction results. `nil` could mean the -// transaction is in the mempool, invalidated, or was not sent in the first -// place -func Tx(_ *rpctypes.Context, hash []byte) (*ctypes.ResultTx, error) { - // Get the result index from storage, if any - resultIndex, err := sm.LoadTxResultIndex(stateDB, hash) - if err != nil { - return nil, err - } - - // Sanity check the block height - height, err := getHeight(blockStore.Height(), &resultIndex.BlockNum) - if err != nil { - return nil, err - } - - // Load the block - block := blockStore.LoadBlock(height) - numTxs := len(block.Txs) - - if int(resultIndex.TxIndex) > numTxs || numTxs == 0 { - return nil, fmt.Errorf( - "unable to get block transaction for block %d, index %d", - resultIndex.BlockNum, - resultIndex.TxIndex, - ) - } - - rawTx := block.Txs[resultIndex.TxIndex] - - // Fetch the block results - blockResults, err := sm.LoadABCIResponses(stateDB, resultIndex.BlockNum) - if err != nil { - return nil, fmt.Errorf("unable to load block results, %w", err) - } - - // Grab the block deliver response - if len(blockResults.DeliverTxs) < int(resultIndex.TxIndex) { - return nil, fmt.Errorf( - "unable to get deliver result for block %d, index %d", - resultIndex.BlockNum, - resultIndex.TxIndex, - ) - } - - deliverResponse := blockResults.DeliverTxs[resultIndex.TxIndex] - - // Craft the response - return &ctypes.ResultTx{ - Hash: hash, - Height: resultIndex.BlockNum, - Index: resultIndex.TxIndex, - TxResult: deliverResponse, - Tx: rawTx, - }, nil -} diff --git a/tm2/pkg/bft/rpc/core/tx/tx.go b/tm2/pkg/bft/rpc/core/tx/tx.go new file mode 100644 index 00000000000..0e43415c0ff --- /dev/null +++ b/tm2/pkg/bft/rpc/core/tx/tx.go @@ -0,0 +1,107 @@ +package tx + +import ( + "fmt" + + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/params" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + sm "github.com/gnolang/gno/tm2/pkg/bft/state" + dbm "github.com/gnolang/gno/tm2/pkg/db" +) + +// Handler is the transaction RPC handler +type Handler struct { + stateDB dbm.DB + blockStore sm.BlockStore +} + +// NewHandler creates a new instance of the transaction RPC handler +func NewHandler(blockStore sm.BlockStore, stateDB dbm.DB) *Handler { + return &Handler{ + blockStore: blockStore, + stateDB: stateDB, + } +} + +// TxHandler allows for querying the transaction results. `nil` could mean the +// transaction is in the mempool, invalidated, or was not sent in the first +// place. 
+// +// Params: +// - hash []byte (required) +func (h *Handler) TxHandler(_ *metadata.Metadata, p []any) (any, *spec.BaseJSONError) { + const idxHash = 0 + + hash, err := params.AsBytes(p, idxHash, true) + if err != nil { + return nil, err + } + + // Get the result index from storage, if any + resultIndex, loadIdxErr := sm.LoadTxResultIndex(h.stateDB, hash) + if loadIdxErr != nil { + return nil, spec.GenerateResponseError(loadIdxErr) + } + + storeHeight := h.blockStore.Height() + if resultIndex.BlockNum < 1 || resultIndex.BlockNum > storeHeight { + return nil, spec.GenerateResponseError( + fmt.Errorf( + "height (%d) must be less than or equal to the current blockchain height (%d)", + resultIndex.BlockNum, + storeHeight, + ), + ) + } + + // Load the block + block := h.blockStore.LoadBlock(resultIndex.BlockNum) + if block == nil { + return nil, spec.GenerateResponseError( + fmt.Errorf("block not found for height %d", resultIndex.BlockNum), + ) + } + + numTxs := len(block.Txs) + if numTxs == 0 || int(resultIndex.TxIndex) >= numTxs { + return nil, spec.GenerateResponseError( + fmt.Errorf( + "unable to get block transaction for block %d, index %d", + resultIndex.BlockNum, + resultIndex.TxIndex, + ), + ) + } + + rawTx := block.Txs[resultIndex.TxIndex] + + // Fetch the block results + blockResults, loadResErr := sm.LoadABCIResponses(h.stateDB, resultIndex.BlockNum) + if loadResErr != nil { + return nil, spec.GenerateResponseError( + fmt.Errorf("unable to load block results, %w", loadResErr), + ) + } + + if int(resultIndex.TxIndex) >= len(blockResults.DeliverTxs) { + return nil, spec.GenerateResponseError( + fmt.Errorf( + "unable to get deliver result for block %d, index %d", + resultIndex.BlockNum, + resultIndex.TxIndex, + ), + ) + } + + deliverResponse := blockResults.DeliverTxs[resultIndex.TxIndex] + + return &ctypes.ResultTx{ + Hash: hash, + Height: resultIndex.BlockNum, + Index: resultIndex.TxIndex, + TxResult: deliverResponse, + Tx: rawTx, + }, nil +} diff --git a/tm2/pkg/bft/rpc/core/tx/tx_test.go b/tm2/pkg/bft/rpc/core/tx/tx_test.go new file mode 100644 index 00000000000..372314686f3 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/tx/tx_test.go @@ -0,0 +1,349 @@ +package tx + +import ( + "testing" + + "github.com/gnolang/gno/tm2/pkg/amino" + abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/mock" + ctypes "github.com/gnolang/gno/tm2/pkg/bft/rpc/core/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + "github.com/gnolang/gno/tm2/pkg/bft/state" + "github.com/gnolang/gno/tm2/pkg/bft/types" + "github.com/gnolang/gno/tm2/pkg/db/memdb" + "github.com/gnolang/gno/tm2/pkg/std" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHandler_TxHandler(t *testing.T) { + t.Parallel() + + t.Run("Missing hash param", func(t *testing.T) { + t.Parallel() + + var ( + sdb = memdb.NewMemDB() + mockBlockStore = &mock.BlockStore{ + HeightFn: func() int64 { + t.FailNow() + + return 0 + }, + } + ) + + h := NewHandler(mockBlockStore, sdb) + + res, err := h.TxHandler(nil, nil) + require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.InvalidParamsErrorCode, err.Code) + }) + + t.Run("Tx result index not found", func(t *testing.T) { + t.Parallel() + + var ( + sdb = memdb.NewMemDB() + hash = []byte("hash") + mockBlockStore = &mock.BlockStore{ + HeightFn: func() int64 { + t.FailNow() + + return 0 + }, + } + ) + + h := NewHandler(mockBlockStore, sdb) + + res, err := h.TxHandler(nil, []any{hash}) + 
require.Nil(t, res) + require.NotNil(t, err) + + assert.Equal(t, spec.ServerErrorCode, err.Code) + }) + + t.Run("Invalid tx height (> store height)", func(t *testing.T) { + t.Parallel() + + var ( + storeHeight = int64(9) + txHeight = int64(10) + + mockBlockStore = &mock.BlockStore{ + HeightFn: func() int64 { + return storeHeight + }, + LoadBlockFn: func(_ int64) *types.Block { + t.FailNow() + + return nil + }, + } + + idx = state.TxResultIndex{ + BlockNum: txHeight, + TxIndex: 0, + } + ) + + stdTx := &std.Tx{ + Memo: "example tx", + } + raw, err := amino.Marshal(stdTx) + require.NoError(t, err) + + tx := types.Tx(raw) + + sdb := memdb.NewMemDB() + sdb.Set(state.CalcTxResultKey(tx.Hash()), idx.Bytes()) + + h := NewHandler(mockBlockStore, sdb) + + res, e := h.TxHandler(nil, []any{tx.Hash()}) + require.Nil(t, res) + require.NotNil(t, e) + + assert.Equal(t, spec.ServerErrorCode, e.Code) + }) + + t.Run("Block not found", func(t *testing.T) { + t.Parallel() + + var ( + height = int64(10) + + mockBlockStore = &mock.BlockStore{ + HeightFn: func() int64 { return height }, + LoadBlockFn: func(hh int64) *types.Block { + assert.Equal(t, height, hh) + + return nil + }, + } + + idx = state.TxResultIndex{ + BlockNum: height, + TxIndex: 0, + } + ) + + stdTx := &std.Tx{ + Memo: "example tx", + } + raw, err := amino.Marshal(stdTx) + require.NoError(t, err) + + tx := types.Tx(raw) + + sdb := memdb.NewMemDB() + sdb.Set(state.CalcTxResultKey(tx.Hash()), idx.Bytes()) + + h := NewHandler(mockBlockStore, sdb) + + res, e := h.TxHandler(nil, []any{tx.Hash()}) + require.Nil(t, res) + require.NotNil(t, e) + + assert.Equal(t, spec.ServerErrorCode, e.Code) + }) + + t.Run("Invalid block transaction index (empty txs)", func(t *testing.T) { + t.Parallel() + + var ( + height = int64(10) + mockBlockStore = &mock.BlockStore{ + HeightFn: func() int64 { return height }, + LoadBlockFn: func(hh int64) *types.Block { + assert.Equal(t, height, hh) + return &types.Block{ + Data: types.Data{ + Txs: []types.Tx{}, + }, + } + }, + } + + idx = state.TxResultIndex{ + BlockNum: height, + TxIndex: 0, + } + ) + + stdTx := &std.Tx{ + Memo: "example tx", + } + raw, err := amino.Marshal(stdTx) + require.NoError(t, err) + + tx := types.Tx(raw) + + sdb := memdb.NewMemDB() + sdb.Set(state.CalcTxResultKey(tx.Hash()), idx.Bytes()) + + h := NewHandler(mockBlockStore, sdb) + + res, e := h.TxHandler(nil, []any{tx.Hash()}) + require.Nil(t, res) + require.NotNil(t, e) + + assert.Equal(t, spec.ServerErrorCode, e.Code) + }) + + t.Run("Unable to load block results", func(t *testing.T) { + t.Parallel() + + stdTx := &std.Tx{ + Memo: "example tx", + } + raw, err := amino.Marshal(stdTx) + require.NoError(t, err) + + tx := types.Tx(raw) + + var ( + height = int64(10) + mockBlockStore = &mock.BlockStore{ + HeightFn: func() int64 { return height }, + LoadBlockFn: func(hh int64) *types.Block { + assert.Equal(t, height, hh) + return &types.Block{ + Data: types.Data{ + Txs: []types.Tx{tx}, + }, + } + }, + } + + idx = state.TxResultIndex{ + BlockNum: height, + TxIndex: 0, + } + ) + + sdb := memdb.NewMemDB() + sdb.Set(state.CalcTxResultKey(tx.Hash()), idx.Bytes()) + + h := NewHandler(mockBlockStore, sdb) + + res, e := h.TxHandler(nil, []any{tx.Hash()}) + require.Nil(t, res) + require.NotNil(t, e) + + assert.Equal(t, spec.ServerErrorCode, e.Code) + }) + + t.Run("Invalid ABCI response index", func(t *testing.T) { + t.Parallel() + + stdTx := &std.Tx{ + Memo: "example tx", + } + raw, err := amino.Marshal(stdTx) + require.NoError(t, err) + + tx := types.Tx(raw) + + var ( + 
height = int64(10) + + mockBlockStore = &mock.BlockStore{ + HeightFn: func() int64 { return height }, + LoadBlockFn: func(hh int64) *types.Block { + assert.Equal(t, height, hh) + return &types.Block{ + Data: types.Data{ + Txs: []types.Tx{tx}, + }, + } + }, + } + + idx = state.TxResultIndex{ + BlockNum: height, + TxIndex: 0, + } + + responses = &state.ABCIResponses{ + DeliverTxs: []abci.ResponseDeliverTx{}, // empty + } + ) + + sdb := memdb.NewMemDB() + sdb.Set(state.CalcTxResultKey(tx.Hash()), idx.Bytes()) + sdb.Set(state.CalcABCIResponsesKey(height), responses.Bytes()) + + h := NewHandler(mockBlockStore, sdb) + + res, e := h.TxHandler(nil, []any{tx.Hash()}) + require.Nil(t, res) + require.NotNil(t, e) + + assert.Equal(t, spec.ServerErrorCode, e.Code) + }) + + t.Run("Valid tx result", func(t *testing.T) { + t.Parallel() + + stdTx := &std.Tx{ + Memo: "example tx", + } + raw, err := amino.Marshal(stdTx) + require.NoError(t, err) + + tx := types.Tx(raw) + + var ( + height = int64(10) + + mockBlockStore = &mock.BlockStore{ + HeightFn: func() int64 { return height }, + LoadBlockFn: func(hh int64) *types.Block { + require.Equal(t, height, hh) + return &types.Block{ + Data: types.Data{ + Txs: []types.Tx{tx}, + }, + } + }, + } + + txResultIndex = state.TxResultIndex{ + BlockNum: height, + TxIndex: 0, + } + + responses = &state.ABCIResponses{ + DeliverTxs: []abci.ResponseDeliverTx{ + { + GasWanted: 100, + }, + }, + } + ) + + sdb := memdb.NewMemDB() + sdb.Set(state.CalcTxResultKey(tx.Hash()), txResultIndex.Bytes()) + sdb.Set(state.CalcABCIResponsesKey(height), responses.Bytes()) + + h := NewHandler(mockBlockStore, sdb) + + out, e := h.TxHandler(nil, []any{tx.Hash()}) + require.Nil(t, e) + require.NotNil(t, out) + + result, ok := out.(*ctypes.ResultTx) + require.True(t, ok) + + assert.Equal(t, txResultIndex.BlockNum, result.Height) + assert.Equal(t, txResultIndex.TxIndex, result.Index) + assert.Equal(t, responses.DeliverTxs[0], result.TxResult) + assert.Equal(t, tx, result.Tx) + assert.Equal(t, tx.Hash(), result.Tx.Hash()) + assert.Equal(t, tx.Hash(), result.Hash) + }) +} diff --git a/tm2/pkg/bft/rpc/core/tx_test.go b/tm2/pkg/bft/rpc/core/tx_test.go deleted file mode 100644 index 5024980e0d7..00000000000 --- a/tm2/pkg/bft/rpc/core/tx_test.go +++ /dev/null @@ -1,222 +0,0 @@ -package core - -import ( - "testing" - - "github.com/gnolang/gno/tm2/pkg/amino" - abci "github.com/gnolang/gno/tm2/pkg/bft/abci/types" - "github.com/gnolang/gno/tm2/pkg/bft/state" - "github.com/gnolang/gno/tm2/pkg/bft/types" - "github.com/gnolang/gno/tm2/pkg/db/memdb" - "github.com/gnolang/gno/tm2/pkg/std" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestTxHandler(t *testing.T) { - // Tests are not run in parallel because the JSON-RPC - // handlers utilize global package-level variables, - // that are not friendly with concurrent test runs (or anything, really) - t.Run("tx result generated", func(t *testing.T) { - var ( - height = int64(10) - - stdTx = &std.Tx{ - Memo: "example tx", - } - - txResultIndex = state.TxResultIndex{ - BlockNum: height, - TxIndex: 0, - } - - responses = &state.ABCIResponses{ - DeliverTxs: []abci.ResponseDeliverTx{ - { - GasWanted: 100, - }, - }, - } - ) - - // Prepare the transaction - marshalledTx, err := amino.Marshal(stdTx) - require.NoError(t, err) - - tx := types.Tx(marshalledTx) - - // Prepare the DB - sdb := memdb.NewMemDB() - - // Save the result index to the DB - sdb.Set(state.CalcTxResultKey(tx.Hash()), txResultIndex.Bytes()) - - // Save the ABCI 
response to the DB - sdb.Set(state.CalcABCIResponsesKey(height), responses.Bytes()) - - // Set the GLOBALLY referenced db - SetStateDB(sdb) - - // Set the GLOBALLY referenced blockstore - blockStore := &mockBlockStore{ - heightFn: func() int64 { - return height - }, - loadBlockFn: func(h int64) *types.Block { - require.Equal(t, height, h) - - return &types.Block{ - Data: types.Data{ - Txs: []types.Tx{ - tx, - }, - }, - } - }, - } - - SetBlockStore(blockStore) - - // Load the result - loadedTxResult, err := Tx(nil, tx.Hash()) - - require.NoError(t, err) - require.NotNil(t, loadedTxResult) - - // Compare the result - assert.Equal(t, txResultIndex.BlockNum, loadedTxResult.Height) - assert.Equal(t, txResultIndex.TxIndex, loadedTxResult.Index) - assert.Equal(t, responses.DeliverTxs[0], loadedTxResult.TxResult) - assert.Equal(t, tx, loadedTxResult.Tx) - assert.Equal(t, tx.Hash(), loadedTxResult.Tx.Hash()) - }) - - t.Run("tx result index not found", func(t *testing.T) { - var ( - sdb = memdb.NewMemDB() - hash = []byte("hash") - expectedErr = state.NoTxResultForHashError{ - Hash: hash, - } - ) - - // Set the GLOBALLY referenced db - SetStateDB(sdb) - - // Load the result - loadedTxResult, err := Tx(nil, hash) - require.Nil(t, loadedTxResult) - - assert.Equal(t, expectedErr, err) - }) - - t.Run("invalid block transaction index", func(t *testing.T) { - var ( - height = int64(10) - - stdTx = &std.Tx{ - Memo: "example tx", - } - - txResultIndex = state.TxResultIndex{ - BlockNum: height, - TxIndex: 0, - } - ) - - // Prepare the transaction - marshalledTx, err := amino.Marshal(stdTx) - require.NoError(t, err) - - tx := types.Tx(marshalledTx) - - // Prepare the DB - sdb := memdb.NewMemDB() - - // Save the result index to the DB - sdb.Set(state.CalcTxResultKey(tx.Hash()), txResultIndex.Bytes()) - - // Set the GLOBALLY referenced db - SetStateDB(sdb) - - // Set the GLOBALLY referenced blockstore - blockStore := &mockBlockStore{ - heightFn: func() int64 { - return height - }, - loadBlockFn: func(h int64) *types.Block { - require.Equal(t, height, h) - - return &types.Block{ - Data: types.Data{ - Txs: []types.Tx{}, // empty - }, - } - }, - } - - SetBlockStore(blockStore) - - // Load the result - loadedTxResult, err := Tx(nil, tx.Hash()) - require.Nil(t, loadedTxResult) - - assert.ErrorContains(t, err, "unable to get block transaction") - }) - - t.Run("invalid ABCI response index (corrupted state)", func(t *testing.T) { - var ( - height = int64(10) - - stdTx = &std.Tx{ - Memo: "example tx", - } - - txResultIndex = state.TxResultIndex{ - BlockNum: height, - TxIndex: 0, - } - ) - - // Prepare the transaction - marshalledTx, err := amino.Marshal(stdTx) - require.NoError(t, err) - - tx := types.Tx(marshalledTx) - - // Prepare the DB - sdb := memdb.NewMemDB() - - // Save the result index to the DB - sdb.Set(state.CalcTxResultKey(tx.Hash()), txResultIndex.Bytes()) - - // Set the GLOBALLY referenced db - SetStateDB(sdb) - - // Set the GLOBALLY referenced blockstore - blockStore := &mockBlockStore{ - heightFn: func() int64 { - return height - }, - loadBlockFn: func(h int64) *types.Block { - require.Equal(t, height, h) - - return &types.Block{ - Data: types.Data{ - Txs: []types.Tx{ - tx, - }, - }, - } - }, - } - - SetBlockStore(blockStore) - - // Load the result - loadedTxResult, err := Tx(nil, tx.Hash()) - require.Nil(t, loadedTxResult) - - assert.ErrorContains(t, err, "unable to load block results") - }) -} diff --git a/tm2/pkg/bft/rpc/core/types/peers.go b/tm2/pkg/bft/rpc/core/types/peers.go new file mode 100644 
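The package-level SetStateDB / SetBlockStore globals exercised by the deleted test above are gone; the tx and block endpoints now receive their stores explicitly at setup time. A sketch of that wiring, using the mock block store from the new tests only to keep the example self-contained (a node would pass its real block store and state DB):

package example

import (
	"github.com/gnolang/gno/tm2/pkg/bft/rpc/core"
	"github.com/gnolang/gno/tm2/pkg/bft/rpc/core/mock"
	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server"
	"github.com/gnolang/gno/tm2/pkg/bft/types"
	"github.com/gnolang/gno/tm2/pkg/db/memdb"
)

// registerStoreEndpoints wires the block- and tx-related endpoints onto an
// existing JSON-RPC server, with an in-memory state DB and a mock block store
// standing in for the node's real storage
func registerStoreEndpoints(srv *server.JSONRPC) {
	var (
		stateDB    = memdb.NewMemDB()
		blockStore = &mock.BlockStore{
			HeightFn:    func() int64 { return 1 },
			LoadBlockFn: func(_ int64) *types.Block { return &types.Block{} },
		}
	)

	// Both handlers resolve everything from the stores handed to them,
	// rather than from package-level globals as before
	core.SetupBlocks(srv, blockStore, stateDB)
	core.SetupTx(srv, blockStore, stateDB)
}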
index 00000000000..71aaf275105 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/types/peers.go @@ -0,0 +1,24 @@ +package core_types + +import ( + "github.com/gnolang/gno/tm2/pkg/p2p" + p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types" +) + +// Peers exposes access to the current P2P peer set +type Peers interface { + // Peers returns the current peer set + Peers() p2p.PeerSet +} + +// Transport exposes read-only access to the P2P transport +type Transport interface { + // Listeners returns the addresses the node is currently listening on + Listeners() []string + + // IsListening reports whether the node is currently accepting incoming connections + IsListening() bool + + // NodeInfo returns the local node's P2P identity and metadata + NodeInfo() p2pTypes.NodeInfo +} diff --git a/tm2/pkg/bft/rpc/core/types/responses.go b/tm2/pkg/bft/rpc/core/types/responses.go index 76474867b27..6ef1ee6649b 100644 --- a/tm2/pkg/bft/rpc/core/types/responses.go +++ b/tm2/pkg/bft/rpc/core/types/responses.go @@ -14,6 +14,8 @@ import ( p2pTypes "github.com/gnolang/gno/tm2/pkg/p2p/types" ) +// TODO move to respective packages + // List of blocks type ResultBlockchainInfo struct { LastHeight int64 `json:"last_height"` diff --git a/tm2/pkg/bft/rpc/core/utils/utils.go b/tm2/pkg/bft/rpc/core/utils/utils.go new file mode 100644 index 00000000000..52af4ad5523 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/utils/utils.go @@ -0,0 +1,45 @@ +package utils + +import ( + "errors" + "fmt" +) + +const ( + defaultPerPage = 30 + maxPerPage = 100 +) + +// NormalizeHeight normalizes a requested height against the current chain height. +// +// Semantics: +// - requestedHeight == 0 -> use latest height +// - requestedHeight < minVal -> error +// - requestedHeight > currentHeight -> error +func NormalizeHeight(latestHeight, requestedHeight, minVal int64) (int64, error) { + // 0 means unspecified -> latest + if requestedHeight == 0 { + return latestHeight, nil + } + + if requestedHeight < minVal { + return 0, fmt.Errorf("height must be greater than or equal to %d", minVal) + } + + if requestedHeight > latestHeight { + return 0, errors.New("height must be less than or equal to the current blockchain height") + } + + return requestedHeight, nil +} + +// ValidatePerPage normalizes the page result limit (pagination) +func ValidatePerPage(perPage int) int { + if perPage < 1 { + return defaultPerPage + } else if perPage > maxPerPage { + return maxPerPage + } + + return perPage +} diff --git a/tm2/pkg/bft/rpc/core/utils/utils_test.go b/tm2/pkg/bft/rpc/core/utils/utils_test.go new file mode 100644 index 00000000000..4a10b790074 --- /dev/null +++ b/tm2/pkg/bft/rpc/core/utils/utils_test.go @@ -0,0 +1,47 @@ +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNormalizeHeight(t *testing.T) { + t.Parallel() + + t.Run("Zero height uses latest", func(t *testing.T) { + t.Parallel() + + height, err := NormalizeHeight(10, 0, 1) + require.NoError(t, err) + + assert.Equal(t, int64(10), height) + }) + + t.Run("Below minimum", func(t *testing.T) { + t.Parallel() + + _, err := NormalizeHeight(10, 1, 2) + require.Error(t, err) + + assert.Contains(t, err.Error(), "greater than or equal to 2") + }) + + t.Run("Above latest", func(t *testing.T) { + t.Parallel() + + _, err := NormalizeHeight(10, 11, 1) + require.Error(t, err) + + assert.Contains(t, err.Error(), "current blockchain height") + }) + + t.Run("Within range", func(t *testing.T) { + t.Parallel() + + height, err := NormalizeHeight(10, 
7, 1) + require.NoError(t, err) + assert.Equal(t, int64(7), height) + }) +} diff --git a/tm2/pkg/bft/rpc/lib/client/batch/batch.go b/tm2/pkg/bft/rpc/lib/client/batch/batch.go index e507cd9408f..ef1b21c6454 100644 --- a/tm2/pkg/bft/rpc/lib/client/batch/batch.go +++ b/tm2/pkg/bft/rpc/lib/client/batch/batch.go @@ -3,11 +3,11 @@ package batch import ( "context" - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" ) type Client interface { - SendBatch(context.Context, types.RPCRequests) (types.RPCResponses, error) + SendBatch(context.Context, spec.BaseJSONRequests) (spec.BaseJSONResponses, error) } // Batch allows us to buffer multiple request/response structures @@ -15,14 +15,14 @@ type Client interface { // NOT thread safe type Batch struct { client Client - requests types.RPCRequests + requests spec.BaseJSONRequests } // NewBatch creates a new batch object func NewBatch(client Client) *Batch { return &Batch{ client: client, - requests: make(types.RPCRequests, 0), + requests: make(spec.BaseJSONRequests, 0), } } @@ -38,14 +38,14 @@ func (b *Batch) Clear() int { func (b *Batch) clear() int { count := len(b.requests) - b.requests = make(types.RPCRequests, 0) + b.requests = make(spec.BaseJSONRequests, 0) return count } // Send will attempt to send the current batch of enqueued requests, and then // will clear out the requests once done -func (b *Batch) Send(ctx context.Context) (types.RPCResponses, error) { +func (b *Batch) Send(ctx context.Context) (spec.BaseJSONResponses, error) { defer func() { b.clear() }() @@ -59,6 +59,6 @@ func (b *Batch) Send(ctx context.Context) (types.RPCResponses, error) { } // AddRequest adds a new request onto the batch -func (b *Batch) AddRequest(request types.RPCRequest) { +func (b *Batch) AddRequest(request *spec.BaseJSONRequest) { b.requests = append(b.requests, request) } diff --git a/tm2/pkg/bft/rpc/lib/client/batch/batch_test.go b/tm2/pkg/bft/rpc/lib/client/batch/batch_test.go index 03f5181239c..4202c7420a6 100644 --- a/tm2/pkg/bft/rpc/lib/client/batch/batch_test.go +++ b/tm2/pkg/bft/rpc/lib/client/batch/batch_test.go @@ -4,22 +4,26 @@ import ( "context" "testing" - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // generateRequests generates dummy RPC requests -func generateRequests(t *testing.T, count int) types.RPCRequests { +func generateRequests(t *testing.T, count int) spec.BaseJSONRequests { t.Helper() - requests := make(types.RPCRequests, 0, count) + requests := make(spec.BaseJSONRequests, 0, count) for i := range count { - requests = append(requests, types.RPCRequest{ - JSONRPC: "2.0", - ID: types.JSONRPCIntID(i), - }) + requests = append( + requests, + spec.NewJSONRequest( + spec.JSONRPCNumberID(i), + "", + nil, + ), + ) } return requests @@ -29,20 +33,17 @@ func TestBatch_AddRequest(t *testing.T) { t.Parallel() var ( - capturedSend types.RPCRequests + capturedSend spec.BaseJSONRequests requests = generateRequests(t, 100) mockClient = &mockClient{ - sendBatchFn: func(_ context.Context, requests types.RPCRequests) (types.RPCResponses, error) { + sendBatchFn: func(_ context.Context, requests spec.BaseJSONRequests) (spec.BaseJSONResponses, error) { capturedSend = requests - responses := make(types.RPCResponses, len(requests)) + responses := make(spec.BaseJSONResponses, len(requests)) for index, request := range requests { - responses[index] = 
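On the client side, the batch type now queues *spec.BaseJSONRequest values and flushes them through any transport exposing SendBatch. A sketch with a stub transport (echoClient and the "status" method name are invented for the example; a real caller would hand in the HTTP or WS client):

package main

import (
	"context"
	"fmt"

	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client/batch"
	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec"
)

// echoClient is a stand-in for a real transport; it answers every request
// in the batch with an empty result
type echoClient struct{}

func (echoClient) SendBatch(_ context.Context, reqs spec.BaseJSONRequests) (spec.BaseJSONResponses, error) {
	resps := make(spec.BaseJSONResponses, len(reqs))
	for i, req := range reqs {
		resps[i] = spec.NewJSONResponse(req.ID, nil, nil)
	}

	return resps, nil
}

func main() {
	b := batch.NewBatch(echoClient{})

	// Queue a few requests; nothing is sent until Send is called
	for i := range 3 {
		b.AddRequest(spec.NewJSONRequest(spec.JSONRPCNumberID(i), "status", nil))
	}

	resps, err := b.Send(context.Background())
	if err != nil {
		panic(err)
	}

	fmt.Println("responses:", len(resps))
}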
types.RPCResponse{ - JSONRPC: "2.0", - ID: request.ID, - } + responses[index] = spec.NewJSONResponse(request.ID, nil, nil) } return responses, nil diff --git a/tm2/pkg/bft/rpc/lib/client/batch/mock_test.go b/tm2/pkg/bft/rpc/lib/client/batch/mock_test.go index 5865631feab..6efef3a1e75 100644 --- a/tm2/pkg/bft/rpc/lib/client/batch/mock_test.go +++ b/tm2/pkg/bft/rpc/lib/client/batch/mock_test.go @@ -3,16 +3,16 @@ package batch import ( "context" - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" ) -type sendBatchDelegate func(context.Context, types.RPCRequests) (types.RPCResponses, error) +type sendBatchDelegate func(context.Context, spec.BaseJSONRequests) (spec.BaseJSONResponses, error) type mockClient struct { sendBatchFn sendBatchDelegate } -func (m *mockClient) SendBatch(ctx context.Context, requests types.RPCRequests) (types.RPCResponses, error) { +func (m *mockClient) SendBatch(ctx context.Context, requests spec.BaseJSONRequests) (spec.BaseJSONResponses, error) { if m.sendBatchFn != nil { return m.sendBatchFn(ctx, requests) } diff --git a/tm2/pkg/bft/rpc/lib/client/client.go b/tm2/pkg/bft/rpc/lib/client/client.go index 8fc78d9eb64..4972aaaa656 100644 --- a/tm2/pkg/bft/rpc/lib/client/client.go +++ b/tm2/pkg/bft/rpc/lib/client/client.go @@ -3,16 +3,16 @@ package rpcclient import ( "context" - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" ) // Client is the JSON-RPC client abstraction type Client interface { // SendRequest sends a single RPC request to the JSON-RPC layer - SendRequest(context.Context, types.RPCRequest) (*types.RPCResponse, error) + SendRequest(context.Context, *spec.BaseJSONRequest) (*spec.BaseJSONResponse, error) // SendBatch sends a batch of RPC requests to the JSON-RPC layer - SendBatch(context.Context, types.RPCRequests) (types.RPCResponses, error) + SendBatch(context.Context, spec.BaseJSONRequests) (spec.BaseJSONResponses, error) // Close closes the RPC client Close() error @@ -21,10 +21,10 @@ type Client interface { // Batch is the JSON-RPC batch abstraction type Batch interface { // AddRequest adds a single request to the RPC batch - AddRequest(types.RPCRequest) + AddRequest(*spec.BaseJSONRequest) // Send sends the batch to the RPC layer - Send(context.Context) (types.RPCResponses, error) + Send(context.Context) (spec.BaseJSONResponses, error) // Clear clears out the batch Clear() int diff --git a/tm2/pkg/bft/rpc/lib/client/http/client.go b/tm2/pkg/bft/rpc/lib/client/http/client.go index 61b93d7b2c8..cb84d8813fc 100644 --- a/tm2/pkg/bft/rpc/lib/client/http/client.go +++ b/tm2/pkg/bft/rpc/lib/client/http/client.go @@ -11,7 +11,7 @@ import ( "net/http" "strings" - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" ) const ( @@ -52,9 +52,9 @@ func NewClient(rpcURL string) (*Client, error) { } // SendRequest sends a single RPC request to the server -func (c *Client) SendRequest(ctx context.Context, request types.RPCRequest) (*types.RPCResponse, error) { +func (c *Client) SendRequest(ctx context.Context, request *spec.BaseJSONRequest) (*spec.BaseJSONResponse, error) { // Send the request - response, err := sendRequestCommon[types.RPCRequest, *types.RPCResponse](ctx, c.client, c.rpcURL, request) + response, err := sendRequestCommon[*spec.BaseJSONRequest, *spec.BaseJSONResponse](ctx, c.client, c.rpcURL, request) if err != nil { return nil, err } @@ -68,9 +68,9 @@ func (c 
*Client) SendRequest(ctx context.Context, request types.RPCRequest) (*ty } // SendBatch sends a single RPC batch request to the server -func (c *Client) SendBatch(ctx context.Context, requests types.RPCRequests) (types.RPCResponses, error) { +func (c *Client) SendBatch(ctx context.Context, requests spec.BaseJSONRequests) (spec.BaseJSONResponses, error) { // Send the batch - responses, err := sendRequestCommon[types.RPCRequests, types.RPCResponses](ctx, c.client, c.rpcURL, requests) + responses, err := sendRequestCommon[spec.BaseJSONRequests, spec.BaseJSONResponses](ctx, c.client, c.rpcURL, requests) if err != nil { return nil, err } @@ -97,11 +97,11 @@ func (c *Client) Close() error { type ( requestType interface { - types.RPCRequest | types.RPCRequests + *spec.BaseJSONRequest | spec.BaseJSONRequests } responseType interface { - *types.RPCResponse | types.RPCResponses + *spec.BaseJSONResponse | spec.BaseJSONResponses } ) diff --git a/tm2/pkg/bft/rpc/lib/client/http/client_test.go b/tm2/pkg/bft/rpc/lib/client/http/client_test.go index 0d88ee32650..91e6fbf4172 100644 --- a/tm2/pkg/bft/rpc/lib/client/http/client_test.go +++ b/tm2/pkg/bft/rpc/lib/client/http/client_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -112,25 +112,19 @@ func TestClient_SendRequest(t *testing.T) { t.Parallel() var ( - request = types.RPCRequest{ - JSONRPC: "2.0", - ID: types.JSONRPCStringID("id"), - } + request = spec.NewJSONRequest(spec.JSONRPCStringID("id"), "", nil) handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { require.Equal(t, http.MethodPost, r.Method) require.Equal(t, "application/json", r.Header.Get("content-type")) // Parse the message - var req types.RPCRequest + var req spec.BaseJSONRequest require.NoError(t, json.NewDecoder(r.Body).Decode(&req)) require.Equal(t, request.ID.String(), req.ID.String()) // Send an empty response back - response := types.RPCResponse{ - JSONRPC: "2.0", - ID: req.ID, - } + response := spec.NewJSONResponse(req.ID, nil, nil) // Marshal the response marshalledResponse, err := json.Marshal(response) @@ -164,10 +158,7 @@ func TestClient_SendRequest(t *testing.T) { t.Parallel() var ( - request = types.RPCRequest{ - JSONRPC: "2.0", - ID: types.JSONRPCStringID("id"), - } + request = spec.NewJSONRequest(spec.JSONRPCStringID("id"), "", nil) handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { require.Equal(t, http.MethodPost, r.Method) @@ -175,10 +166,7 @@ func TestClient_SendRequest(t *testing.T) { // Send an empty response back, // with an invalid ID - response := types.RPCResponse{ - JSONRPC: "2.0", - ID: types.JSONRPCStringID("totally random ID"), - } + response := spec.NewJSONResponse(spec.JSONRPCStringID("totally random ID"), nil, nil) // Marshal the response marshalledResponse, err := json.Marshal(response) @@ -209,12 +197,9 @@ func TestClient_SendBatchRequest(t *testing.T) { t.Parallel() var ( - request = types.RPCRequest{ - JSONRPC: "2.0", - ID: types.JSONRPCStringID("id"), - } + request = spec.NewJSONRequest(spec.JSONRPCStringID("id"), "", nil) - requests = types.RPCRequests{ + requests = spec.BaseJSONRequests{ request, request, } @@ -224,7 +209,7 @@ func TestClient_SendBatchRequest(t *testing.T) { require.Equal(t, "application/json", r.Header.Get("content-type")) // Parse the message - var reqs types.RPCRequests + var reqs 
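Since both the HTTP and WS clients now speak in spec.BaseJSONRequest / BaseJSONResponse values, code written against the rpcclient.Client interface stays transport-agnostic. A small sketch (the method name and request ID are placeholders):

package example

import (
	"context"
	"fmt"

	rpcclient "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/client"
	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec"
)

// fetchStatus sends a single "status" call through any transport implementing
// the Client interface (HTTP or WS alike) and hands back the raw JSON-RPC
// response for the caller to decode
func fetchStatus(ctx context.Context, c rpcclient.Client) (*spec.BaseJSONResponse, error) {
	req := spec.NewJSONRequest(spec.JSONRPCStringID("status-1"), "status", nil)

	resp, err := c.SendRequest(ctx, req)
	if err != nil {
		return nil, fmt.Errorf("unable to send status request, %w", err)
	}

	return resp, nil
}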
spec.BaseJSONRequests require.NoError(t, json.NewDecoder(r.Body).Decode(&reqs)) require.Len(t, reqs, len(requests)) @@ -233,12 +218,9 @@ func TestClient_SendBatchRequest(t *testing.T) { } // Send an empty response batch back - response := types.RPCResponse{ - JSONRPC: "2.0", - ID: request.ID, - } + response := spec.NewJSONResponse(request.ID, nil, nil) - responses := types.RPCResponses{ + responses := spec.BaseJSONResponses{ response, response, } diff --git a/tm2/pkg/bft/rpc/lib/client/ws/client.go b/tm2/pkg/bft/rpc/lib/client/ws/client.go index 0b74cb7f5ce..9cc1b88ec3d 100644 --- a/tm2/pkg/bft/rpc/lib/client/ws/client.go +++ b/tm2/pkg/bft/rpc/lib/client/ws/client.go @@ -8,7 +8,7 @@ import ( "log/slog" "sync" - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" "github.com/gnolang/gno/tm2/pkg/errors" "github.com/gnolang/gno/tm2/pkg/log" "github.com/gorilla/websocket" @@ -20,7 +20,7 @@ var ( ErrInvalidBatchResponse = errors.New("invalid ws batch response size") ) -type responseCh chan<- types.RPCResponses +type responseCh chan<- spec.BaseJSONResponses // Client is a WebSocket client implementation type Client struct { @@ -67,9 +67,9 @@ func NewClient(rpcURL string, opts ...Option) (*Client, error) { } // SendRequest sends a single RPC request to the server -func (c *Client) SendRequest(ctx context.Context, request types.RPCRequest) (*types.RPCResponse, error) { +func (c *Client) SendRequest(ctx context.Context, request *spec.BaseJSONRequest) (*spec.BaseJSONResponse, error) { // Create the response channel for the pipeline - responseCh := make(chan types.RPCResponses, 1) + responseCh := make(chan spec.BaseJSONResponses, 1) // Generate a unique request ID hash requestHash := generateIDHash(request.ID.String()) @@ -99,14 +99,14 @@ func (c *Client) SendRequest(ctx context.Context, request types.RPCRequest) (*ty return nil, ErrRequestResponseIDMismatch } - return &response[0], nil + return response[0], nil } } // SendBatch sends a batch of RPC requests to the server -func (c *Client) SendBatch(ctx context.Context, requests types.RPCRequests) (types.RPCResponses, error) { +func (c *Client) SendBatch(ctx context.Context, requests spec.BaseJSONRequests) (spec.BaseJSONResponses, error) { // Create the response channel for the pipeline - responseCh := make(chan types.RPCResponses, 1) + responseCh := make(chan spec.BaseJSONResponses, 1) // Generate a unique request ID hash requestIDs := make([]string, 0, len(requests)) @@ -216,14 +216,14 @@ func (c *Client) runReadRoutine(ctx context.Context) { } var ( - responses types.RPCResponses + responses spec.BaseJSONResponses responseHash string ) // Try to unmarshal as a batch of responses first if err := json.Unmarshal(data, &responses); err != nil { // Try to unmarshal as a single response - var response types.RPCResponse + var response *spec.BaseJSONResponse if err := json.Unmarshal(data, &response); err != nil { c.logger.Error("failed to parse response", "err", err, "data", string(data)) @@ -233,7 +233,7 @@ func (c *Client) runReadRoutine(ctx context.Context) { // This is a single response, generate the unique ID responseHash = generateIDHash(response.ID.String()) - responses = types.RPCResponses{response} + responses = spec.BaseJSONResponses{response} } else { // This is a batch response, generate the unique ID // from the combined IDs diff --git a/tm2/pkg/bft/rpc/lib/client/ws/client_test.go b/tm2/pkg/bft/rpc/lib/client/ws/client_test.go index c80b98b624f..21340e5c9d2 100644 --- 
a/tm2/pkg/bft/rpc/lib/client/ws/client_test.go +++ b/tm2/pkg/bft/rpc/lib/client/ws/client_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" "github.com/gorilla/websocket" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -37,10 +37,11 @@ func TestClient_SendRequest(t *testing.T) { var ( upgrader = websocket.Upgrader{} - request = types.RPCRequest{ - JSONRPC: "2.0", - ID: types.JSONRPCStringID("id"), - } + request = spec.NewJSONRequest( + spec.JSONRPCStringID("id"), + "", + nil, + ) ) ctx, cancelFn := context.WithCancel(context.Background()) @@ -62,7 +63,7 @@ func TestClient_SendRequest(t *testing.T) { require.NoError(t, err) // Parse the message - var req types.RPCRequest + var req *spec.BaseJSONRequest require.NoError(t, json.Unmarshal(message, &req)) require.Equal(t, request.ID.String(), req.ID.String()) @@ -96,15 +97,17 @@ func TestClient_SendRequest(t *testing.T) { var ( upgrader = websocket.Upgrader{} - request = types.RPCRequest{ - JSONRPC: "2.0", - ID: types.JSONRPCStringID("id"), - } - - response = types.RPCResponse{ - JSONRPC: "2.0", - ID: request.ID, - } + request = spec.NewJSONRequest( + spec.JSONRPCStringID("id"), + "", + nil, + ) + + response = spec.NewJSONResponse( + request.ID, + nil, + nil, + ) ) // Create the server @@ -123,7 +126,7 @@ func TestClient_SendRequest(t *testing.T) { require.NoError(t, err) // Parse the message - var req types.RPCRequest + var req *spec.BaseJSONRequest require.NoError(t, json.Unmarshal(message, &req)) require.Equal(t, request.ID.String(), req.ID.String()) @@ -168,12 +171,13 @@ func TestClient_SendBatch(t *testing.T) { var ( upgrader = websocket.Upgrader{} - request = types.RPCRequest{ - JSONRPC: "2.0", - ID: types.JSONRPCStringID("id"), - } + request = spec.NewJSONRequest( + spec.JSONRPCStringID("id"), + "", + nil, + ) - batch = types.RPCRequests{request} + batch = spec.BaseJSONRequests{request} ) ctx, cancelFn := context.WithCancel(context.Background()) @@ -195,7 +199,7 @@ func TestClient_SendBatch(t *testing.T) { require.NoError(t, err) // Parse the message - var req types.RPCRequests + var req spec.BaseJSONRequests require.NoError(t, json.Unmarshal(message, &req)) require.Len(t, req, 1) @@ -231,18 +235,20 @@ func TestClient_SendBatch(t *testing.T) { var ( upgrader = websocket.Upgrader{} - request = types.RPCRequest{ - JSONRPC: "2.0", - ID: types.JSONRPCStringID("id"), - } + request = spec.NewJSONRequest( + spec.JSONRPCStringID("id"), + "", + nil, + ) - response = types.RPCResponse{ - JSONRPC: "2.0", - ID: request.ID, - } + response = spec.NewJSONResponse( + request.ID, + nil, + nil, + ) - batch = types.RPCRequests{request} - batchResponse = types.RPCResponses{response} + batch = spec.BaseJSONRequests{request} + batchResponse = spec.BaseJSONResponses{response} ) // Create the server @@ -261,7 +267,7 @@ func TestClient_SendBatch(t *testing.T) { require.NoError(t, err) // Parse the message - var req types.RPCRequests + var req spec.BaseJSONRequests require.NoError(t, json.Unmarshal(message, &req)) require.Len(t, req, 1) diff --git a/tm2/pkg/bft/rpc/lib/doc.go b/tm2/pkg/bft/rpc/lib/doc.go deleted file mode 100644 index 9b76f8a38ca..00000000000 --- a/tm2/pkg/bft/rpc/lib/doc.go +++ /dev/null @@ -1,84 +0,0 @@ -// HTTP RPC server supporting calls via uri params, jsonrpc, and jsonrpc over websockets -// -// # Client Requests -// -// Suppose we want to expose the rpc function `HelloWorld(name string, num 
int)`. -// -// GET (URI) -// -// As a GET request, it would have URI encoded parameters, and look like: -// -// curl 'http://localhost:8008/hello_world?name="my_world"&num=5' -// -// Note the `'` around the url, which is just so bash doesn't ignore the quotes in `"my_world"`. -// This should also work: -// -// curl http://localhost:8008/hello_world?name=\"my_world\"&num=5 -// -// A GET request to `/` returns a list of available endpoints. -// For those which take arguments, the arguments will be listed in order, with `_` where the actual value should be. -// -// POST (JSONRPC) -// -// As a POST request, we use JSONRPC. For instance, the same request would have this as the body: -// -// { -// "jsonrpc": "2.0", -// "id": "anything", -// "method": "hello_world", -// "params": { -// "name": "my_world", -// "num": 5 -// } -// } -// -// With the above saved in file `data.json`, we can make the request with -// -// curl --data @data.json http://localhost:8008 -// -// WebSocket (JSONRPC) -// -// All requests are exposed over websocket in the same form as the POST JSONRPC. -// Websocket connections are available at their own endpoint, typically `/websocket`, -// though this is configurable when starting the server. -// -// # Server Definition -// -// Define some types and routes: -// -// type ResultStatus struct { -// Value string -// } -// -// Define some routes -// -// var Routes = map[string]*rpcserver.RPCFunc{ -// "status": rpcserver.NewRPCFunc(Status, "arg"), -// } -// -// An rpc function: -// -// func Status(v string) (*ResultStatus, error) { -// return &ResultStatus{v}, nil -// } -// -// Now start the server: -// -// mux := http.NewServeMux() -// rpcserver.RegisterRPCFuncs(mux, Routes) -// wm := rpcserver.NewWebsocketManager(Routes) -// mux.HandleFunc("/websocket", wm.WebsocketHandler) -// logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) -// listener, err := rpc.Listen("0.0.0.0:8080", rpcserver.Config{}) -// if err != nil { panic(err) } -// go rpcserver.StartHTTPServer(listener, mux, logger) -// -// Note that unix sockets are supported as well (eg. `/path/to/socket` instead of `0.0.0.0:8008`) -// Now see all available endpoints by sending a GET request to `0.0.0.0:8008`. -// Each route is available as a GET request, as a JSONRPCv2 POST request, and via JSONRPCv2 over websockets. 
-// -// # Examples -// -// - [Tendermint](https://github.com/gnolang/gno/tm2/pkg/bft/blob/master/rpc/core/routes.go) -// - [tm-monitor](https://github.com/gnolang/gno/tm2/pkg/bft/blob/master/tools/tm-monitor/rpc.go) -package rpc diff --git a/tm2/pkg/bft/rpc/lib/server/config/config.go b/tm2/pkg/bft/rpc/lib/server/config/config.go new file mode 100644 index 00000000000..331563bd8c3 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/config/config.go @@ -0,0 +1,14 @@ +package config + +// Config defines the configuration options for the Tendermint RPC server +type Config struct { + // TCP or UNIX socket address for the RPC server to listen on + ListenAddress string `json:"laddr" toml:"laddr" comment:"TCP or UNIX socket address for the RPC server to listen on"` +} + +// DefaultConfig returns a default configuration for the RPC server +func DefaultConfig() *Config { + return &Config{ + ListenAddress: "0.0.0.0:26657", + } +} diff --git a/tm2/pkg/bft/rpc/lib/server/conns/connection.go b/tm2/pkg/bft/rpc/lib/server/conns/connection.go new file mode 100644 index 00000000000..53a5585394c --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/conns/connection.go @@ -0,0 +1,25 @@ +package conns + +import ( + "github.com/olahol/melody" +) + +// ConnectionManager defines a connection manager interface +// for active WS connections +type ConnectionManager interface { + // AddWSConnection registers a new WS connection + AddWSConnection(id string, session *melody.Session) + + // RemoveWSConnection Removes the WS connection with the supplied ID + RemoveWSConnection(id string) + + // GetWSConnection fetches a WS connection, if any, using the supplied ID + GetWSConnection(id string) WSConnection +} + +// WSConnection represents a single WS connection +type WSConnection interface { + // WriteData pushes out data to the WS connection. + // Returns an error if the write failed (ex. 
connection closed) + WriteData(data any) error +} diff --git a/tm2/pkg/bft/rpc/lib/server/conns/wsconn/ws_conns.go b/tm2/pkg/bft/rpc/lib/server/conns/wsconn/ws_conns.go new file mode 100644 index 00000000000..f3f46f2a129 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/conns/wsconn/ws_conns.go @@ -0,0 +1,97 @@ +package wsconn + +import ( + "context" + "fmt" + "log/slog" + "sync" + + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/conns" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/writer" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/writer/ws" + "github.com/olahol/melody" +) + +// Conns manages active WS connections +type Conns struct { + logger *slog.Logger + conns map[string]Conn // ws connection ID -> conn + + mux sync.RWMutex +} + +// NewConns creates a new instance of the WS connection manager +func NewConns(logger *slog.Logger) *Conns { + return &Conns{ + logger: logger, + conns: make(map[string]Conn), + } +} + +// AddWSConnection registers a new WS connection +func (pw *Conns) AddWSConnection(id string, session *melody.Session) { + pw.mux.Lock() + defer pw.mux.Unlock() + + ctx, cancelFn := context.WithCancel(context.Background()) + + pw.conns[id] = Conn{ + ctx: ctx, + cancelFn: cancelFn, + writer: ws.New( + pw.logger.With( + "ws-conn", + fmt.Sprintf("ws-%s", id), + ), + session, + ), + } +} + +// RemoveWSConnection removes an existing WS connection +func (pw *Conns) RemoveWSConnection(id string) { + pw.mux.Lock() + defer pw.mux.Unlock() + + conn, found := pw.conns[id] + if !found { + return + } + + // Cancel the connection context + conn.cancelFn() + + delete(pw.conns, id) +} + +// GetWSConnection fetches a WS connection, if any +func (pw *Conns) GetWSConnection(id string) conns.WSConnection { + pw.mux.RLock() + defer pw.mux.RUnlock() + + conn, found := pw.conns[id] + if !found { + return nil + } + + return &conn +} + +// Conn is a single WS connection +type Conn struct { + ctx context.Context + cancelFn context.CancelFunc + + writer writer.ResponseWriter +} + +// WriteData writes arbitrary data to the WS connection +func (c *Conn) WriteData(data any) error { + if c.ctx.Err() != nil { + return c.ctx.Err() + } + + c.writer.WriteResponse(data) + + return nil +} diff --git a/tm2/pkg/bft/rpc/lib/server/handler.go b/tm2/pkg/bft/rpc/lib/server/handler.go new file mode 100644 index 00000000000..7b371da7780 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/handler.go @@ -0,0 +1,35 @@ +package server + +import ( + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" +) + +// Handler executes a method with accompanying +// data such as metadata and params +type Handler func(metadata *metadata.Metadata, params []any) (any, *spec.BaseJSONError) + +type handlerEntry struct { + fn Handler + paramNames []string // index i == position i in the params +} + +type handlers map[string]*handlerEntry // method name -> handler entry + +// newHandlers creates a new map of method handlers +func newHandlers() handlers { + return make(handlers) +} + +// addHandler adds a new method handler for the specified method name +func (h handlers) addHandler(method string, handler Handler, paramNames ...string) { + h[method] = &handlerEntry{ + fn: handler, + paramNames: paramNames, + } +} + +// removeHandler removes the method handler for the specified method, if any +func (h handlers) removeHandler(method string) { + delete(h, method) +} diff --git a/tm2/pkg/bft/rpc/lib/server/handler_test.go b/tm2/pkg/bft/rpc/lib/server/handler_test.go 
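The connection manager above tracks melody sessions by an opaque string ID, but the diff does not show how those IDs are produced or where AddWSConnection / RemoveWSConnection are hooked in. A sketch of one plausible wiring using melody's connect and disconnect callbacks; the counter-based ID scheme and the /websocket route are assumptions made only for the example:

package main

import (
	"fmt"
	"log/slog"
	"net/http"
	"os"
	"sync/atomic"

	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/conns/wsconn"
	"github.com/olahol/melody"
)

func main() {
	var (
		logger = slog.New(slog.NewTextHandler(os.Stdout, nil))
		cs     = wsconn.NewConns(logger)
		m      = melody.New()

		nextID atomic.Uint64 // hypothetical ID scheme, purely for the example
	)

	// Register the connection when the WS session opens...
	m.HandleConnect(func(s *melody.Session) {
		id := fmt.Sprintf("conn-%d", nextID.Add(1))
		s.Set("id", id)

		cs.AddWSConnection(id, s)
	})

	// ...and drop it when the session closes
	m.HandleDisconnect(func(s *melody.Session) {
		if id, ok := s.Get("id"); ok {
			cs.RemoveWSConnection(id.(string))
		}
	})

	// Upgrade incoming HTTP requests to WS sessions
	http.HandleFunc("/websocket", func(w http.ResponseWriter, r *http.Request) {
		_ = m.HandleRequest(w, r)
	})

	// Address chosen only for the example
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}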
new file mode 100644 index 00000000000..e1f8112b0b8 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/handler_test.go @@ -0,0 +1,180 @@ +package server + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "testing" + + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + "github.com/gnolang/gno/tm2/pkg/log" + "github.com/go-chi/chi/v5" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func decodeResponse[T spec.BaseJSONResponse | spec.BaseJSONResponses](t *testing.T, responseBody []byte) *T { + t.Helper() + + var response *T + + require.NoError(t, json.NewDecoder(bytes.NewReader(responseBody)).Decode(&response)) + + return response +} + +// setupTestWebServer is a helper function for common setup logic +func setupTestWebServer(t *testing.T, callback func(s *JSONRPC)) *testWebServer { + t.Helper() + + s := newWebServer(t, callback) + s.start() + + return s +} + +// TestHTTP_Handle_BatchRequest verifies that the JSON-RPC server: +// - can handle a single HTTP request to a dummy endpoint +// - can handle a batch HTTP request to a dummy endpoint +func TestHTTP_Handle(t *testing.T) { + t.Parallel() + + var ( + commonResponse = "This is a common response!" + method = "dummy" + ) + + singleRequest, err := json.Marshal( + spec.NewJSONRequest(1, method, nil), + ) + require.NoError(t, err) + + requests := spec.BaseJSONRequests{ + spec.NewJSONRequest(1, method, nil), + spec.NewJSONRequest(2, method, nil), + spec.NewJSONRequest(3, method, nil), + } + + batchRequest, err := json.Marshal(requests) + require.NoError(t, err) + + testTable := []struct { + verifyResponse func(response []byte) error + name string + request []byte + }{ + { + func(resp []byte) error { + response := decodeResponse[spec.BaseJSONResponse](t, resp) + + assert.Equal(t, spec.NewJSONResponse(1, commonResponse, nil), response) + + return nil + }, + "single HTTP request", + singleRequest, + }, + { + func(resp []byte) error { + responses := decodeResponse[spec.BaseJSONResponses](t, resp) + + for index, response := range *responses { + assert.Equal( + t, + spec.NewJSONResponse(uint(index+1), commonResponse, nil), + response, + ) + } + + return nil + }, + "batch HTTP request", + batchRequest, + }, + } + + for _, testCase := range testTable { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + // Create a new JSON-RPC server + webServer := setupTestWebServer(t, func(s *JSONRPC) { + s.handlers = make(handlers) + + s.handlers.addHandler(method, func(_ *metadata.Metadata, _ []any) (any, *spec.BaseJSONError) { + return commonResponse, nil + }) + }) + + defer webServer.stop() + + respRaw, err := http.Post( + webServer.address(), + jsonMimeType, + bytes.NewBuffer(testCase.request), + ) + if err != nil { + t.Fatalf("unexpected HTTP error, %v", err) + } + + resp, err := io.ReadAll(respRaw.Body) + if err != nil { + t.Fatalf("unable to read response body, %v", err) + } + + if err := testCase.verifyResponse(resp); err != nil { + t.Fatalf("unable to verify response, %v", err) + } + }) + } +} + +type testWebServer struct { + mux *chi.Mux + listener net.Listener +} + +func newWebServer(t *testing.T, callbacks ...func(s *JSONRPC)) *testWebServer { + t.Helper() + + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("unable to start listen, %v", err) + } + + mux := chi.NewMux() + webServer := &testWebServer{ + mux: mux, + listener: listener, + } + + s := 
NewJSONRPC(WithLogger(log.NewNoopLogger())) + + for _, callback := range callbacks { + callback(s) + } + + // Hook up the JSON-RPC server to the mux + mux.Mount("/", s.SetupRoutes(chi.NewMux())) + + return webServer +} + +func (ms *testWebServer) start() { + go func() { + //nolint:errcheck // No need to check error + _ = http.Serve(ms.listener, ms.mux) + }() +} + +func (ms *testWebServer) stop() { + _ = ms.listener.Close() +} + +func (ms *testWebServer) address() string { + return fmt.Sprintf("http://%s", ms.listener.Addr().String()) +} diff --git a/tm2/pkg/bft/rpc/lib/server/handlers.go b/tm2/pkg/bft/rpc/lib/server/handlers.go deleted file mode 100644 index 1005f08b965..00000000000 --- a/tm2/pkg/bft/rpc/lib/server/handlers.go +++ /dev/null @@ -1,921 +0,0 @@ -package rpcserver - -import ( - "bytes" - "context" - "encoding/hex" - "encoding/json" - goerrors "errors" - "fmt" - "io" - "log/slog" - "net/http" - "reflect" - "runtime/debug" - "sort" - "strconv" - "strings" - "time" - - "github.com/gnolang/gno/tm2/pkg/telemetry" - "github.com/gnolang/gno/tm2/pkg/telemetry/metrics" - "github.com/gorilla/websocket" - - "github.com/gnolang/gno/tm2/pkg/amino" - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" - "github.com/gnolang/gno/tm2/pkg/errors" - "github.com/gnolang/gno/tm2/pkg/log" - "github.com/gnolang/gno/tm2/pkg/service" -) - -// RegisterRPCFuncs adds a route for each function in the funcMap, as well as general jsonrpc and websocket handlers for all functions. -// "result" is the interface on which the result objects are registered, and is populated with every RPCResponse -func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger *slog.Logger) { - // Check if metrics are enabled - if telemetry.MetricsEnabled() { - // HTTP endpoints - for funcName, rpcFunc := range funcMap { - mux.HandleFunc( - "/"+funcName, - telemetryMiddleware( - makeHTTPHandler(rpcFunc, logger), - ), - ) - } - - // JSONRPC endpoints - mux.HandleFunc( - "/", - telemetryMiddleware( - handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger)), - ), - ) - - return - } - - // HTTP endpoints - for funcName, rpcFunc := range funcMap { - mux.HandleFunc("/"+funcName, makeHTTPHandler(rpcFunc, logger)) - } - - // JSONRPC endpoints - mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger))) -} - -// ------------------------------------- -// function introspection - -// RPCFunc contains the introspected type information for a function -type RPCFunc struct { - f reflect.Value // underlying rpc function - args []reflect.Type // type of each function arg - returns []reflect.Type // type of each return arg - argNames []string // name of each argument - ws bool // websocket only -} - -// NewRPCFunc wraps a function for introspection. -// f is the function, args are comma separated argument names -func NewRPCFunc(f any, args string) *RPCFunc { - return newRPCFunc(f, args, false) -} - -// NewWSRPCFunc wraps a function for introspection and use in the websockets. 
-func NewWSRPCFunc(f any, args string) *RPCFunc { - return newRPCFunc(f, args, true) -} - -func newRPCFunc(f any, args string, ws bool) *RPCFunc { - var argNames []string - if args != "" { - argNames = strings.Split(args, ",") - } - return &RPCFunc{ - f: reflect.ValueOf(f), - args: funcArgTypes(f), - returns: funcReturnTypes(f), - argNames: argNames, - ws: ws, - } -} - -// return a function's argument types -func funcArgTypes(f any) []reflect.Type { - t := reflect.TypeOf(f) - n := t.NumIn() - typez := make([]reflect.Type, n) - for i := range n { - typez[i] = t.In(i) - } - return typez -} - -// return a function's return types -func funcReturnTypes(f any) []reflect.Type { - t := reflect.TypeOf(f) - n := t.NumOut() - typez := make([]reflect.Type, n) - for i := range n { - typez[i] = t.Out(i) - } - return typez -} - -// function introspection -// ----------------------------------------------------------------------------- -// rpc.json - -// jsonrpc calls grab the given method's function info and runs reflect.Call -func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger *slog.Logger) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - b, err := io.ReadAll(r.Body) - if err != nil { - WriteRPCResponseHTTP(w, types.RPCInvalidRequestError(types.JSONRPCStringID(""), errors.Wrap(err, "error reading request body"))) - return - } - // if its an empty request (like from a browser), - // just display a list of functions - if len(b) == 0 { - writeListOfEndpoints(w, r, funcMap) - return - } - - // --- Branch 1: Attempt to Unmarshal as a Batch (Slice) of Requests --- - var requests types.RPCRequests - if err := json.Unmarshal(b, &requests); err == nil { - var responses types.RPCResponses - for _, req := range requests { - if resp := processRequest(r, req, funcMap, logger); resp != nil { - responses = append(responses, *resp) - } - } - - if len(responses) > 0 { - WriteRPCResponseArrayHTTP(w, responses) - return - } - } - - // --- Branch 2: Attempt to Unmarshal as a Single Request --- - var request types.RPCRequest - if err := json.Unmarshal(b, &request); err == nil { - if resp := processRequest(r, request, funcMap, logger); resp != nil { - WriteRPCResponseHTTP(w, *resp) - return - } - } else { - WriteRPCResponseHTTP(w, types.RPCParseError(types.JSONRPCStringID(""), errors.Wrap(err, "error unmarshalling request"))) - return - } - } -} - -// processRequest checks and processes a single JSON-RPC request. -// If the request should produce a response, it returns a pointer to that response. -// Otherwise (e.g. if the request is a notification or fails validation), it returns nil. -func processRequest(r *http.Request, req types.RPCRequest, funcMap map[string]*RPCFunc, logger *slog.Logger) *types.RPCResponse { - // Skip notifications (an empty ID indicates no response should be sent) - if req.ID == types.JSONRPCStringID("") { - logger.Debug("Skipping notification (empty ID)") - return nil - } - - // Check that the URL path is valid (assume only "/" is acceptable) - if len(r.URL.Path) > 1 { - resp := types.RPCInvalidRequestError(req.ID, fmt.Errorf("invalid path: %s", r.URL.Path)) - return &resp - } - - // Look up the requested method in the function map. 
- rpcFunc, ok := funcMap[req.Method] - if !ok || rpcFunc.ws { - resp := types.RPCMethodNotFoundError(req.ID) - return &resp - } - - ctx := &types.Context{JSONReq: &req, HTTPReq: r} - args := []reflect.Value{reflect.ValueOf(ctx)} - if len(req.Params) > 0 { - fnArgs, err := jsonParamsToArgs(rpcFunc, req.Params) - if err != nil { - resp := types.RPCInvalidParamsError(req.ID, errors.Wrap(err, "error converting json params to arguments")) - return &resp - } - args = append(args, fnArgs...) - } - - // Call the RPC function using reflection. - returns := rpcFunc.f.Call(args) - logger.Info("HTTPJSONRPC", "method", req.Method, "args", args, "returns", returns) - - // Convert the reflection return values into a result value for JSON serialization. - result, err := unreflectResult(returns) - if err != nil { - resp := types.RPCInternalError(req.ID, err) - return &resp - } - - // Build and return a successful response. - resp := types.NewRPCSuccessResponse(req.ID, result) - return &resp -} - -func handleInvalidJSONRPCPaths(next http.HandlerFunc) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - // Since the pattern "/" matches all paths not matched by other registered patterns we check whether the path is indeed - // "/", otherwise return a 404 error - if r.URL.Path != "/" { - http.NotFound(w, r) - return - } - - next(w, r) - } -} - -// telemetryMiddleware is the telemetry middleware handler -func telemetryMiddleware(next http.Handler) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - start := time.Now() - - next.ServeHTTP(w, r) - - // Log the response time - metrics.HTTPRequestTime.Record( - context.Background(), - time.Since(start).Milliseconds(), - ) - } -} - -func mapParamsToArgs(rpcFunc *RPCFunc, params map[string]json.RawMessage, argsOffset int) ([]reflect.Value, error) { - values := make([]reflect.Value, len(rpcFunc.argNames)) - for i, argName := range rpcFunc.argNames { - argType := rpcFunc.args[i+argsOffset] - - if p, ok := params[argName]; ok && p != nil && len(p) > 0 { - val := reflect.New(argType) - err := amino.UnmarshalJSON(p, val.Interface()) - if err != nil { - return nil, err - } - values[i] = val.Elem() - } else { // use default for that type - values[i] = reflect.Zero(argType) - } - } - - return values, nil -} - -func arrayParamsToArgs(rpcFunc *RPCFunc, params []json.RawMessage, argsOffset int) ([]reflect.Value, error) { - if len(rpcFunc.argNames) != len(params) { - return nil, errors.New("expected %v parameters (%v), got %v (%v)", - len(rpcFunc.argNames), rpcFunc.argNames, len(params), params) - } - - values := make([]reflect.Value, len(params)) - for i, p := range params { - argType := rpcFunc.args[i+argsOffset] - val := reflect.New(argType) - err := amino.UnmarshalJSON(p, val.Interface()) - if err != nil { - return nil, err - } - values[i] = val.Elem() - } - return values, nil -} - -// raw is unparsed json (from json.RawMessage) encoding either a map or an -// array. -// -// Example: -// -// rpcFunc.args = [rpctypes.Context string] -// rpcFunc.argNames = ["arg"] -func jsonParamsToArgs(rpcFunc *RPCFunc, raw []byte) ([]reflect.Value, error) { - const argsOffset = 1 - - // TODO: Make more efficient, perhaps by checking the first character for '{' or '['? - // First, try to get the map. - var m map[string]json.RawMessage - err := json.Unmarshal(raw, &m) - if err == nil { - return mapParamsToArgs(rpcFunc, m, argsOffset) - } - - // Otherwise, try an array. 
- var a []json.RawMessage - err = json.Unmarshal(raw, &a) - if err == nil { - return arrayParamsToArgs(rpcFunc, a, argsOffset) - } - - // Otherwise, bad format, we cannot parse - return nil, errors.New("unknown type for JSON params: %v. Expected map or array", err) -} - -// rpc.json -// ----------------------------------------------------------------------------- -// rpc.http - -// convert from a function name to the http handler -func makeHTTPHandler(rpcFunc *RPCFunc, logger *slog.Logger) http.HandlerFunc { - // Exception for websocket endpoints - if rpcFunc.ws { - return func(w http.ResponseWriter, r *http.Request) { - WriteRPCResponseHTTP(w, types.RPCMethodNotFoundError(types.JSONRPCStringID(""))) - } - } - - // All other endpoints - return func(w http.ResponseWriter, r *http.Request) { - logger.Debug("HTTP HANDLER", "req", r) - - ctx := &types.Context{HTTPReq: r} - args := []reflect.Value{reflect.ValueOf(ctx)} - - fnArgs, err := httpParamsToArgs(rpcFunc, r) - if err != nil { - WriteRPCResponseHTTP(w, types.RPCInvalidParamsError(types.JSONRPCStringID(""), errors.Wrap(err, "error converting http params to arguments"))) - return - } - args = append(args, fnArgs...) - - returns := rpcFunc.f.Call(args) - - logger.Info("HTTPRestRPC", "method", r.URL.Path, "args", args, "returns", returns) - result, err := unreflectResult(returns) - if err != nil { - var statusErr *types.HTTPStatusError - if goerrors.As(err, &statusErr) { - WriteRPCResponseHTTPError(w, statusErr.Code, types.RPCInternalError(types.JSONRPCStringID(""), err)) - return - } - - WriteRPCResponseHTTP(w, types.RPCInternalError(types.JSONRPCStringID(""), err)) - return - } - WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(types.JSONRPCStringID(""), result)) - } -} - -// Convert an http query to a list of properly typed values. -// To be properly decoded the arg must be a concrete type from tendermint (if its an interface). -func httpParamsToArgs(rpcFunc *RPCFunc, r *http.Request) ([]reflect.Value, error) { - const argsOffset = 1 - - paramsMap := make(map[string]json.RawMessage) - for _, argName := range rpcFunc.argNames { - arg := GetParam(r, argName) - if arg == "" { - // Empty param - continue - } - - // Handle hex string - if strings.HasPrefix(arg, "0x") { - decoded, err := hex.DecodeString(arg[2:]) - if err != nil { - return nil, fmt.Errorf("unable to decode hex string: %w", err) - } - - data, err := amino.MarshalJSON(decoded) - if err != nil { - return nil, fmt.Errorf("unable to marshal argument to JSON: %w", err) - } - - paramsMap[argName] = data - - continue - } - - // Handle integer string by adding quotes to ensure it is treated as a JSON string. - // This is required by Amino JSON to unmarshal values into integers. 
- if _, err := strconv.Atoi(arg); err == nil { - // arg is a number, wrap it - arg = "\"" + arg + "\"" - } - - // Handle invalid JSON: ensure it's wrapped as a JSON-encoded string - if !json.Valid([]byte(arg)) { - data, err := amino.MarshalJSON(arg) - if err != nil { - return nil, fmt.Errorf("unable to marshal argument to JSON: %w", err) - } - - paramsMap[argName] = data - - continue - } - - // Default: treat the argument as a JSON raw message - paramsMap[argName] = json.RawMessage([]byte(arg)) - } - - return mapParamsToArgs(rpcFunc, paramsMap, argsOffset) -} - -// rpc.http -// ----------------------------------------------------------------------------- -// rpc.websocket - -const ( - defaultWSWriteChanCapacity = 1000 - defaultWSWriteWait = 10 * time.Second - defaultWSReadWait = 30 * time.Second - defaultWSPingPeriod = (defaultWSReadWait * 9) / 10 -) - -// A single websocket connection contains listener id, underlying ws -// connection. -// -// In case of an error, the connection is stopped. -type wsConnection struct { - service.BaseService - - remoteAddr string - baseConn *websocket.Conn - writeChan chan types.RPCResponses - - funcMap map[string]*RPCFunc - - // write channel capacity - writeChanCapacity int - - // each write times out after this. - writeWait time.Duration - - // Connection times out if we haven't received *anything* in this long, not even pings. - readWait time.Duration - - // Send pings to server with this period. Must be less than readWait, but greater than zero. - pingPeriod time.Duration - - // Maximum message size. - readLimit int64 - - // callback which is called upon disconnect - onDisconnect func(remoteAddr string) - - ctx context.Context - cancel context.CancelFunc -} - -// NewWSConnection wraps websocket.Conn. -// -// See the commentary on the func(*wsConnection) functions for a detailed -// description of how to configure ping period and pong wait time. NOTE: if the -// write buffer is full, pongs may be dropped, which may cause clients to -// disconnect. see https://github.com/gorilla/websocket/issues/97 -func NewWSConnection( - baseConn *websocket.Conn, - funcMap map[string]*RPCFunc, - options ...func(*wsConnection), -) *wsConnection { - wsc := &wsConnection{ - remoteAddr: baseConn.RemoteAddr().String(), - baseConn: baseConn, - funcMap: funcMap, - writeWait: defaultWSWriteWait, - writeChanCapacity: defaultWSWriteChanCapacity, - readWait: defaultWSReadWait, - pingPeriod: defaultWSPingPeriod, - } - for _, option := range options { - option(wsc) - } - wsc.baseConn.SetReadLimit(wsc.readLimit) - wsc.BaseService = *service.NewBaseService(nil, "wsConnection", wsc) - return wsc -} - -// OnDisconnect sets a callback which is used upon disconnect - not -// Goroutine-safe. Nop by default. -func OnDisconnect(onDisconnect func(remoteAddr string)) func(*wsConnection) { - return func(wsc *wsConnection) { - wsc.onDisconnect = onDisconnect - } -} - -// WriteWait sets the amount of time to wait before a websocket write times out. -// It should only be used in the constructor - not Goroutine-safe. -func WriteWait(writeWait time.Duration) func(*wsConnection) { - return func(wsc *wsConnection) { - wsc.writeWait = writeWait - } -} - -// WriteChanCapacity sets the capacity of the websocket write channel. -// It should only be used in the constructor - not Goroutine-safe. 
-func WriteChanCapacity(capacity int) func(*wsConnection) { - return func(wsc *wsConnection) { - wsc.writeChanCapacity = capacity - } -} - -// ReadWait sets the amount of time to wait before a websocket read times out. -// It should only be used in the constructor - not Goroutine-safe. -func ReadWait(readWait time.Duration) func(*wsConnection) { - return func(wsc *wsConnection) { - wsc.readWait = readWait - } -} - -// PingPeriod sets the duration for sending websocket pings. -// It should only be used in the constructor - not Goroutine-safe. -func PingPeriod(pingPeriod time.Duration) func(*wsConnection) { - return func(wsc *wsConnection) { - wsc.pingPeriod = pingPeriod - } -} - -// ReadLimit sets the maximum size for reading message. -// It should only be used in the constructor - not Goroutine-safe. -func ReadLimit(readLimit int64) func(*wsConnection) { - return func(wsc *wsConnection) { - wsc.readLimit = readLimit - } -} - -// OnStart implements service.Service by starting the read and write routines. It -// blocks until the connection closes. -func (wsc *wsConnection) OnStart() error { - wsc.writeChan = make(chan types.RPCResponses, wsc.writeChanCapacity) - - // Read subscriptions/unsubscriptions to events - go wsc.readRoutine() - // Write responses, BLOCKING. - wsc.writeRoutine() - - return nil -} - -// OnStop implements service.Service by unsubscribing remoteAddr from all subscriptions. -func (wsc *wsConnection) OnStop() { - // Both read and write loops close the websocket connection when they exit their loops. - // The writeChan is never closed, to allow WriteRPCResponses() to fail. - - if wsc.onDisconnect != nil { - wsc.onDisconnect(wsc.remoteAddr) - } - - if wsc.ctx != nil { - wsc.cancel() - } -} - -// GetRemoteAddr returns the remote address of the underlying connection. -// It implements WSRPCConnection -func (wsc *wsConnection) GetRemoteAddr() string { - return wsc.remoteAddr -} - -// WriteRPCResponse pushes a response to the writeChan, and blocks until it is accepted. -// It implements WSRPCConnection. It is Goroutine-safe. -func (wsc *wsConnection) WriteRPCResponses(resp types.RPCResponses) { - select { - case <-wsc.Quit(): - return - case wsc.writeChan <- resp: - } -} - -// TryWriteRPCResponse attempts to push a response to the writeChan, but does not block. -// It implements WSRPCConnection. It is Goroutine-safe -func (wsc *wsConnection) TryWriteRPCResponses(resp types.RPCResponses) bool { - select { - case <-wsc.Quit(): - return false - case wsc.writeChan <- resp: - return true - default: - return false - } -} - -// Context returns the connection's context. -// The context is canceled when the client's connection closes. 
-func (wsc *wsConnection) Context() context.Context { - if wsc.ctx != nil { - return wsc.ctx - } - wsc.ctx, wsc.cancel = context.WithCancel(context.Background()) - return wsc.ctx -} - -// Read from the socket and subscribe to or unsubscribe from events -func (wsc *wsConnection) readRoutine() { - defer func() { - if r := recover(); r != nil { - err, ok := r.(error) - if !ok { - err = fmt.Errorf("WSJSONRPC: %v", r) - } - wsc.Logger.Error("Panic in WSJSONRPC handler", "err", err, "stack", string(debug.Stack())) - wsc.WriteRPCResponses(types.RPCResponses{types.RPCInternalError(types.JSONRPCStringID("unknown"), err)}) - go wsc.readRoutine() - } else { - wsc.baseConn.Close() //nolint: errcheck - } - }() - - wsc.baseConn.SetPongHandler(func(m string) error { - return wsc.baseConn.SetReadDeadline(time.Now().Add(wsc.readWait)) - }) - - telemetryEnabled := telemetry.MetricsEnabled() - - for { - select { - case <-wsc.Quit(): - return - default: - // reset deadline for every type of message (control or data) - if err := wsc.baseConn.SetReadDeadline(time.Now().Add(wsc.readWait)); err != nil { - wsc.Logger.Error("failed to set read deadline", "err", err) - } - var in []byte - _, in, err := wsc.baseConn.ReadMessage() - if err != nil { - if websocket.IsCloseError(err, websocket.CloseNormalClosure) { - wsc.Logger.Info("Client closed the connection") - } else { - wsc.Logger.Error("Failed to read request", "err", err) - } - wsc.Stop() - return - } - - // Log the request response start time - responseStart := time.Now() - - // first try to unmarshal the incoming request as an array of RPC requests - var ( - requests types.RPCRequests - responses types.RPCResponses - ) - - // Try to unmarshal the requests as a batch - if err := json.Unmarshal(in, &requests); err != nil { - // Next, try to unmarshal as a single request - var request types.RPCRequest - if err := json.Unmarshal(in, &request); err != nil { - wsc.WriteRPCResponses( - types.RPCResponses{ - types.RPCParseError( - types.JSONRPCStringID(""), - errors.Wrap(err, "error unmarshalling request"), - ), - }, - ) - - return - } - - requests = []types.RPCRequest{request} - } - - for _, request := range requests { - request := request - - // A Notification is a Request object without an "id" member. - // The Server MUST NOT reply to a Notification, including those that are within a batch request. - if request.ID == types.JSONRPCStringID("") { - wsc.Logger.Debug("Skipping notification JSON-RPC request") - - continue - } - - // Now, fetch the RPCFunc and execute it. - rpcFunc := wsc.funcMap[request.Method] - if rpcFunc == nil { - responses = append(responses, types.RPCMethodNotFoundError(request.ID)) - - continue - } - - ctx := &types.Context{JSONReq: &request, WSConn: wsc} - args := []reflect.Value{reflect.ValueOf(ctx)} - if len(request.Params) > 0 { - fnArgs, err := jsonParamsToArgs(rpcFunc, request.Params) - if err != nil { - responses = append(responses, types.RPCInternalError(request.ID, errors.Wrap(err, "error converting json params to arguments"))) - - continue - } - args = append(args, fnArgs...) 
- } - - returns := rpcFunc.f.Call(args) - - // TODO: Need to encode args/returns to string if we want to log them - wsc.Logger.Info("WSJSONRPC", "method", request.Method) - - result, err := unreflectResult(returns) - if err != nil { - responses = append(responses, types.RPCInternalError(request.ID, err)) - - continue - } - - responses = append(responses, types.NewRPCSuccessResponse(request.ID, result)) - - if len(responses) > 0 { - wsc.WriteRPCResponses(responses) - - // Log telemetry - if telemetryEnabled { - metrics.WSRequestTime.Record( - context.Background(), - time.Since(responseStart).Milliseconds(), - ) - } - } - } - } - } -} - -// receives on a write channel and writes out on the socket -func (wsc *wsConnection) writeRoutine() { - pingTicker := time.NewTicker(wsc.pingPeriod) - defer func() { - pingTicker.Stop() - if err := wsc.baseConn.Close(); err != nil { - wsc.Logger.Error("Error closing connection", "err", err) - } - }() - - // https://github.com/gorilla/websocket/issues/97 - pongs := make(chan string, 1) - wsc.baseConn.SetPingHandler(func(m string) error { - select { - case pongs <- m: - default: - } - return nil - }) - - for { - select { - case m := <-pongs: - err := wsc.writeMessageWithDeadline(websocket.PongMessage, []byte(m)) - if err != nil { - wsc.Logger.Info("Failed to write pong (client may disconnect)", "err", err) - } - case <-pingTicker.C: - err := wsc.writeMessageWithDeadline(websocket.PingMessage, []byte{}) - if err != nil { - wsc.Logger.Error("Failed to write ping", "err", err) - wsc.Stop() - return - } - case msgs := <-wsc.writeChan: - var writeData any - - if len(msgs) == 1 { - writeData = msgs[0] - } else { - writeData = msgs - } - - jsonBytes, err := json.MarshalIndent(writeData, "", " ") - if err != nil { - wsc.Logger.Error("Failed to marshal RPCResponse to JSON", "err", err) - } else if err = wsc.writeMessageWithDeadline(websocket.TextMessage, jsonBytes); err != nil { - wsc.Logger.Error("Failed to write response", "err", err) - wsc.Stop() - return - } - case <-wsc.Quit(): - return - } - } -} - -// All writes to the websocket must (re)set the write deadline. -// If some writes don't set it while others do, they may timeout incorrectly (https://github.com/gnolang/gno/tm2/pkg/bft/issues/553) -func (wsc *wsConnection) writeMessageWithDeadline(msgType int, msg []byte) error { - if err := wsc.baseConn.SetWriteDeadline(time.Now().Add(wsc.writeWait)); err != nil { - return err - } - return wsc.baseConn.WriteMessage(msgType, msg) -} - -// ---------------------------------------- - -// WebsocketManager provides a WS handler for incoming connections and passes a -// map of functions along with any additional params to new connections. -// NOTE: The websocket path is defined externally, e.g. in node/node.go -type WebsocketManager struct { - websocket.Upgrader - - funcMap map[string]*RPCFunc - logger *slog.Logger - wsConnOptions []func(*wsConnection) -} - -// NewWebsocketManager returns a new WebsocketManager that passes a map of -// functions, connection options and logger to new WS connections. -func NewWebsocketManager(funcMap map[string]*RPCFunc, wsConnOptions ...func(*wsConnection)) *WebsocketManager { - return &WebsocketManager{ - funcMap: funcMap, - Upgrader: websocket.Upgrader{ - CheckOrigin: func(r *http.Request) bool { - // TODO ??? - return true - }, - }, - logger: log.NewNoopLogger(), - wsConnOptions: wsConnOptions, - } -} - -// SetLogger sets the logger. 
-func (wm *WebsocketManager) SetLogger(l *slog.Logger) { - wm.logger = l -} - -// WebsocketHandler upgrades the request/response (via http.Hijack) and starts -// the wsConnection. -func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Request) { - wsConn, err := wm.Upgrade(w, r, nil) - if err != nil { - // TODO - return http error - wm.logger.Error("Failed to upgrade to websocket connection", "err", err) - return - } - - // register connection - con := NewWSConnection(wsConn, wm.funcMap, wm.wsConnOptions...) - con.SetLogger(wm.logger.With("remote", wsConn.RemoteAddr())) - wm.logger.Info("New websocket connection", "remote", con.remoteAddr) - err = con.Start() // Blocking - if err != nil { - wm.logger.Error("Error starting connection", "err", err) - } -} - -// rpc.websocket -// ----------------------------------------------------------------------------- - -// NOTE: assume returns is result struct and error. If error is not nil, return it -func unreflectResult(returns []reflect.Value) (any, error) { - errV := returns[1] - if errVI := errV.Interface(); errVI != nil { - return nil, errors.NewWithData(errVI) - } - rv := returns[0] - // If the result is a registered interface, we need a pointer to it so - // we can marshal with type info. - if rv.Kind() == reflect.Interface { - rvp := reflect.New(rv.Type()) - rvp.Elem().Set(rv) - return rvp.Interface(), nil - } else { - return rv.Interface(), nil - } -} - -// writes a list of available rpc endpoints as an html page -func writeListOfEndpoints(w http.ResponseWriter, r *http.Request, funcMap map[string]*RPCFunc) { - noArgNames := []string{} - argNames := []string{} - for name, funcData := range funcMap { - if len(funcData.args) == 0 { - noArgNames = append(noArgNames, name) - } else { - argNames = append(argNames, name) - } - } - sort.Strings(noArgNames) - sort.Strings(argNames) - buf := new(bytes.Buffer) - buf.WriteString("") - buf.WriteString("
<br>Available endpoints:<br>") - - for _, name := range noArgNames { - link := fmt.Sprintf("//%s/%s", r.Host, name) - fmt.Fprintf(buf, "<a href=\"%s\">%s</a></br>", link, link) - } - - buf.WriteString("<br>Endpoints that require arguments:<br>") - for _, name := range argNames { - link := fmt.Sprintf("//%s/%s?", r.Host, name) - funcData := funcMap[name] - for i, argName := range funcData.argNames { - link += argName + "=_" - if i < len(funcData.argNames)-1 { - link += "&" - } - } - fmt.Fprintf(buf, "<a href=\"%s\">%s</a></br>
", link, link) - } - buf.WriteString("") - w.Header().Set("Content-Type", "text/html") - w.WriteHeader(200) - w.Write(buf.Bytes()) //nolint: errcheck -} diff --git a/tm2/pkg/bft/rpc/lib/server/handlers_test.go b/tm2/pkg/bft/rpc/lib/server/handlers_test.go deleted file mode 100644 index 9e5138aa746..00000000000 --- a/tm2/pkg/bft/rpc/lib/server/handlers_test.go +++ /dev/null @@ -1,274 +0,0 @@ -package rpcserver_test - -import ( - "encoding/json" - "io" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/gorilla/websocket" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - rs "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server" - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" - "github.com/gnolang/gno/tm2/pkg/log" -) - -// ----------- -// HTTP REST API -// TODO - -// ----------- -// JSON-RPC over HTTP - -func testMux() *http.ServeMux { - funcMap := map[string]*rs.RPCFunc{ - "c": rs.NewRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), - } - mux := http.NewServeMux() - - rs.RegisterRPCFuncs(mux, funcMap, log.NewNoopLogger()) - - return mux -} - -func statusOK(code int) bool { return code >= 200 && code <= 299 } - -// Ensure that nefarious/unintended inputs to `params` -// do not crash our RPC handlers. -// See Issue https://github.com/gnolang/gno/tm2/pkg/bft/issues/708. -func TestRPCParams(t *testing.T) { - t.Parallel() - - mux := testMux() - tests := []struct { - payload string - wantErr string - expectedId any - }{ - // bad - {`{"jsonrpc": "2.0", "id": "0"}`, "Method not found", types.JSONRPCStringID("0")}, - {`{"jsonrpc": "2.0", "method": "y", "id": "0"}`, "Method not found", types.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": a}`, "invalid character", types.JSONRPCStringID("")}, // id not captured in JSON parsing failures - {`{"method": "c", "id": "0", "params": ["a"]}`, "got 1", types.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": ["a", "b"]}`, "invalid character", types.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": [1, 1]}`, "of type string", types.JSONRPCStringID("0")}, - - // good - {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": null}`, "", types.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": {}}`, "", types.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": ["a", "10"]}`, "", types.JSONRPCStringID("0")}, - } - - for i, tt := range tests { - req, _ := http.NewRequest("POST", "http://localhost/", strings.NewReader(tt.payload)) - rec := httptest.NewRecorder() - mux.ServeHTTP(rec, req) - res := rec.Result() - // Always expecting back a JSONRPCResponse - assert.True(t, statusOK(res.StatusCode), "#%d: should always return 2XX", i) - blob, err := io.ReadAll(res.Body) - if err != nil { - t.Errorf("#%d: err reading body: %v", i, err) - continue - } - - recv := new(types.RPCResponse) - assert.Nil(t, json.Unmarshal(blob, recv), "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) - assert.NotEqual(t, recv, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) - assert.Equal(t, tt.expectedId, recv.ID, "#%d: expected ID not matched in RPCResponse", i) - if tt.wantErr == "" { - assert.Nil(t, recv.Error, "#%d: not expecting an error", i) - } else { - assert.True(t, recv.Error.Code < 0, "#%d: not expecting a positive JSONRPC code", i) - // The wanted error is either in the message or the data - assert.Contains(t, recv.Error.Message+recv.Error.Data, tt.wantErr, 
"#%d: expected substring", i) - } - } -} - -func TestJSONRPCID(t *testing.T) { - t.Parallel() - - mux := testMux() - tests := []struct { - payload string - wantErr bool - expectedId any - }{ - // good id - {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": ["a", "10"]}`, false, types.JSONRPCStringID("0")}, - {`{"jsonrpc": "2.0", "method": "c", "id": "abc", "params": ["a", "10"]}`, false, types.JSONRPCStringID("abc")}, - {`{"jsonrpc": "2.0", "method": "c", "id": 0, "params": ["a", "10"]}`, false, types.JSONRPCIntID(0)}, - {`{"jsonrpc": "2.0", "method": "c", "id": 1, "params": ["a", "10"]}`, false, types.JSONRPCIntID(1)}, - {`{"jsonrpc": "2.0", "method": "c", "id": 1.3, "params": ["a", "10"]}`, false, types.JSONRPCIntID(1)}, - {`{"jsonrpc": "2.0", "method": "c", "id": -1, "params": ["a", "10"]}`, false, types.JSONRPCIntID(-1)}, - {`{"jsonrpc": "2.0", "method": "c", "id": null, "params": ["a", "10"]}`, false, nil}, - - // bad id - {`{"jsonrpc": "2.0", "method": "c", "id": {}, "params": ["a", "10"]}`, true, nil}, - {`{"jsonrpc": "2.0", "method": "c", "id": [], "params": ["a", "10"]}`, true, nil}, - } - - for i, tt := range tests { - req, _ := http.NewRequest("POST", "http://localhost/", strings.NewReader(tt.payload)) - rec := httptest.NewRecorder() - mux.ServeHTTP(rec, req) - res := rec.Result() - // Always expecting back a JSONRPCResponse - assert.True(t, statusOK(res.StatusCode), "#%d: should always return 2XX", i) - blob, err := io.ReadAll(res.Body) - if err != nil { - t.Errorf("#%d: err reading body: %v", i, err) - continue - } - - recv := new(types.RPCResponse) - err = json.Unmarshal(blob, recv) - assert.Nil(t, err, "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) - if !tt.wantErr { - assert.NotEqual(t, recv, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) - assert.Equal(t, tt.expectedId, recv.ID, "#%d: expected ID not matched in RPCResponse", i) - assert.Nil(t, recv.Error, "#%d: not expecting an error", i) - } else { - assert.True(t, recv.Error.Code < 0, "#%d: not expecting a positive JSONRPC code", i) - } - } -} - -func TestRPCNotification(t *testing.T) { - t.Parallel() - - mux := testMux() - body := strings.NewReader(`{"jsonrpc": "2.0", "id": ""}`) - req, _ := http.NewRequest("POST", "http://localhost/", body) - rec := httptest.NewRecorder() - mux.ServeHTTP(rec, req) - res := rec.Result() - - // Always expecting back a JSONRPCResponse - require.True(t, statusOK(res.StatusCode), "should always return 2XX") - blob, err := io.ReadAll(res.Body) - require.Nil(t, err, "reading from the body should not give back an error") - require.Equal(t, len(blob), 0, "a notification SHOULD NOT be responded to by the server") -} - -func TestRPCNotificationInBatch(t *testing.T) { - t.Parallel() - - mux := testMux() - tests := []struct { - payload string - expectCount int - }{ - { - `[ - {"jsonrpc": "2.0","id": ""}, - {"jsonrpc": "2.0","method":"c","id":"abc","params":["a","10"]} - ]`, - 1, - }, - { - `[ - {"jsonrpc": "2.0","method":"c","id":"abc","params":["a","10"]} - ]`, - 1, - }, - { - `[ - {"jsonrpc": "2.0","id": ""}, - {"jsonrpc": "2.0","method":"c","id":"abc","params":["a","10"]}, - {"jsonrpc": "2.0","id": ""}, - {"jsonrpc": "2.0","method":"c","id":"abc","params":["a","10"]} - ]`, - 2, - }, - } - for i, tt := range tests { - req, _ := http.NewRequest("POST", "http://localhost/", strings.NewReader(tt.payload)) - rec := httptest.NewRecorder() - mux.ServeHTTP(rec, req) - res := rec.Result() - // Always expecting back a JSONRPCResponse - assert.True(t, 
statusOK(res.StatusCode), "#%d: should always return 2XX", i) - blob, err := io.ReadAll(res.Body) - if err != nil { - t.Errorf("#%d: err reading body: %v", i, err) - continue - } - - var responses types.RPCResponses - // try to unmarshal an array first - err = json.Unmarshal(blob, &responses) - if err != nil { - t.Errorf("#%d: expected an array, couldn't unmarshal it\nblob: %s", i, blob) - continue - } - if tt.expectCount != len(responses) { - t.Errorf("#%d: expected %d response(s), but got %d\nblob: %s", i, tt.expectCount, len(responses), blob) - continue - } - for _, response := range responses { - assert.NotEqual(t, response, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) - } - } -} - -func TestUnknownRPCPath(t *testing.T) { - t.Parallel() - - mux := testMux() - req, _ := http.NewRequest("GET", "http://localhost/unknownrpcpath", nil) - rec := httptest.NewRecorder() - mux.ServeHTTP(rec, req) - res := rec.Result() - - // Always expecting back a 404 error - require.Equal(t, http.StatusNotFound, res.StatusCode, "should always return 404") -} - -// ----------- -// JSON-RPC over WEBSOCKETS - -func TestWebsocketManagerHandler(t *testing.T) { - t.Parallel() - - s := newWSServer() - defer s.Close() - - // check upgrader works - d := websocket.Dialer{} - c, dialResp, err := d.Dial("ws://"+s.Listener.Addr().String()+"/websocket", nil) - require.NoError(t, err) - - if got, want := dialResp.StatusCode, http.StatusSwitchingProtocols; got != want { - t.Errorf("dialResp.StatusCode = %q, want %q", got, want) - } - - // check basic functionality works - req, err := types.MapToRequest(types.JSONRPCStringID("TestWebsocketManager"), "c", map[string]any{"s": "a", "i": 10}) - require.NoError(t, err) - err = c.WriteJSON(req) - require.NoError(t, err) - - var resp types.RPCResponse - err = c.ReadJSON(&resp) - require.NoError(t, err) - require.Nil(t, resp.Error) -} - -func newWSServer() *httptest.Server { - funcMap := map[string]*rs.RPCFunc{ - "c": rs.NewWSRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), - } - wm := rs.NewWebsocketManager(funcMap) - wm.SetLogger(log.NewNoopLogger()) - - mux := http.NewServeMux() - mux.HandleFunc("/websocket", wm.WebsocketHandler) - - return httptest.NewServer(mux) -} diff --git a/tm2/pkg/bft/rpc/lib/server/http_params.go b/tm2/pkg/bft/rpc/lib/server/http_params.go deleted file mode 100644 index da6e98e7400..00000000000 --- a/tm2/pkg/bft/rpc/lib/server/http_params.go +++ /dev/null @@ -1,76 +0,0 @@ -package rpcserver - -import ( - "encoding/hex" - "net/http" - "regexp" - "strconv" - - "github.com/gnolang/gno/tm2/pkg/errors" -) - -func GetParam(r *http.Request, param string) string { - s := r.URL.Query().Get(param) - if s == "" { - s = r.FormValue(param) - } - return s -} - -func GetParamByteSlice(r *http.Request, param string) ([]byte, error) { - s := GetParam(r, param) - return hex.DecodeString(s) -} - -func GetParamInt64(r *http.Request, param string) (int64, error) { - s := GetParam(r, param) - i, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return 0, errors.New(param, err.Error()) - } - return i, nil -} - -func GetParamInt32(r *http.Request, param string) (int32, error) { - s := GetParam(r, param) - i, err := strconv.ParseInt(s, 10, 32) - if err != nil { - return 0, errors.New(param, err.Error()) - } - return int32(i), nil -} - -func GetParamUint64(r *http.Request, param string) (uint64, error) { - s := GetParam(r, param) - i, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, 
errors.New(param, err.Error()) - } - return i, nil -} - -func GetParamUint(r *http.Request, param string) (uint, error) { - s := GetParam(r, param) - i, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, errors.New(param, err.Error()) - } - return uint(i), nil -} - -func GetParamRegexp(r *http.Request, param string, re *regexp.Regexp) (string, error) { - s := GetParam(r, param) - if !re.MatchString(s) { - return "", errors.New(param, "did not match regular expression %v", re.String()) - } - return s, nil -} - -func GetParamFloat64(r *http.Request, param string) (float64, error) { - s := GetParam(r, param) - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return 0, errors.New(param, err.Error()) - } - return f, nil -} diff --git a/tm2/pkg/bft/rpc/lib/server/http_server.go b/tm2/pkg/bft/rpc/lib/server/http_server.go deleted file mode 100644 index a5cec3d5c81..00000000000 --- a/tm2/pkg/bft/rpc/lib/server/http_server.go +++ /dev/null @@ -1,235 +0,0 @@ -// Commons for HTTP handling -package rpcserver - -import ( - "bufio" - "encoding/json" - "fmt" - "log/slog" - "net" - "net/http" - "runtime/debug" - "strings" - "time" - - "golang.org/x/net/netutil" - - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" - "github.com/gnolang/gno/tm2/pkg/errors" -) - -// Config is a RPC server configuration. -type Config struct { - // see netutil.LimitListener - MaxOpenConnections int - // mirrors http.Server#ReadTimeout - ReadTimeout time.Duration - // mirrors http.Server#WriteTimeout - WriteTimeout time.Duration - // MaxBodyBytes controls the maximum number of bytes the - // server will read parsing the request body. - MaxBodyBytes int64 - // mirrors http.Server#MaxHeaderBytes - MaxHeaderBytes int -} - -// DefaultConfig returns a default configuration. -func DefaultConfig() *Config { - return &Config{ - MaxOpenConnections: 0, // unlimited - ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - MaxBodyBytes: int64(5000000), // 5MB - MaxHeaderBytes: 1 << 20, // same as the net/http default - } -} - -// StartHTTPServer takes a listener and starts an HTTP server with the given handler. -// It wraps handler with RecoverAndLogHandler. -// NOTE: This function blocks - you may want to call it in a go-routine. -func StartHTTPServer(listener net.Listener, handler http.Handler, logger *slog.Logger, config *Config) error { - logger.Info(fmt.Sprintf("Starting RPC HTTP server on %s", listener.Addr())) - s := &http.Server{ - Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), - ReadTimeout: config.ReadTimeout, - ReadHeaderTimeout: 60 * time.Second, - WriteTimeout: config.WriteTimeout, - MaxHeaderBytes: config.MaxHeaderBytes, - } - err := s.Serve(listener) - logger.Info("RPC HTTP server stopped", "err", err) - return err -} - -// StartHTTPAndTLSServer takes a listener and starts an HTTPS server with the given handler. -// It wraps handler with RecoverAndLogHandler. -// NOTE: This function blocks - you may want to call it in a go-routine. 
-func StartHTTPAndTLSServer( - listener net.Listener, - handler http.Handler, - certFile, keyFile string, - logger *slog.Logger, - config *Config, -) error { - logger.Info(fmt.Sprintf("Starting RPC HTTPS server on %s (cert: %q, key: %q)", - listener.Addr(), certFile, keyFile)) - s := &http.Server{ - Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), - ReadTimeout: config.ReadTimeout, - ReadHeaderTimeout: 60 * time.Second, - WriteTimeout: config.WriteTimeout, - MaxHeaderBytes: config.MaxHeaderBytes, - } - err := s.ServeTLS(listener, certFile, keyFile) - - logger.Error("RPC HTTPS server stopped", "err", err) - return err -} - -func WriteRPCResponseHTTPError( - w http.ResponseWriter, - httpCode int, - res types.RPCResponse, -) { - jsonBytes, err := json.MarshalIndent(res, "", " ") - if err != nil { - panic(err) - } - - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(httpCode) - if _, err := w.Write(jsonBytes); err != nil { - panic(err) - } -} - -func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) { - jsonBytes, err := json.MarshalIndent(res, "", " ") - if err != nil { - panic(err) - } - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(200) - if _, err := w.Write(jsonBytes); err != nil { - panic(err) - } -} - -// WriteRPCResponseArrayHTTP will do the same as WriteRPCResponseHTTP, except it -// can write arrays of responses for batched request/response interactions via -// the JSON RPC. -func WriteRPCResponseArrayHTTP(w http.ResponseWriter, res types.RPCResponses) { - jsonBytes, err := json.MarshalIndent(res, "", " ") - if err != nil { - panic(err) - } - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(200) - if _, err := w.Write(jsonBytes); err != nil { - panic(err) - } -} - -// ----------------------------------------------------------------------------- - -// RecoverAndLogHandler wraps an HTTP handler, adding error logging. -// If the inner function panics, the outer function recovers, logs, sends an -// HTTP 500 error response. -func RecoverAndLogHandler(handler http.Handler, logger *slog.Logger) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Wrap the ResponseWriter to remember the status - rww := &ResponseWriterWrapper{-1, w} - begin := time.Now() - - rww.Header().Set("X-Server-Time", fmt.Sprintf("%v", begin.Unix())) - - defer func() { - // Send a 500 error if a panic happens during a handler. - // Without this, Chrome & Firefox were retrying aborted ajax requests, - // at least to my localhost. - if e := recover(); e != nil { - switch e := e.(type) { - case types.RPCResponse: - WriteRPCResponseHTTP(rww, e) - - case error: - logger.Error( - "Panic in RPC HTTP handler", "err", e, "stack", - string(debug.Stack()), - ) - WriteRPCResponseHTTPError(rww, http.StatusInternalServerError, - types.RPCInternalError(types.JSONRPCStringID(""), e)) - - default: // handle string type and any other types - logger.Error( - "Panic in RPC HTTP handler", "err", e, "stack", - string(debug.Stack()), - ) - WriteRPCResponseHTTPError(rww, http.StatusInternalServerError, - types.RPCInternalError(types.JSONRPCStringID(""), fmt.Errorf("%v", e))) - } - } - - // Finally, log. 
- durationMS := time.Since(begin).Nanoseconds() / 1000000 - if rww.Status == -1 { - rww.Status = 200 - } - logger.Info("Served RPC HTTP response", - "method", r.Method, "url", r.URL, - "status", rww.Status, "duration", durationMS, - "remoteAddr", r.RemoteAddr, - ) - }() - - handler.ServeHTTP(rww, r) - }) -} - -// Remember the status for logging -type ResponseWriterWrapper struct { - Status int - http.ResponseWriter -} - -func (w *ResponseWriterWrapper) WriteHeader(status int) { - w.Status = status - w.ResponseWriter.WriteHeader(status) -} - -// implements http.Hijacker -func (w *ResponseWriterWrapper) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return w.ResponseWriter.(http.Hijacker).Hijack() -} - -type maxBytesHandler struct { - h http.Handler - n int64 -} - -func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - r.Body = http.MaxBytesReader(w, r.Body, h.n) - h.h.ServeHTTP(w, r) -} - -// Listen starts a new net.Listener on the given address. -// It returns an error if the address is invalid or the call to Listen() fails. -func Listen(addr string, config *Config) (listener net.Listener, err error) { - parts := strings.SplitN(addr, "://", 2) - if len(parts) != 2 { - return nil, errors.New( - "invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", - addr, - ) - } - proto, addr := parts[0], parts[1] - listener, err = net.Listen(proto, addr) - if err != nil { - return nil, errors.New("failed to listen on %v: %v", addr, err) - } - if config.MaxOpenConnections > 0 { - listener = netutil.LimitListener(listener, config.MaxOpenConnections) - } - - return listener, nil -} diff --git a/tm2/pkg/bft/rpc/lib/server/http_server_test.go b/tm2/pkg/bft/rpc/lib/server/http_server_test.go deleted file mode 100644 index 3a85cee13d7..00000000000 --- a/tm2/pkg/bft/rpc/lib/server/http_server_test.go +++ /dev/null @@ -1,183 +0,0 @@ -package rpcserver - -import ( - "crypto/tls" - "fmt" - "io" - "net" - "net/http" - "net/http/httptest" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" - "github.com/gnolang/gno/tm2/pkg/log" -) - -func TestMaxOpenConnections(t *testing.T) { - t.Parallel() - - const maxVal = 5 // max simultaneous connections - - // Start the server. - var open int32 - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - if n := atomic.AddInt32(&open, 1); n > int32(maxVal) { - t.Errorf("%d open connections, want <= %d", n, maxVal) - } - defer atomic.AddInt32(&open, -1) - time.Sleep(10 * time.Millisecond) - fmt.Fprint(w, "some body") - }) - config := DefaultConfig() - config.MaxOpenConnections = maxVal - l, err := Listen("tcp://127.0.0.1:0", config) - require.NoError(t, err) - defer l.Close() - go StartHTTPServer(l, mux, log.NewTestingLogger(t), config) - - // Make N GET calls to the server. - attempts := maxVal * 2 - var wg sync.WaitGroup - var failed int32 - for range attempts { - wg.Add(1) - go func() { - defer wg.Done() - c := http.Client{Timeout: 3 * time.Second} - r, err := c.Get("http://" + l.Addr().String()) - if err != nil { - t.Log(err) - atomic.AddInt32(&failed, 1) - return - } - defer r.Body.Close() - io.Copy(io.Discard, r.Body) - }() - } - wg.Wait() - - // We expect some Gets to fail as the server's accept queue is filled, - // but most should succeed. 
- if int(failed) >= attempts/2 { - t.Errorf("%d requests failed within %d attempts", failed, attempts) - } -} - -func TestStartHTTPAndTLSServer(t *testing.T) { - t.Parallel() - - ln, err := net.Listen("tcp", "localhost:0") - require.NoError(t, err) - defer ln.Close() - - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, "some body") - }) - - go StartHTTPAndTLSServer(ln, mux, "test.crt", "test.key", log.NewTestingLogger(t), DefaultConfig()) - - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - c := &http.Client{Transport: tr} - res, err := c.Get("https://" + ln.Addr().String()) - require.NoError(t, err) - defer res.Body.Close() - assert.Equal(t, http.StatusOK, res.StatusCode) - - body, err := io.ReadAll(res.Body) - require.NoError(t, err) - assert.Equal(t, []byte("some body"), body) -} - -func TestRecoverAndLogHandler(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - panicArg any - expectedResponse string - }{ - { - name: "panic with types.RPCResponse", - panicArg: types.NewRPCErrorResponse(types.JSONRPCStringID("id"), 42, "msg", "data"), - expectedResponse: `{ - "jsonrpc": "2.0", - "id": "id", - "error": { - "code": 42, - "message": "msg", - "data": "data" - } -}`, - }, - { - name: "panic with error", - panicArg: fmt.Errorf("I'm an error"), - expectedResponse: `{ - "jsonrpc": "2.0", - "id": "", - "error": { - "code": -32603, - "message": "Internal error", - "data": "I'm an error" - } -}`, - }, - { - name: "panic with string", - panicArg: "I'm an string", - expectedResponse: `{ - "jsonrpc": "2.0", - "id": "", - "error": { - "code": -32603, - "message": "Internal error", - "data": "I'm an string" - } -}`, - }, - { - name: "panic with random struct", - panicArg: struct { - f int - }{f: 1}, - expectedResponse: `{ - "jsonrpc": "2.0", - "id": "", - "error": { - "code": -32603, - "message": "Internal error", - "data": "{1}" - } -}`, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - var ( - req, _ = http.NewRequest(http.MethodGet, "", nil) - resp = httptest.NewRecorder() - logger = log.NewNoopLogger() - // Create a handler that will always panic with argument tt.panicArg - handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - panic(tt.panicArg) - }) - ) - - RecoverAndLogHandler(handler, logger).ServeHTTP(resp, req) - - require.Equal(t, tt.expectedResponse, resp.Body.String()) - }) - } -} diff --git a/tm2/pkg/bft/rpc/lib/server/jsonrpc.go b/tm2/pkg/bft/rpc/lib/server/jsonrpc.go new file mode 100644 index 00000000000..5c581e4ea58 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/jsonrpc.go @@ -0,0 +1,438 @@ +package server + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "log/slog" + "net/http" + "sort" + + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/conns" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/conns/wsconn" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec" + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/writer" + httpWriter "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/writer/http" + wsWriter "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/writer/ws" + "github.com/gnolang/gno/tm2/pkg/log" + "github.com/go-chi/chi/v5" + "github.com/go-chi/chi/v5/middleware" + "github.com/google/uuid" + "github.com/olahol/melody" +) + +const ( + jsonMimeType = "application/json" // Only JSON is supported + maxRequestBodySize 
= 1 << 20 // 1MB + wsIDKey = "ws-id" // key used for WS connection metadata +) + +// maxSizeMiddleware enforces a 1MB size limit on the request body +func maxSizeMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r.Body = http.MaxBytesReader(w, r.Body, maxRequestBodySize) + + next.ServeHTTP(w, r) + }) +} + +// JSONRPC is the JSONRPC server instance, that is capable +// of handling both HTTP and WS requests +type JSONRPC struct { + // wsConns keeps track of WS connections + // that need to be directly accessed by certain methods + wsConns conns.ConnectionManager + + logger *slog.Logger + + // handlers are the registered method handlers + handlers handlers + + // ws handles incoming and active WS connections + ws *melody.Melody +} + +// NewJSONRPC creates a new instance of the JSONRPC server +func NewJSONRPC(opts ...Option) *JSONRPC { + j := &JSONRPC{ + logger: log.NewNoopLogger(), + handlers: newHandlers(), + ws: melody.New(), + } + + for _, opt := range opts { + opt(j) + } + + // Set up the WS connection manager + j.wsConns = wsconn.NewConns(j.logger) + + // Set up the WS listeners + j.setupWSListeners() + + return j +} + +// SetupRoutes sets up the request router for the JSON-RPC service +func (j *JSONRPC) SetupRoutes(mux *chi.Mux) *chi.Mux { + // Set up the middlewares + mux.Use(middleware.AllowContentType(jsonMimeType)) + mux.Use(maxSizeMiddleware) + + // OPTIONS requests are ignored + mux.Options("/", func(http.ResponseWriter, *http.Request) {}) + + // Browser-friendly endpoints (GET) + mux.Get("/", j.handleIndexRequest) + mux.Get("/{method}", j.handleHTTPGetRequest) + + // Register the POST method handler for HTTP requests + mux.Post("/", j.handleHTTPRequest) + + // Register the WS method handler + mux.HandleFunc("/websocket", j.handleWSRequest) + + return mux +} + +// RegisterHandler registers a new method handler, +// overwriting existing ones, if any +func (j *JSONRPC) RegisterHandler(method string, handler Handler, paramNames ...string) { + j.handlers.addHandler(method, handler, paramNames...) 
+} + +// UnregisterHandler removes the method handler for the specified method, if any +func (j *JSONRPC) UnregisterHandler(method string) { + j.handlers.removeHandler(method) +} + +// setupWSListeners sets up handlers for WS events +func (j *JSONRPC) setupWSListeners() { + // Set up the new connection handler + j.ws.HandleConnect(func(s *melody.Session) { + j.logger.Info( + "WS connection established", + "remote", s.RemoteAddr().String(), + ) + + // Generate the WS ID + wsID := uuid.NewString() + s.Set(wsIDKey, wsID) + + // Register the connection so it's queryable + j.wsConns.AddWSConnection(wsID, s) + }) + + // Set up the connection disconnect handler + j.ws.HandleDisconnect(func(s *melody.Session) { + j.logger.Info( + "WS connection terminated", + "remote", s.RemoteAddr().String(), + ) + + // Read the WS ID + wsIDRaw, _ := s.Get(wsIDKey) + wsConnID := wsIDRaw.(string) + + // Remove the WS connection + j.wsConns.RemoveWSConnection(wsConnID) + }) + + // Set up the core message method handler + j.ws.HandleMessage(func(s *melody.Session, msg []byte) { + // Extract the base request + requests, err := extractBaseRequests(msg) + if err != nil { + // Malformed requests are completely ignored + return + } + + // Get the ID associated with this active WS connection + wsIDRaw, _ := s.Get(wsIDKey) + wsConnID := wsIDRaw.(string) + + // Handle the request + j.handleRequest( + metadata.NewMetadata( + s.RemoteAddr().String(), + metadata.WithWebSocketID(wsConnID), + ), + wsWriter.New(j.logger, s), + requests, + ) + }) +} + +// handleHTTPRequest handles incoming HTTP requests +func (j *JSONRPC) handleHTTPRequest(w http.ResponseWriter, r *http.Request) { + requestBody, readErr := io.ReadAll(r.Body) + if readErr != nil { + http.Error( + w, + "unable to read request", + http.StatusBadRequest, + ) + + return + } + + requests, err := extractBaseRequests(requestBody) + if err != nil { + http.Error( + w, + "Invalid request body", + http.StatusBadRequest, + ) + + return + } + + // Handle the request + w.Header().Set("Content-Type", jsonMimeType) + j.handleRequest( + metadata.NewMetadata(r.RemoteAddr), + httpWriter.New(j.logger, w), + requests, + ) +} + +// handleWSRequest handles incoming WS requests +func (j *JSONRPC) handleWSRequest(w http.ResponseWriter, r *http.Request) { + if err := j.ws.HandleRequest(w, r); err != nil { + j.logger.Error( + "unable to initialize WS connection", + "err", err, + ) + } +} + +// handleRequest handles the specific requests with a +// custom response writer +func (j *JSONRPC) handleRequest( + metadata *metadata.Metadata, + writer writer.ResponseWriter, + requests spec.BaseJSONRequests, +) { + // Parse all JSON-RPC requests + responses := make(spec.BaseJSONResponses, len(requests)) + + for i, baseRequest := range requests { + // Log the request + j.logger.Debug( + "incoming request", + "request", baseRequest, + ) + + // Make sure it's a valid base request + if !isValidBaseRequest(baseRequest) { + // Marshal the JSON-RPC error + responses[i] = spec.NewJSONResponse( + baseRequest.ID, + nil, + spec.NewJSONError( + "invalid JSON-RPC 2.0 request", + spec.InvalidRequestErrorCode, + ), + ) + + continue + } + + // Run the method methodHandler + handleResp, handleErr := j.route(metadata, baseRequest) + if handleErr != nil { + j.logger.Debug( + "unable to handle JSON-RPC request", + "request", baseRequest, + "err", handleErr, + ) + + responses[i] = spec.NewJSONResponse( + baseRequest.ID, + nil, + handleErr, + ) + + continue + } + + // TODO fix print + j.logger.Debug( + "handled request", + 
"request", baseRequest, + ) + + responses[i] = spec.NewJSONResponse( + baseRequest.ID, + handleResp, + nil, + ) + } + + if len(responses) == 1 { + // Write the JSON response as a single response + writer.WriteResponse(responses[0]) + + return + } + + // Write the JSON response as a batch + writer.WriteResponse(responses) +} + +// route routes the base request to the appropriate handler +func (j *JSONRPC) route( + metadata *metadata.Metadata, + request *spec.BaseJSONRequest, +) (any, *spec.BaseJSONError) { + // Get the appropriate handler + entry, ok := j.handlers[request.Method] + if !ok { + return nil, spec.NewJSONError( + "Method handler not set", + spec.MethodNotFoundErrorCode, + ) + } + + return entry.fn(metadata, request.Params) +} + +// handleHTTPGetRequest parses the GET request, extracts the query params, and passes +// the JSON-RPC request on for further processing +func (j *JSONRPC) handleHTTPGetRequest(w http.ResponseWriter, r *http.Request) { + method := chi.URLParam(r, "method") + + entry, ok := j.handlers[method] + if !ok { + http.Error(w, "method not found", http.StatusNotFound) + + return + } + + q := r.URL.Query() + + // Query param order does not actually matter, but the ordering of + // the params for the POST handler does. Because of this, we build the + // params slice in the canonical order defined by the param names + params := make([]any, len(entry.paramNames)) + for i, name := range entry.paramNames { + val := q.Get(name) + if val == "" { + params[i] = nil + + continue + } + + params[i] = val + } + + baseReq := &spec.BaseJSONRequest{ + BaseJSON: spec.BaseJSON{ + JSONRPC: spec.JSONRPCVersion, + ID: spec.JSONRPCNumberID(0), + }, + Method: method, + Params: params, + } + + w.Header().Set("Content-Type", jsonMimeType) + + j.handleRequest( + metadata.NewMetadata(r.RemoteAddr), + httpWriter.New(j.logger, w), + spec.BaseJSONRequests{baseReq}, + ) +} + +// handleIndexRequest writes the list of available rpc endpoints as an HTML page +func (j *JSONRPC) handleIndexRequest(w http.ResponseWriter, r *http.Request) { + // Separate methods with and without args + noArgNames := make([]string, 0, len(j.handlers)) + argNames := make([]string, 0, len(j.handlers)) + + for name, entry := range j.handlers { + if len(entry.paramNames) == 0 { + noArgNames = append(noArgNames, name) + + continue + } + + argNames = append(argNames, name) + } + + sort.Strings(noArgNames) + sort.Strings(argNames) + + var buf bytes.Buffer + + buf.WriteString("") + buf.WriteString("
<br>Available endpoints:<br>")
+
+	host := r.Host
+
+	// Endpoints without arguments
+	for _, name := range noArgNames {
+		link := fmt.Sprintf("//%s/%s", host, name)
+		fmt.Fprintf(&buf, "<a href=\"%s\">%s</a><br>", link, link)
+	}
+
+	buf.WriteString("<br>Endpoints that require arguments:<br>")
+
+	// Endpoints with arguments
+	for _, name := range argNames {
+		entry := j.handlers[name]
+
+		link := fmt.Sprintf("//%s/%s?", host, name)
+		for i, argName := range entry.paramNames {
+			link += argName + "=_"
+
+			if i < len(entry.paramNames)-1 {
+				link += "&"
+			}
+		}
+
+		fmt.Fprintf(&buf, "<a href=\"%s\">%s</a><br>
", link, link) + } + + buf.WriteString("") + + w.Header().Set("Content-Type", "text/html") + w.WriteHeader(http.StatusOK) + + if _, err := buf.WriteTo(w); err != nil { + j.logger.Error("failed to write RPC endpoint index", "err", err) + } +} + +// isValidBaseRequest validates that the base JSON request is valid +func isValidBaseRequest(baseRequest *spec.BaseJSONRequest) bool { + if baseRequest.Method == "" { + return false + } + + return baseRequest.JSONRPC == spec.JSONRPCVersion +} + +// extractBaseRequests extracts the base JSON-RPC request from the +// request body +func extractBaseRequests(requestBody []byte) (spec.BaseJSONRequests, error) { + // Extract the request + var requests spec.BaseJSONRequests + + // Check if the request is a batch request + if err := json.Unmarshal(requestBody, &requests); err != nil { + // Try to get a single JSON-RPC request, since this is not a batch + var baseRequest *spec.BaseJSONRequest + if err := json.Unmarshal(requestBody, &baseRequest); err != nil { + return nil, err + } + + requests = spec.BaseJSONRequests{ + baseRequest, + } + } + + return requests, nil +} diff --git a/tm2/pkg/bft/rpc/lib/server/metadata/metadata.go b/tm2/pkg/bft/rpc/lib/server/metadata/metadata.go new file mode 100644 index 00000000000..9752c1e3639 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/metadata/metadata.go @@ -0,0 +1,26 @@ +package metadata + +// Metadata houses the active request metadata +type Metadata struct { + WebSocketID *string + RemoteAddr string +} + +// NewMetadata creates a new request metadata object +func NewMetadata(remoteAddr string, opts ...Option) *Metadata { + m := &Metadata{ + RemoteAddr: remoteAddr, + } + + for _, opt := range opts { + opt(m) + } + + return m +} + +// IsWS returns a flag indicating if the request +// belongs to a WS connection +func (m *Metadata) IsWS() bool { + return m.WebSocketID != nil +} diff --git a/tm2/pkg/bft/rpc/lib/server/metadata/metadata_test.go b/tm2/pkg/bft/rpc/lib/server/metadata/metadata_test.go new file mode 100644 index 00000000000..8b3ff3c29e8 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/metadata/metadata_test.go @@ -0,0 +1,38 @@ +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMetadata_NewMetadata(t *testing.T) { + t.Parallel() + + t.Run("HTTP metadata", func(t *testing.T) { + t.Parallel() + + address := "remote address" + m := NewMetadata(address) + + require.NotNil(t, m) + + assert.Equal(t, address, m.RemoteAddr) + assert.False(t, m.IsWS()) + }) + + t.Run("WS metadata", func(t *testing.T) { + t.Parallel() + + address := "remote address" + wsID := "ws ID" + m := NewMetadata(address, WithWebSocketID(wsID)) + + require.NotNil(t, m) + + assert.Equal(t, address, m.RemoteAddr) + assert.True(t, m.IsWS()) + assert.Equal(t, wsID, *m.WebSocketID) + }) +} diff --git a/tm2/pkg/bft/rpc/lib/server/metadata/options.go b/tm2/pkg/bft/rpc/lib/server/metadata/options.go new file mode 100644 index 00000000000..af1f9627cbb --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/metadata/options.go @@ -0,0 +1,11 @@ +package metadata + +type Option func(m *Metadata) + +// WithWebSocketID sets the WS connection ID +// for the connection metadata +func WithWebSocketID(id string) Option { + return func(m *Metadata) { + m.WebSocketID = &id + } +} diff --git a/tm2/pkg/bft/rpc/lib/server/options.go b/tm2/pkg/bft/rpc/lib/server/options.go new file mode 100644 index 00000000000..22658069896 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/options.go @@ -0,0 +1,13 
@@ +package server + +import "log/slog" + +type Option func(s *JSONRPC) + +// WithLogger sets the logger to be used +// with the JSON-RPC server +func WithLogger(logger *slog.Logger) Option { + return func(s *JSONRPC) { + s.logger = logger + } +} diff --git a/tm2/pkg/bft/rpc/lib/server/parse_test.go b/tm2/pkg/bft/rpc/lib/server/parse_test.go deleted file mode 100644 index b4a3fc14459..00000000000 --- a/tm2/pkg/bft/rpc/lib/server/parse_test.go +++ /dev/null @@ -1,307 +0,0 @@ -package rpcserver - -import ( - "bytes" - "encoding/base64" - "encoding/hex" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strconv" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" -) - -func TestParseJSONMap(t *testing.T) { - t.Parallel() - - input := []byte(`{"value":"1234","height":22}`) - - // naive is float,string - var p1 map[string]any - err := json.Unmarshal(input, &p1) - if assert.Nil(t, err) { - h, ok := p1["height"].(float64) - if assert.True(t, ok, "%#v", p1["height"]) { - assert.EqualValues(t, 22, h) - } - v, ok := p1["value"].(string) - if assert.True(t, ok, "%#v", p1["value"]) { - assert.EqualValues(t, "1234", v) - } - } - - // preloading map with values doesn't help - tmp := 0 - p2 := map[string]any{ - "value": &[]byte{}, - "height": &tmp, - } - err = json.Unmarshal(input, &p2) - if assert.Nil(t, err) { - h, ok := p2["height"].(float64) - if assert.True(t, ok, "%#v", p2["height"]) { - assert.EqualValues(t, 22, h) - } - v, ok := p2["value"].(string) - if assert.True(t, ok, "%#v", p2["value"]) { - assert.EqualValues(t, "1234", v) - } - } - - // preload here with *pointers* to the desired types - // struct has unknown types, but hard-coded keys - tmp = 0 - p3 := struct { - Value any `json:"value"` - Height any `json:"height"` - }{ - Height: &tmp, - Value: &[]byte{}, - } - err = json.Unmarshal(input, &p3) - if assert.Nil(t, err) { - h, ok := p3.Height.(*int) - if assert.True(t, ok, "%#v", p3.Height) { - assert.Equal(t, 22, *h) - } - v, ok := p3.Value.(*[]byte) - if assert.True(t, ok, "%#v", p3.Value) { - // "1234" is interpreted as base64, decodes to the following bytes. - assert.EqualValues(t, []byte{0xd7, 0x6d, 0xf8}, *v) - } - } - - // simplest solution, but hard-coded - p4 := struct { - Value []byte `json:"value"` - Height int `json:"height"` - }{} - err = json.Unmarshal(input, &p4) - if assert.Nil(t, err) { - assert.EqualValues(t, 22, p4.Height) - assert.EqualValues(t, []byte{0xd7, 0x6d, 0xf8}, p4.Value) - } - - // so, let's use this trick... 
- // dynamic keys on map, and we can deserialize to the desired types - var p5 map[string]*json.RawMessage - err = json.Unmarshal(input, &p5) - if assert.Nil(t, err) { - var h int - err = json.Unmarshal(*p5["height"], &h) - if assert.Nil(t, err) { - assert.Equal(t, 22, h) - } - - var v []byte - err = json.Unmarshal(*p5["value"], &v) - if assert.Nil(t, err) { - assert.Equal(t, []byte{0xd7, 0x6d, 0xf8}, v) - } - } -} - -func TestParseJSONArray(t *testing.T) { - t.Parallel() - - input := []byte(`["1234",22]`) - - // naive is float,string - var p1 []any - err := json.Unmarshal(input, &p1) - if assert.Nil(t, err) { - v, ok := p1[0].(string) - if assert.True(t, ok, "%#v", p1[0]) { - assert.EqualValues(t, "1234", v) - } - h, ok := p1[1].(float64) - if assert.True(t, ok, "%#v", p1[1]) { - assert.EqualValues(t, 22, h) - } - } - - // preloading map with values helps here (unlike map - p2 above) - tmp := 0 - p2 := []any{&[]byte{}, &tmp} - err = json.Unmarshal(input, &p2) - if assert.Nil(t, err) { - v, ok := p2[0].(*[]byte) - if assert.True(t, ok, "%#v", p2[0]) { - assert.EqualValues(t, []byte{0xd7, 0x6d, 0xf8}, *v) - } - h, ok := p2[1].(*int) - if assert.True(t, ok, "%#v", p2[1]) { - assert.EqualValues(t, 22, *h) - } - } -} - -func TestParseJSONRPC(t *testing.T) { - t.Parallel() - - demo := func(ctx *types.Context, height int, name string) {} - call := NewRPCFunc(demo, "height,name") - - cases := []struct { - raw string - height int64 - name string - fail bool - }{ - // should parse - {`["7", "flew"]`, 7, "flew", false}, - {`{"name": "john", "height": "22"}`, 22, "john", false}, - // defaults - {`{"name": "solo", "unused": "stuff"}`, 0, "solo", false}, - // should fail - wrong types/length - {`["flew", 7]`, 0, "", true}, - {`[7,"flew",100]`, 0, "", true}, - {`{"name": -12, "height": "fred"}`, 0, "", true}, - } - for idx, tc := range cases { - i := strconv.Itoa(idx) - data := []byte(tc.raw) - vals, err := jsonParamsToArgs(call, data) - if tc.fail { - assert.NotNil(t, err, i) - } else { - assert.Nil(t, err, "%s: %+v", i, err) - if assert.Equal(t, 2, len(vals), i) { - assert.Equal(t, tc.height, vals[0].Int(), i) - assert.Equal(t, tc.name, vals[1].String(), i) - } - } - } -} - -func TestParseURINonJSON(t *testing.T) { - t.Parallel() - - // Define a demo RPC function - demo := func(ctx *types.Context, height int, name string, hash []byte) {} - call := NewRPCFunc(demo, "height,name,hash") - - // Helper function to decode input base64 string to []byte - decodeBase64 := func(input string) []byte { - decoded, _ := base64.StdEncoding.DecodeString(input) - return decoded - } - - // Helper function to decode input hex string to []byte - decodeHex := func(input string) []byte { - decoded, _ := hex.DecodeString(input[2:]) - return decoded - } - - // Test cases for non-JSON encoded parameters - nonJSONCases := []struct { - raw []string - height int64 - name string - hash []byte - fail bool - }{ - // can parse numbers and strings, quoted and unquoted - {[]string{"7", `"flew"`, "MzMz"}, 7, "flew", decodeBase64("MzMz"), false}, - {[]string{"22", `john`, "MzM="}, 22, "john", decodeBase64("MzM="), false}, - {[]string{"-10", `"bob"`, "Z25v"}, -10, "bob", decodeBase64("Z25v"), false}, - // can parse numbers quoted, too - {[]string{`"7"`, `"flew"`, "0x486173682076616c7565"}, 7, "flew", decodeHex("0x486173682076616c7565"), false}, // Testing hex encoded data - {[]string{`"-10"`, `"bob"`, "0x6578616d706c65"}, -10, "bob", decodeHex("0x6578616d706c65"), false}, // Testing hex encoded data - // []byte must be base64 - 
{[]string{`"-10"`, `bob`, "invalid_encoded_data"}, -10, "bob", []byte("invalid_encoded_data"), true}, // Invalid encoded data format - } - - // Iterate over test cases for non-JSON encoded parameters - for idx, tc := range nonJSONCases { - t.Run(fmt.Sprintf("case %d", idx), func(t *testing.T) { - t.Parallel() - - i := strconv.Itoa(idx) - url := fmt.Sprintf("test.com/method?height=%v&name=%v&hash=%v", tc.raw[0], tc.raw[1], url.QueryEscape(tc.raw[2])) - req, err := http.NewRequest("GET", url, nil) - - assert.NoError(t, err) - - // Invoke httpParamsToArgs to parse the request and convert to reflect.Values - vals, err := httpParamsToArgs(call, req) - - // Check for expected errors or successful parsing - if tc.fail { - assert.NotNil(t, err, i) - - return - } - - assert.Nil(t, err, "%s: %+v", i, err) - // Assert the parsed values match the expected height, name, and data - - if assert.Equal(t, 3, len(vals), i) { - assert.Equal(t, tc.height, vals[0].Int(), i) - assert.Equal(t, tc.name, vals[1].String(), i) - assert.Equal(t, len(tc.hash), len(vals[2].Bytes()), i) - assert.True(t, bytes.Equal(tc.hash, vals[2].Bytes()), i) - } - }) - } -} - -func TestParseURI_JSON(t *testing.T) { - t.Parallel() - - type Data struct { - Key string `json:"key"` - } - - // Define a demo RPC function - demo := func(ctx *types.Context, data Data) {} - call := NewRPCFunc(demo, "data") - - // Test cases for JSON encoded parameters - jsonCases := []struct { - raw string - data Data - fail bool - }{ - // Valid JSON encoded values - {`{"key": "value"}`, Data{Key: "value"}, false}, - {`{"id": 123}`, Data{}, false}, // Invalid field "id" (not in struct) - {`{"list": [1, 2, 3]}`, Data{}, false}, // Invalid field "list" (not in struct) - // Invalid JSON encoded values - {`"string_data"`, Data{}, true}, // Invalid JSON format (not an object) - {`12345`, Data{}, true}, // Invalid JSON format (not an object) - {`{"key": true}`, Data{}, true}, // Invalid field "key" type (expected string) - {`{"key": {"nested": "value"}}`, Data{}, true}, // Invalid field "key" type (nested object) - } - - // Iterate over test cases for JSON encoded parameters - for _, tc := range jsonCases { - t.Run(tc.raw, func(t *testing.T) { - t.Parallel() - url := fmt.Sprintf("test.com/method?data=%v", url.PathEscape(tc.raw)) - req, err := http.NewRequest("GET", url, nil) - assert.NoError(t, err) - - // Invoke httpParamsToArgs to parse the request and convert to reflect.Values - vals, err := httpParamsToArgs(call, req) - - // Check for expected errors or successful parsing - if tc.fail { - assert.NotNil(t, err) - return - } - - assert.Nil(t, err, " %+v", err) - - // Assert the parsed values match the expected data - require.Len(t, vals, 1) - assert.Equal(t, tc.data, vals[0].Interface()) - }) - } -} diff --git a/tm2/pkg/bft/rpc/lib/server/spec/errors.go b/tm2/pkg/bft/rpc/lib/server/spec/errors.go new file mode 100644 index 00000000000..cc521127c2d --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/spec/errors.go @@ -0,0 +1,9 @@ +package spec + +const ( + ParseErrorCode int = -32700 + InvalidParamsErrorCode int = -32602 + MethodNotFoundErrorCode int = -32601 + InvalidRequestErrorCode int = -32600 + ServerErrorCode int = -32000 +) diff --git a/tm2/pkg/bft/rpc/lib/server/spec/spec.go b/tm2/pkg/bft/rpc/lib/server/spec/spec.go new file mode 100644 index 00000000000..8fa71bb3c10 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/spec/spec.go @@ -0,0 +1,315 @@ +package spec + +import ( + "encoding/json" + "fmt" + + "github.com/gnolang/gno/tm2/pkg/amino" +) + +const 
JSONRPCVersion = "2.0" + +// JSONRPCID is a wrapper type for JSON-RPC request IDs, +// which can be a string or number (or omitted) +type JSONRPCID interface { + String() string +} + +// JSONRPCStringID is a wrapper for JSON-RPC string IDs +type JSONRPCStringID string + +func (id JSONRPCStringID) String() string { + return string(id) +} + +// JSONRPCNumberID is a wrapper for JSON-RPC number IDs +type JSONRPCNumberID uint + +func (id JSONRPCNumberID) String() string { + return fmt.Sprintf("%d", id) +} + +// parseID parses the generic JSON value into a JSON-RPC ID (string / number) +func parseID(idValue any) (JSONRPCID, error) { + switch v := idValue.(type) { + case string: + return JSONRPCStringID(v), nil + case float64: + // encoding/json uses float64 for numbers + return JSONRPCNumberID(uint(v)), nil + case nil: + // omitted + return nil, nil + default: + return nil, fmt.Errorf("JSON-RPC ID (%v) is of unknown type (%T)", v, v) + } +} + +// BaseJSON defines the base JSON fields +// all JSON-RPC requests and responses need to have +type BaseJSON struct { + JSONRPC string `json:"jsonrpc"` + ID JSONRPCID `json:"id,omitempty"` +} + +// BaseJSONRequest defines the base JSON request format +type BaseJSONRequest struct { + BaseJSON + + Method string `json:"method"` + + // Keeping Params as []any, instead of a json.RawMessage + // is a design choice. The Tendermint RPC, traditionally, + // has always supported positional params, so []any. + // POST requests to the Tendermint RPC always include positional params. + // Additionally, the Tendermint RPC does not support named params ({}). + // The RPC can handle GET requests from the user's browser, which can contain + // random positional arguments, but this is a case that can be handled easily, + // without enforcing the Params to be either-or a specific type. 
+ Params []any `json:"params"` +} + +// BaseJSONRequests represents a batch of JSON-RPC requests +type BaseJSONRequests []*BaseJSONRequest + +// BaseJSONResponses represents a batch of JSON-RPC responses +type BaseJSONResponses []*BaseJSONResponse + +// BaseJSONResponse defines the base JSON response format +type BaseJSONResponse struct { + Result json.RawMessage `json:"result,omitempty"` // We need to keep the result as a RawMessage, for Amino encoding + Error *BaseJSONError `json:"error,omitempty"` + BaseJSON +} + +// BaseJSONError defines the base JSON response error format +type BaseJSONError struct { + Data any `json:"data,omitempty"` + Code int `json:"code"` + Message string `json:"message"` +} + +func (err BaseJSONError) Error() string { + const baseFormat = "RPC error %d - %s" + + if err.Data != "" { + return fmt.Sprintf(baseFormat+": %s", err.Code, err.Message, err.Data) + } + + return fmt.Sprintf(baseFormat, err.Code, err.Message) +} + +// NewJSONRequest creates a new JSON-RPC request +func NewJSONRequest( + id JSONRPCID, + method string, + params []any, +) *BaseJSONRequest { + return &BaseJSONRequest{ + BaseJSON: BaseJSON{ + ID: id, + JSONRPC: JSONRPCVersion, + }, + Method: method, + Params: params, + } +} + +// NewJSONResponse creates a new JSON-RPC response +func NewJSONResponse( + id JSONRPCID, + result any, + err *BaseJSONError, +) *BaseJSONResponse { + var raw json.RawMessage + + if err == nil && result != nil { + b, marshalErr := amino.MarshalJSON(result) + if marshalErr != nil { + return NewJSONResponse( + id, + nil, + GenerateResponseError(marshalErr), + ) + } + + raw = b + } + + return &BaseJSONResponse{ + BaseJSON: BaseJSON{ + ID: id, + JSONRPC: JSONRPCVersion, + }, + Result: raw, + Error: err, + } +} + +// NewJSONError creates a new JSON-RPC error +func NewJSONError(message string, code int) *BaseJSONError { + return &BaseJSONError{ + Code: code, + Message: message, + } +} + +// GenerateResponseError generates the JSON-RPC server error response +func GenerateResponseError(err error) *BaseJSONError { + return NewJSONError(err.Error(), ServerErrorCode) +} + +// GenerateInvalidParamError generates the JSON-RPC invalid param error response +func GenerateInvalidParamError(index int) *BaseJSONError { + return NewJSONError( + fmt.Sprintf( + "Invalid %s parameter", + getOrdinalSuffix(index), + ), + InvalidParamsErrorCode, + ) +} + +func getOrdinalSuffix(num int) string { + switch num % 10 { + case 1: + if num%100 != 11 { + return fmt.Sprintf("%d%s", num, "st") + } + case 2: + if num%100 != 12 { + return fmt.Sprintf("%d%s", num, "nd") + } + case 3: + if num%100 != 13 { + return fmt.Sprintf("%d%s", num, "rd") + } + } + + return fmt.Sprintf("%d%s", num, "th") +} + +func (r BaseJSONRequest) MarshalJSON() ([]byte, error) { + var id any + switch v := r.ID.(type) { + case nil: + // omitted + case JSONRPCStringID: + id = string(v) + case JSONRPCNumberID: + id = uint(v) + default: + if v != nil { + return nil, fmt.Errorf("unsupported JSON-RPC ID type %T", v) + } + } + + var raw struct { + JSONRPC string `json:"jsonrpc"` + ID any `json:"id,omitempty"` + Method string `json:"method"` + Params []any `json:"params"` + } + + raw.JSONRPC = r.JSONRPC + raw.ID = id + raw.Method = r.Method + raw.Params = r.Params + + return json.Marshal(raw) +} + +func (r *BaseJSONRequest) UnmarshalJSON(data []byte) error { + var raw struct { + JSONRPC string `json:"jsonrpc"` + ID any `json:"id"` + Method string `json:"method"` + Params json.RawMessage `json:"params"` + } + + if err := json.Unmarshal(data, 
&raw); err != nil { + return fmt.Errorf("unable to JSON-parse request: %w", err) + } + + r.JSONRPC = raw.JSONRPC + r.Method = raw.Method + + // Parse ID + id, err := parseID(raw.ID) + if err != nil { + return fmt.Errorf("unable to parse request ID: %w", err) + } + + r.ID = id + + // Parse params as []any + if len(raw.Params) == 0 || string(raw.Params) == "null" { + r.Params = nil + + return nil + } + + var params []any + if err := json.Unmarshal(raw.Params, ¶ms); err != nil { + return fmt.Errorf("unable to parse request params: %w", err) + } + + r.Params = params + + return nil +} + +func (r BaseJSONResponse) MarshalJSON() ([]byte, error) { + var id any + switch v := r.ID.(type) { + case nil: + case JSONRPCStringID: + id = string(v) + case JSONRPCNumberID: + id = uint(v) + default: + if v != nil { + return nil, fmt.Errorf("unsupported JSON-RPC ID type %T", v) + } + } + + var raw struct { + JSONRPC string `json:"jsonrpc"` + ID any `json:"id,omitempty"` + Result json.RawMessage `json:"result,omitempty"` + Error *BaseJSONError `json:"error,omitempty"` + } + + raw.JSONRPC = r.JSONRPC + raw.ID = id + raw.Result = r.Result + raw.Error = r.Error + + return json.Marshal(raw) +} + +func (r *BaseJSONResponse) UnmarshalJSON(data []byte) error { + var raw struct { + JSONRPC string `json:"jsonrpc"` + ID any `json:"id"` + Result json.RawMessage `json:"result,omitempty"` + Error *BaseJSONError `json:"error,omitempty"` + } + + if err := json.Unmarshal(data, &raw); err != nil { + return fmt.Errorf("unable to JSON-parse response: %w", err) + } + + r.JSONRPC = raw.JSONRPC + r.Error = raw.Error + r.Result = raw.Result + + id, err := parseID(raw.ID) + if err != nil { + return fmt.Errorf("unable to parse response ID: %w", err) + } + + r.ID = id + + return nil +} diff --git a/tm2/pkg/bft/rpc/lib/types/types_test.go b/tm2/pkg/bft/rpc/lib/server/spec/spec_test.go similarity index 75% rename from tm2/pkg/bft/rpc/lib/types/types_test.go rename to tm2/pkg/bft/rpc/lib/server/spec/spec_test.go index 667cfea2711..e8e29c52936 100644 --- a/tm2/pkg/bft/rpc/lib/types/types_test.go +++ b/tm2/pkg/bft/rpc/lib/server/spec/spec_test.go @@ -1,4 +1,4 @@ -package rpctypes +package spec import ( "encoding/json" @@ -7,8 +7,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/gnolang/gno/tm2/pkg/errors" ) func TestJSONRPCID_Marshal_Unmarshal(t *testing.T) { @@ -39,19 +37,14 @@ func TestJSONRPCID_Marshal_Unmarshal(t *testing.T) { JSONRPCStringID("àáâ"), `"àáâ"`, }, - { - "negative number", - JSONRPCIntID(-1), - "-1", - }, { "zero ID", - JSONRPCIntID(0), + JSONRPCNumberID(0), "0", }, { "non-zero ID", - JSONRPCIntID(100), + JSONRPCNumberID(100), "100", }, } @@ -66,11 +59,14 @@ func TestJSONRPCID_Marshal_Unmarshal(t *testing.T) { t.Parallel() data, err := json.Marshal( - NewRPCSuccessResponse(testCase.id, struct { - Value string - }{ - Value: "hello", - }, + NewJSONResponse( + testCase.id, + struct { + Value string + }{ + Value: "hello", + }, + nil, ), ) require.NoError(t, err) @@ -84,19 +80,31 @@ func TestJSONRPCID_Marshal_Unmarshal(t *testing.T) { string(data), ) - data, err = json.Marshal(RPCParseError(testCase.id, errors.New("Hello world"))) + data, err = json.Marshal( + NewJSONResponse( + testCase.id, + nil, + NewJSONError("Invalid JSON", ParseErrorCode), + ), + ) require.NoError(t, err) assert.Equal( t, fmt.Sprintf( - `{"jsonrpc":"2.0","id":%v,"error":{"code":-32700,"message":"Parse error. 
Invalid JSON","data":"Hello world"}}`, + `{"jsonrpc":"2.0","id":%v,"error":{"code":-32700,"message":"Invalid JSON"}}`, testCase.expectedID, ), string(data), ) - data, err = json.Marshal(RPCMethodNotFoundError(testCase.id)) + data, err = json.Marshal( + NewJSONResponse( + testCase.id, + nil, + NewJSONError("Method not found", MethodNotFoundErrorCode), + ), + ) require.NoError(t, err) assert.Equal( @@ -112,7 +120,7 @@ func TestJSONRPCID_Marshal_Unmarshal(t *testing.T) { t.Run("unmarshal", func(t *testing.T) { t.Parallel() - var expectedResponse RPCResponse + var expectedResponse *BaseJSONResponse assert.NoError( t, @@ -122,13 +130,14 @@ func TestJSONRPCID_Marshal_Unmarshal(t *testing.T) { ), ) - successResponse := NewRPCSuccessResponse( + successResponse := NewJSONResponse( testCase.id, struct { Value string }{ Value: "hello", }, + nil, ) assert.Equal(t, expectedResponse, successResponse) diff --git a/tm2/pkg/bft/rpc/lib/server/test.crt b/tm2/pkg/bft/rpc/lib/server/test.crt deleted file mode 100644 index e4ab1965dea..00000000000 --- a/tm2/pkg/bft/rpc/lib/server/test.crt +++ /dev/null @@ -1,25 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEODCCAiCgAwIBAgIQWDHUrd4tOM2xExWhzOEJ7DANBgkqhkiG9w0BAQsFADAZ -MRcwFQYDVQQDEw50ZW5kZXJtaW50LmNvbTAeFw0xOTA2MDIxMTAyMDdaFw0yMDEy -MDIxMTAyMDRaMBExDzANBgNVBAMTBnNlcnZlcjCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBANBaa6dc9GZcIhAHWqVrx0LONYf+IlbvTP7yrV45ws0ix8TX -1NUOiDY1cwzKH8ay/HYX45e2fFLrtLidc9h+apsC55k3Vdcy00+Ksr/adjR8D4A/ -GpnTS+hVDHTlqINe9a7USok34Zr1rc3fh4Imu5RxEurjMwkA/36k6+OpXMp2qlKY -S1fGqwn2KGhXkp/yTWZILEMXBazNxGx4xfqYXzWm6boeyJAXpM2DNkv7dtwa/CWY -WacUQJApNInwn5+B8LLoo+pappkfZOjAD9/aHKsyFTSWmmWeg7V//ouB3u5vItqf -GP+3xmPgeYeEyOIe/P2f8bRuQs+GGwSCmi6F1GUCAwEAAaOBgzCBgDAOBgNVHQ8B -Af8EBAMCA7gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQW -BBSpBFIMbkBR4xVYQZtUJQQwzPmbHjAfBgNVHSMEGDAWgBTUkz3u+N2iMe6yKb5+ -R1d4CeM9YTAPBgNVHREECDAGhwR/AAABMA0GCSqGSIb3DQEBCwUAA4ICAQBCqdzS -tPHkMYWjYs6aREwob9whjyG8a4Qp6IkP1SYHCwpzsTeWLi9ybEcDRb3jZ4iRxbZg -7GFxjqHoWgBZHAIyICMsHupOJEtXq5hx86NuMwk/12bx1eNj0yTIAnVOA+em/ZtB -zR38OwB8xXmjKd0Ow1Y7zCh5zE2gU+sR0JOJSfxXUZrJvwDNrbcmZPQ+kwuq4cyv -fxZnvZf/owbyOLQFdbiPQbbiZ7JSv8q7GCMleULCEygrsWClYkULUByhKykCHJIU -wfq1owge9EqG/4CDCCjB9vBFmUyv3FJhgWnzd6tPQckFoHSoD0Bjsv/pQFcsGLcg -+e/Mm6hZgCXXwI2WHYbxqz5ToOaRQQYo6N77jWejOBMecOZmPDyQ2nz73aJd11GW -NiDT7pyMlBJA8W4wAvVP4ow2ugqsPjqZ6EyismIGFUTqMp+NtXOsLPK+sEMhKhJ9 -ulczRpPEf25roBt6aEk2fTAfAPmbpvNamBLSbBU23mzJ38RmfhxLOlOgCGbBBX4d -kE+/+En8UJO4X8CKaKRo/c5G2UZ6++2cjp6SPrsGENDMW5yBGegrDw+ow8/bLxIr -OjWpSe2cygovy3aHE6UBOgkxw9KIaSEqFgjQZ0i+xO6l6qQoljQgUGXfecVMR+7C -4KsyVVTMlK9/thA7Zfc8a5z8ZCtIKkT52XsJhw== ------END CERTIFICATE----- diff --git a/tm2/pkg/bft/rpc/lib/server/test.key b/tm2/pkg/bft/rpc/lib/server/test.key deleted file mode 100644 index bb9af06b050..00000000000 --- a/tm2/pkg/bft/rpc/lib/server/test.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEoQIBAAKCAQEA0Fprp1z0ZlwiEAdapWvHQs41h/4iVu9M/vKtXjnCzSLHxNfU -1Q6INjVzDMofxrL8dhfjl7Z8Uuu0uJ1z2H5qmwLnmTdV1zLTT4qyv9p2NHwPgD8a -mdNL6FUMdOWog171rtRKiTfhmvWtzd+Hgia7lHES6uMzCQD/fqTr46lcynaqUphL -V8arCfYoaFeSn/JNZkgsQxcFrM3EbHjF+phfNabpuh7IkBekzYM2S/t23Br8JZhZ -pxRAkCk0ifCfn4Hwsuij6lqmmR9k6MAP39ocqzIVNJaaZZ6DtX/+i4He7m8i2p8Y -/7fGY+B5h4TI4h78/Z/xtG5Cz4YbBIKaLoXUZQIDAQABAoH/NodzpVmunRt/zrIe -By0t+U3+tJjOY/I9NHxO41o6oXV40wupqBkljQpwEejUaCxv5nhaGFqqLwmBQs/y -gbaUL/2Sn4bb8HZc13R1U8DZLuNJK0dYrumd9DBOEkoI0FkJ87ebyk3VvbiOxFK8 -JFP+w9rUGKVdtf2M4JhJJEwu/M2Yawx9/8CrCIY2G6ufaylrIysLeQMsxrogF8n4 
-hq7fyqveWRzxhqUxS2fp9Ynpx4jnd1lMzv+z3i8eEsW+gB9yke7UkXZMbtZg1xfB -JjiEfcDVfSwSihhgOYttgQ9hkIdohDUak7OzRSWVBuoxWUhMfrQxw/HZlgZJL9Vf -rGdlAoGBANOGmgEGky+acV33WTWGV5OdAw6B/SlBEoORJbj6UzQiUz3hFH/Tgpbj -JOKHWGbGd8OtOYbt9JoofGlNgHA/4nAEYAc2HGa+q0fBwMUflU0DudAxXis4jDmE -D76moGmyJoSgwVrp1W/vwNixA5RpcZ3Wst2nf9RKLr+DxypHTit/AoGBAPwpDeqc -rwXOTl0KR/080Nc11Z03VIVZAGfA59J73HmADF9bBVlmReQdkwX0lERchdzD0lfa -XqbqBLr4FS5Uqyn5f3DCaMnOeKfvtGw2z6LnY+w03mii4PEW/vNKLlB18NdduPwL -KeAc08Zh+qJFMKD1PoEQOH+Y7NybBbaQL8IbAoGAfPPUYaq6o7I+Kd4FysKTVVW5 -CobrP8V65FGH0R++qttkBPfDHkeZqvx/O3nsVLoE4YigpP5IMhCcfbAUoTp7zuQm -vdvPJzqW/4qLD2c60QXUbBHdqPZ8jzVd/6d6tzVP36T+02+yb69XYiofDTrErRK5 -EorxzjwMJYH40xbQLI0CgYBh7d/FucwPSSwN3ixPIQtKSVIImLBuiT4rDTP6/reF -SEGF1ueg7KNAEGxE59OdKQGj1zkdfWU9Fa14n1g6gg9nYcoolJf1qAYb0nAThsXk -0lBwL6ggowERIIkrGygZf3Rlb7SjzgIZU5i7dtnLo2tbV2NK5G3MwCtdEaeKWzzw -+QKBgQC7+JPHoqbnNgis2vCGLKMOU3HpJK/rYEU/8ZUegc9lshEFZYsRbtKQQJQs -nqsChrG8UoK84frujEBkO/Nzsil85p8ar79wZguGnVvswTWaTuKvl8H/qQQ/JSHZ -OHGQD4qwTCkdRr8Vf8NfuCoZlJDnHncLJZNWjrb5feqCnJ/YIQ== ------END RSA PRIVATE KEY----- diff --git a/tm2/pkg/bft/rpc/lib/server/write_endpoints_test.go b/tm2/pkg/bft/rpc/lib/server/write_endpoints_test.go deleted file mode 100644 index 3402012b458..00000000000 --- a/tm2/pkg/bft/rpc/lib/server/write_endpoints_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package rpcserver - -import ( - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/stretchr/testify/assert" - - types "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/types" -) - -func TestWriteListOfEndpoints(t *testing.T) { - funcMap := map[string]*RPCFunc{ - "c": NewWSRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), - "d": {}, - } - - req, _ := http.NewRequest("GET", "http://localhost/", nil) - rec := httptest.NewRecorder() - writeListOfEndpoints(rec, req, funcMap) - res := rec.Result() - assert.Equal(t, res.StatusCode, 200, "Should always return 200") - blob, err := io.ReadAll(res.Body) - assert.NoError(t, err) - gotResp := string(blob) - wantResp := `
<html><body><br>Available endpoints:<br><a href="//localhost/d">//localhost/d</a></br><br>Endpoints that require arguments:<br><a href="//localhost/c?s=_&i=_">//localhost/c?s=_&i=_</a></br></body></html>
` - assert.Equal(t, wantResp, gotResp) -} diff --git a/tm2/pkg/bft/rpc/lib/server/writer/http/http.go b/tm2/pkg/bft/rpc/lib/server/writer/http/http.go new file mode 100644 index 00000000000..545d864c24a --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/writer/http/http.go @@ -0,0 +1,34 @@ +package http + +import ( + "encoding/json" + "log/slog" + "net/http" + + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/writer" +) + +var _ writer.ResponseWriter = (*ResponseWriter)(nil) + +type ResponseWriter struct { + logger *slog.Logger + + w http.ResponseWriter +} + +func New(logger *slog.Logger, w http.ResponseWriter) ResponseWriter { + return ResponseWriter{ + logger: logger.With("writer", "http-writer"), + w: w, + } +} + +func (h ResponseWriter) WriteResponse(response any) { + // TODO use amino encoding + if err := json.NewEncoder(h.w).Encode(response); err != nil { + h.logger.Error( + "unable to encode JSON response", + "err", err, + ) + } +} diff --git a/tm2/pkg/bft/rpc/lib/server/writer/writer.go b/tm2/pkg/bft/rpc/lib/server/writer/writer.go new file mode 100644 index 00000000000..d86a25e2359 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/writer/writer.go @@ -0,0 +1,9 @@ +package writer + +// ResponseWriter outlines the interface any +// JSON-RPC response writer needs to implement +type ResponseWriter interface { + // WriteResponse takes in the JSON-RPC response + // which is either a single object, or a batch + WriteResponse(response any) +} diff --git a/tm2/pkg/bft/rpc/lib/server/writer/ws/ws.go b/tm2/pkg/bft/rpc/lib/server/writer/ws/ws.go new file mode 100644 index 00000000000..7cbbf94d1f6 --- /dev/null +++ b/tm2/pkg/bft/rpc/lib/server/writer/ws/ws.go @@ -0,0 +1,45 @@ +package ws + +import ( + "encoding/json" + "log/slog" + + "github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/writer" + "github.com/olahol/melody" +) + +var _ writer.ResponseWriter = (*ResponseWriter)(nil) + +type ResponseWriter struct { + logger *slog.Logger + + s *melody.Session +} + +func New(logger *slog.Logger, s *melody.Session) ResponseWriter { + return ResponseWriter{ + logger: logger.With("writer", "ws-writer"), + s: s, + } +} + +func (w ResponseWriter) WriteResponse(response any) { + // TODO use amino encoding + + jsonRaw, encodeErr := json.Marshal(response) + if encodeErr != nil { + w.logger.Error( + "unable to encode JSON-RPC response", + "err", encodeErr, + ) + + return + } + + if err := w.s.Write(jsonRaw); err != nil { + w.logger.Error( + "unable to write WS response", + "err", err, + ) + } +} diff --git a/tm2/pkg/bft/rpc/lib/types/types.go b/tm2/pkg/bft/rpc/lib/types/types.go deleted file mode 100644 index 02c49c3c9f9..00000000000 --- a/tm2/pkg/bft/rpc/lib/types/types.go +++ /dev/null @@ -1,324 +0,0 @@ -package rpctypes - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "reflect" - - "github.com/gnolang/gno/tm2/pkg/amino" - "github.com/gnolang/gno/tm2/pkg/errors" -) - -// JSONRPCID is a wrapper type for JSON-RPC request IDs, -// which can be a string value | number value | not set (nil) -type JSONRPCID interface { - String() string -} - -// JSONRPCStringID a wrapper for JSON-RPC string IDs -type JSONRPCStringID string - -func (id JSONRPCStringID) String() string { - return string(id) -} - -// JSONRPCIntID a wrapper for JSON-RPC integer IDs -type JSONRPCIntID int - -func (id JSONRPCIntID) String() string { - return fmt.Sprintf("%d", id) -} - -// parseID parses the given ID value -func parseID(idValue any) (JSONRPCID, error) { - switch id := idValue.(type) { - case string: - return 
JSONRPCStringID(id), nil - case float64: - // json.Unmarshal uses float64 for all numbers - // (https://golang.org/pkg/encoding/json/#Unmarshal), - // but the JSONRPC2.0 spec says the id SHOULD NOT contain - // decimals - so we truncate the decimals here. - return JSONRPCIntID(int(id)), nil - default: - typ := reflect.TypeOf(id) - return nil, fmt.Errorf("JSON-RPC ID (%v) is of unknown type (%v)", id, typ) - } -} - -// ---------------------------------------- -// REQUEST - -type RPCRequest struct { - JSONRPC string `json:"jsonrpc"` - ID JSONRPCID `json:"id"` - Method string `json:"method"` - Params json.RawMessage `json:"params"` // must be map[string]interface{} or []interface{} -} - -// UnmarshalJSON custom JSON unmarshalling due to JSONRPCID being string or int -func (request *RPCRequest) UnmarshalJSON(data []byte) error { - unsafeReq := &struct { - JSONRPC string `json:"jsonrpc"` - ID any `json:"id"` - Method string `json:"method"` - Params json.RawMessage `json:"params"` // must be map[string]any or []any - }{} - - if err := json.Unmarshal(data, &unsafeReq); err != nil { - return fmt.Errorf("unable to JSON-parse the RPC request, %w", err) - } - - request.JSONRPC = unsafeReq.JSONRPC - request.Method = unsafeReq.Method - request.Params = unsafeReq.Params - - // Check if the ID is set - if unsafeReq.ID == nil { - return nil - } - - // Parse the ID - id, err := parseID(unsafeReq.ID) - if err != nil { - return fmt.Errorf("unable to parse request ID, %w", err) - } - - request.ID = id - - return nil -} - -func NewRPCRequest(id JSONRPCID, method string, params json.RawMessage) RPCRequest { - return RPCRequest{ - JSONRPC: "2.0", - ID: id, - Method: method, - Params: params, - } -} - -func (request RPCRequest) String() string { - return fmt.Sprintf("[%s %s]", request.ID, request.Method) -} - -// MapToRequest generates an RPC request with the given ID and method. -// The params are encoded as a JSON map -func MapToRequest(id JSONRPCID, method string, params map[string]any) (RPCRequest, error) { - params_ := make(map[string]json.RawMessage, len(params)) - for name, value := range params { - valueJSON, err := amino.MarshalJSON(value) - if err != nil { - return RPCRequest{}, fmt.Errorf("unable to parse param, %w", err) - } - - params_[name] = valueJSON - } - - payload, err := json.Marshal(params_) // NOTE: Amino doesn't handle maps yet. 
- if err != nil { - return RPCRequest{}, fmt.Errorf("unable to JSON marshal params, %w", err) - } - - return NewRPCRequest(id, method, payload), nil -} - -// ---------------------------------------- -// RESPONSE - -type RPCError struct { - Code int `json:"code"` - Message string `json:"message"` - Data string `json:"data,omitempty"` -} - -func (err RPCError) Error() string { - const baseFormat = "RPC error %d - %s" - if err.Data != "" { - return fmt.Sprintf(baseFormat+": %s", err.Code, err.Message, err.Data) - } - - return fmt.Sprintf(baseFormat, err.Code, err.Message) -} - -type RPCResponse struct { - JSONRPC string `json:"jsonrpc"` - ID JSONRPCID `json:"id"` - Result json.RawMessage `json:"result,omitempty"` - Error *RPCError `json:"error,omitempty"` -} - -type ( - RPCRequests []RPCRequest - RPCResponses []RPCResponse -) - -// UnmarshalJSON custom JSON unmarshalling due to JSONRPCID being string or int -func (response *RPCResponse) UnmarshalJSON(data []byte) error { - unsafeResp := &struct { - JSONRPC string `json:"jsonrpc"` - ID any `json:"id"` - Result json.RawMessage `json:"result,omitempty"` - Error *RPCError `json:"error,omitempty"` - }{} - - // Parse the response - if err := json.Unmarshal(data, &unsafeResp); err != nil { - return fmt.Errorf("unable to JSON-parse the RPC response, %w", err) - } - - response.JSONRPC = unsafeResp.JSONRPC - response.Error = unsafeResp.Error - response.Result = unsafeResp.Result - - // Check if any response ID is set - if unsafeResp.ID == nil { - return nil - } - - // Parse the ID - id, err := parseID(unsafeResp.ID) - if err != nil { - return fmt.Errorf("unable to parse response ID, %w", err) - } - - response.ID = id - - return nil -} - -func NewRPCSuccessResponse(id JSONRPCID, res any) RPCResponse { - var rawMsg json.RawMessage - - if res != nil { - var js []byte - js, err := amino.MarshalJSON(res) - if err != nil { - return RPCInternalError(id, errors.Wrap(err, "Error marshalling response")) - } - rawMsg = js - } - - return RPCResponse{JSONRPC: "2.0", ID: id, Result: rawMsg} -} - -func NewRPCErrorResponse(id JSONRPCID, code int, msg string, data string) RPCResponse { - return RPCResponse{ - JSONRPC: "2.0", - ID: id, - Error: &RPCError{Code: code, Message: msg, Data: data}, - } -} - -func (response RPCResponse) String() string { - if response.Error == nil { - return fmt.Sprintf("[%s %v]", response.ID, response.Result) - } - return fmt.Sprintf("[%s %s]", response.ID, response.Error) -} - -func RPCParseError(id JSONRPCID, err error) RPCResponse { - return NewRPCErrorResponse(id, -32700, "Parse error. Invalid JSON", err.Error()) -} - -func RPCInvalidRequestError(id JSONRPCID, err error) RPCResponse { - return NewRPCErrorResponse(id, -32600, "Invalid Request", err.Error()) -} - -func RPCMethodNotFoundError(id JSONRPCID) RPCResponse { - return NewRPCErrorResponse(id, -32601, "Method not found", "") -} - -func RPCInvalidParamsError(id JSONRPCID, err error) RPCResponse { - return NewRPCErrorResponse(id, -32602, "Invalid params", err.Error()) -} - -func RPCInternalError(id JSONRPCID, err error) RPCResponse { - return NewRPCErrorResponse(id, -32603, "Internal error", err.Error()) -} - -// ---------------------------------------- - -// WSRPCConnection represents a websocket connection. -type WSRPCConnection interface { - // GetRemoteAddr returns a remote address of the connection. - GetRemoteAddr() string - // WriteRPCResponses writes the resp onto connection (BLOCKING). 
- WriteRPCResponses(resp RPCResponses) - // TryWriteRPCResponses tries to write the resp onto connection (NON-BLOCKING). - TryWriteRPCResponses(resp RPCResponses) bool - // Context returns the connection's context. - Context() context.Context -} - -// Context is the first parameter for all functions. It carries a json-rpc -// request, http request and websocket connection. -// -// - JSONReq is non-nil when JSONRPC is called over websocket or HTTP. -// - WSConn is non-nil when we're connected via a websocket. -// - HTTPReq is non-nil when URI or JSONRPC is called over HTTP. -type Context struct { - // json-rpc request - JSONReq *RPCRequest - // websocket connection - WSConn WSRPCConnection - // http request - HTTPReq *http.Request -} - -// RemoteAddr returns the remote address (usually a string "IP:port"). -// If neither HTTPReq nor WSConn is set, an empty string is returned. -// HTTP: -// -// http.Request#RemoteAddr -// -// WS: -// -// result of GetRemoteAddr -func (ctx *Context) RemoteAddr() string { - if ctx.HTTPReq != nil { - return ctx.HTTPReq.RemoteAddr - } else if ctx.WSConn != nil { - return ctx.WSConn.GetRemoteAddr() - } - return "" -} - -// Context returns the request's context. -// The returned context is always non-nil; it defaults to the background context. -// HTTP: -// -// The context is canceled when the client's connection closes, the request -// is canceled (with HTTP/2), or when the ServeHTTP method returns. -// -// WS: -// -// The context is canceled when the client's connections closes. -func (ctx *Context) Context() context.Context { - if ctx.HTTPReq != nil { - return ctx.HTTPReq.Context() - } else if ctx.WSConn != nil { - return ctx.WSConn.Context() - } - return context.Background() -} - -// NewHTTPStatusError returns an error meant to be used in the rpc handlers to signal to the server -// that it must answer with a specific http error code -func NewHTTPStatusError(code int, message string) error { - return &HTTPStatusError{Code: code, Message: message} -} - -// HTTPStatusError is an error meant to be returned in the rpc handlers to signal to the server -// that it must answer with a specific http error code -type HTTPStatusError struct { - Code int - Message string -} - -// Error implements error. -func (h *HTTPStatusError) Error() string { - return fmt.Sprintf("%d: %s", h.Code, h.Message) -}
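
Reviewer note (not part of the diff): a minimal wiring sketch showing how the new server pieces above fit together. The handler signature is an assumption inferred from how route() calls entry.fn — the handlers type itself is defined outside this changeset — and the "echo" method, its parameter name, and the listen address are made up for illustration.

package main

import (
	"log/slog"
	"net/http"
	"os"

	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server"
	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/metadata"
	"github.com/gnolang/gno/tm2/pkg/bft/rpc/lib/server/spec"
	"github.com/go-chi/chi/v5"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	// Create the JSON-RPC server and register a single method.
	// Assumed handler signature (inferred from route()):
	//   func(*metadata.Metadata, []any) (any, *spec.BaseJSONError)
	j := server.NewJSONRPC(server.WithLogger(logger))
	j.RegisterHandler(
		"echo",
		func(_ *metadata.Metadata, params []any) (any, *spec.BaseJSONError) {
			if len(params) != 1 {
				return nil, spec.GenerateInvalidParamError(1)
			}

			return params[0], nil
		},
		"value", // canonical param name, used by the GET /echo?value=... endpoint
	)

	// SetupRoutes mounts:
	//   POST /           JSON-RPC over HTTP (single or batch body)
	//   GET  /{method}   browser-friendly endpoint built from query params
	//   GET  /websocket  JSON-RPC over WS
	mux := j.SetupRoutes(chi.NewMux())

	// Example request (the Content-Type header is required by the
	// AllowContentType middleware):
	//   curl -X POST localhost:26657 \
	//     -H 'Content-Type: application/json' \
	//     -d '{"jsonrpc":"2.0","id":1,"method":"echo","params":["hi"]}'
	_ = http.ListenAndServe("localhost:26657", mux)
}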