diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e655fa42b..a681e7277 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -241,7 +241,7 @@ jobs: run: | npm ci npm run build - npm run start & + npm run start > ocean-node.log 2>&1 & env: PRIVATE_KEY: ${{ secrets.PRIVATE_KEY }} IPFS_GATEWAY: http://172.15.0.16:8080/ @@ -287,6 +287,21 @@ jobs: run: npm run test:system env: AVOID_LOOP_RUN: true + - name: Show Ocean Node logs on failure + if: failure() + working-directory: ${{ github.workspace }}/ocean-node + run: | + echo "=== Ocean Node Logs ===" + if [ -f ocean-node.log ]; then + cat ocean-node.log + else + echo "No ocean-node.log file found" + fi + echo "=== Docker Logs ===" + docker logs ocean-contracts-1 || echo "No ocean-contracts-1 container" + docker logs ocean-typesense-1 || echo "No ocean-typesense-1 container" + echo "=== System Processes ===" + ps aux | grep -E "(node|ocean)" || echo "No node/ocean processes found" control_panel_build: runs-on: ubuntu-latest diff --git a/config.json b/config.json new file mode 100644 index 000000000..1db2149a8 --- /dev/null +++ b/config.json @@ -0,0 +1,148 @@ +{ + "authorizedDecrypters": [], + "authorizedDecryptersList": [], + "allowedValidators": [], + "allowedValidatorsList": [], + "authorizedPublishers": [], + "authorizedPublishersList": [], + "keys": {}, + "hasIndexer": true, + "hasHttp": true, + "hasP2P": true, + "p2pConfig": { + "bootstrapNodes": [], + "bootstrapTimeout": 20000, + "bootstrapTagName": "bootstrap", + "bootstrapTagValue": 50, + "bootstrapTTL": 0, + "enableIPV4": true, + "enableIPV6": true, + "ipV4BindAddress": "0.0.0.0", + "ipV4BindTcpPort": 9000, + "ipV4BindWsPort": 0, + "ipV6BindAddress": "::1", + "ipV6BindTcpPort": 0, + "ipV6BindWsPort": 0, + "announceAddresses": [], + "pubsubPeerDiscoveryInterval": 10000, + "dhtMaxInboundStreams": 500, + "dhtMaxOutboundStreams": 500, + "dhtFilter": null, + "mDNSInterval": 20000, + "connectionsMaxParallelDials": 15, + "connectionsDialTimeout": 30000, + "upnp": true, + "autoNat": true, + "enableCircuitRelayServer": false, + "enableCircuitRelayClient": false, + "circuitRelays": 0, + "announcePrivateIp": false, + "filterAnnouncedAddresses": [ + "127.0.0.0/8", + "10.0.0.0/8", + "172.16.0.0/12", + "192.168.0.0/16", + "100.64.0.0/10", + "169.254.0.0/16", + "192.0.0.0/24", + "192.0.2.0/24", + "198.51.100.0/24", + "203.0.113.0/24", + "224.0.0.0/4", + "240.0.0.0/4" + ], + "minConnections": 1, + "maxConnections": 300, + "autoDialPeerRetryThreshold": 7200000, + "autoDialConcurrency": 5, + "maxPeerAddrsToDial": 5, + "autoDialInterval": 5000, + "enableNetworkStats": false + }, + "hasControlPanel": true, + "httpPort": 8001, + "dbConfig": { + "url": "http://localhost:8108/?apiKey=xyz", + "username": "", + "password": "", + "dbType": "typesense" + }, + "supportedNetworks": { + "8996": { + "rpc": "http://127.0.0.1:8545", + "chainId": 8996, + "network": "development", + "chunkSize": 100 + } + }, + "feeStrategy": {}, + "c2dClusters": [], + "ipfsGateway": "https://ipfs.io/", + "arweaveGateway": "https://arweave.net/", + "accountPurgatoryUrl": null, + "assetPurgatoryUrl": null, + "allowedAdmins": [], + "allowedAdminsList": [], + "rateLimit": 30, + "maxConnections": 30, + "denyList": { + "peers": [], + "ips": [] + }, + "unsafeURLs": [], + "isBootstrap": false, + "claimDurationTimeout": 600, + "validateUnsignedDDO": true, + "jwtSecret": "ocean-node-secret", + "dockerComputeEnvironments": [ + { + "socketPath": "/var/run/docker.sock", + "resources": [ + { + "id": "disk", + 
"total": 1 + } + ], + "storageExpiry": 604800, + "maxJobDuration": 3600, + "access": { + "addresses": [], + "accessLists": [] + }, + "fees": { + "8996": [ + { + "prices": [ + { + "id": "cpu", + "price": 1 + } + ] + } + ] + }, + "free": { + "maxJobDuration": 3600, + "maxJobs": 3, + "access": { + "addresses": [], + "accessLists": [] + }, + "resources": [ + { + "id": "cpu", + "max": 1 + }, + { + "id": "ram", + "max": 1 + }, + { + "id": "disk", + "max": 1 + } + ] + } + } + ] +} diff --git a/docs/API.md b/docs/API.md index 455b4aeaa..10d887219 100644 --- a/docs/API.md +++ b/docs/API.md @@ -1198,6 +1198,91 @@ Forwards request to PolicyServer (if any) } ``` +--- + +## Fetch Config + +### `HTTP` GET /api/admin/config + +#### Description + +returns current node configuration with sensitive data hidden (admin only) + +#### Parameters + +| name | type | required | description | +| --------------- | ------ | -------- | -------------------------------------------- | +| expiryTimestamp | number | v | expiry timestamp for the request | +| signature | string | v | signed message to authenticate admin request | + +#### Request + +```json +{ + "expiryTimestamp": 1234567890, + "signature": "0x123" +} +``` + +#### Response + +```json +{ + "keys": { + "privateKey": "[*** HIDDEN CONTENT ***]" + }, + "chainIds": [1], + "rpcs": { "1": "https://eth-mainnet.g.alchemy.com/v2/..." }, + "...": "..." +} +``` + +--- + +## Update Config + +### `HTTP` POST /api/admin/config/update + +#### Description + +updates node configuration and reloads it gracefully (admin only) + +#### Parameters + +| name | type | required | description | +| --------------- | ------ | -------- | -------------------------------------------------- | +| expiryTimestamp | number | v | expiry timestamp for the request | +| signature | string | v | signed message to authenticate admin request | +| config | object | v | partial configuration object with fields to update | + +#### Request + +```json +{ + "expiryTimestamp": 1234567890, + "signature": "0x123", + "config": { + "chainIds": [1], + "rpcs": { "1": "https://eth-mainnet.g.alchemy.com/v2/..." } + } +} +``` + +#### Response + +```json +{ + "keys": { + "privateKey": "[*** HIDDEN CONTENT ***]" + }, + "chainIds": [1], + "rpcs": { "1": "https://eth-mainnet.g.alchemy.com/v2/..." }, + "...": "..." +} +``` + +--- + # Compute For starters, you can find a list of algorithms in the [Ocean Algorithms repository](https://github.com/oceanprotocol/algo_dockers) and the docker images in the [Algo Dockerhub](https://hub.docker.com/r/oceanprotocol/algo_dockers/tags). diff --git a/docs/env.md b/docs/env.md index 6b1f59006..74c561bf5 100644 --- a/docs/env.md +++ b/docs/env.md @@ -138,6 +138,10 @@ The `DOCKER_COMPUTE_ENVIRONMENTS` environment variable should be a JSON array of ], "storageExpiry": 604800, "maxJobDuration": 3600, + "access": { + "addresses": ["0x123", "0x456"], + "accessLists": [] + }, "fees": { "1": [ { @@ -154,6 +158,10 @@ The `DOCKER_COMPUTE_ENVIRONMENTS` environment variable should be a JSON array of "free": { "maxJobDuration": 60, "maxJobs": 3, + "access": { + "addresses": [], + "accessLists": ["0x789"] + }, "resources": [ { "id": "cpu", @@ -178,6 +186,9 @@ The `DOCKER_COMPUTE_ENVIRONMENTS` environment variable should be a JSON array of - **socketPath**: Path to the Docker socket (e.g., docker.sock). 
diff --git a/docs/env.md b/docs/env.md
index 6b1f59006..74c561bf5 100644
--- a/docs/env.md
+++ b/docs/env.md
@@ -138,6 +138,10 @@ The `DOCKER_COMPUTE_ENVIRONMENTS` environment variable should be a JSON array of
     ],
     "storageExpiry": 604800,
     "maxJobDuration": 3600,
+    "access": {
+      "addresses": ["0x123", "0x456"],
+      "accessLists": []
+    },
     "fees": {
       "1": [
         {
@@ -154,6 +158,10 @@ The `DOCKER_COMPUTE_ENVIRONMENTS` environment variable should be a JSON array of
     "free": {
       "maxJobDuration": 60,
       "maxJobs": 3,
+      "access": {
+        "addresses": [],
+        "accessLists": ["0x789"]
+      },
       "resources": [
         {
           "id": "cpu",
@@ -178,6 +186,9 @@ The `DOCKER_COMPUTE_ENVIRONMENTS` environment variable should be a JSON array of
 - **socketPath**: Path to the Docker socket (e.g., docker.sock).
 - **storageExpiry**: Amount of seconds for storage expiry. (Mandatory)
 - **maxJobDuration**: Maximum duration in seconds for a job. (Mandatory)
+- **access**: Access control configuration for paid compute jobs. If both `addresses` and `accessLists` are empty, all addresses are allowed (see the sketch after this list).
+  - **addresses**: Array of Ethereum addresses allowed to run compute jobs. If empty and no access lists are configured, all addresses are allowed.
+  - **accessLists**: Array of AccessList contract addresses. Users holding NFTs from these contracts can run compute jobs. Checked across all supported networks.
 - **fees**: Fee structure for the compute environment.
   - **feeToken**: Token address for the fee.
   - **prices**: Array of resource pricing information.
@@ -192,6 +203,9 @@ The `DOCKER_COMPUTE_ENVIRONMENTS` environment variable should be a JSON array of
   - **storageExpiry**: Amount of seconds for storage expiry for free jobs.
   - **maxJobDuration**: Maximum duration in seconds for a free job.
   - **maxJobs**: Maximum number of simultaneous free jobs.
+  - **access**: Access control configuration for free compute jobs. Works the same as the main `access` field.
+    - **addresses**: Array of Ethereum addresses allowed to run free compute jobs.
+    - **accessLists**: Array of AccessList contract addresses for free compute access control.
   - **resources**: Array of resources available for free jobs.
     - **id**: Resource type (e.g., `cpu`, `ram`, `disk`).
     - **total**: Total number of the resource available.
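To make the access semantics above concrete, here is a minimal TypeScript sketch of such a gate. This is illustrative only — it is not the PR's implementation, and the ERC-721-style `balanceOf` call on the AccessList contracts is an assumption based on the NFT wording; the node would also repeat the check on every supported network.

```typescript
import { Contract, Signer } from 'ethers'

interface ComputeAccessList {
  addresses: string[]
  accessLists: string[]
}

// Sketch of the documented rules: empty config means open access; otherwise a
// consumer passes if explicitly listed, or if it holds an access-list NFT.
async function isAllowedToCompute(
  consumer: string,
  access: ComputeAccessList,
  signer: Signer
): Promise<boolean> {
  if (access.addresses.length === 0 && access.accessLists.length === 0) {
    return true // no restrictions configured
  }
  const allowed = access.addresses.map((a) => a.toLowerCase())
  if (allowed.includes(consumer.toLowerCase())) return true
  // assumed ERC-721-style balanceOf on each AccessList contract;
  // a real node would run this loop once per supported chain
  const abi = ['function balanceOf(address owner) view returns (uint256)']
  for (const listAddress of access.accessLists) {
    const accessList = new Contract(listAddress, abi, signer)
    if ((await accessList.balanceOf(consumer)) > 0n) return true
  }
  return false
}
```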
diff --git a/package-lock.json b/package-lock.json
index 3f8f53996..c5fa29c3e 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -37,7 +37,7 @@
     "@oceanprotocol/contracts": "^2.4.0",
     "@oceanprotocol/ddo-js": "^0.1.4",
     "@types/lodash.clonedeep": "^4.5.7",
-    "aws-sdk": "^2.1591.0",
+    "aws-sdk": "^2.1692.0",
     "axios": "^1.12.0",
     "base58-js": "^2.0.0",
     "cors": "^2.8.5",
diff --git a/package.json b/package.json
index 941106390..0e709ec7f 100644
--- a/package.json
+++ b/package.json
@@ -76,7 +76,7 @@
     "@oceanprotocol/contracts": "^2.4.0",
     "@oceanprotocol/ddo-js": "^0.1.4",
     "@types/lodash.clonedeep": "^4.5.7",
-    "aws-sdk": "^2.1591.0",
+    "aws-sdk": "^2.1692.0",
    "axios": "^1.12.0",
    "base58-js": "^2.0.0",
    "cors": "^2.8.5",
diff --git a/src/@types/C2D/C2D.ts b/src/@types/C2D/C2D.ts
index f6cdb9ed6..db54e6812 100644
--- a/src/@types/C2D/C2D.ts
+++ b/src/@types/C2D/C2D.ts
@@ -80,12 +80,18 @@ export interface RunningPlatform {
   os?: string
 }
+export interface ComputeAccessList {
+  addresses: string[]
+  accessLists: string[]
+}
+
 export interface ComputeEnvironmentFreeOptions {
   // only if a compute env exposes free jobs
   storageExpiry?: number
   maxJobDuration?: number
   maxJobs?: number // maximum number of simultaneous free jobs
   resources?: ComputeResource[]
+  access: ComputeAccessList
 }
 export interface ComputeEnvironmentBaseConfig {
   description?: string // v1
@@ -95,6 +101,7 @@
   maxJobs?: number // maximum number of simultaneous paid jobs
   fees: ComputeEnvFeesStructure
   resources?: ComputeResource[]
+  access: ComputeAccessList
   free?: ComputeEnvironmentFreeOptions
   platform: RunningPlatform
 }
@@ -126,6 +133,7 @@ export interface C2DDockerConfig {
   fees: ComputeEnvFeesStructure
   resources?: ComputeResource[] // optional, owner can overwrite
   free?: ComputeEnvironmentFreeOptions
+  access: ComputeAccessList
 }
 export type ComputeResultType =
@@ -221,6 +229,7 @@ export interface DBComputeJobPayment {
   token: string
   lockTx: string
   claimTx: string
+  cost: number
 }
 // this is the internal structure
@@ -243,6 +252,7 @@ export interface DBComputeJob extends ComputeJob {
   payment?: DBComputeJobPayment
   metadata?: DBComputeJobMetadata
   additionalViewers?: string[] // addresses of additional addresses that can get results
+  algoDuration: number // duration of the job in seconds
 }
 // make sure we keep them both in sync
diff --git a/src/@types/OceanNode.ts b/src/@types/OceanNode.ts
index 6eb5fba01..cbb7949a3 100644
--- a/src/@types/OceanNode.ts
+++ b/src/@types/OceanNode.ts
@@ -1,6 +1,6 @@
 import { Stream } from 'stream'
 import { RPCS } from './blockchain'
-import { C2DClusterInfo } from './C2D/C2D'
+import { C2DClusterInfo, C2DDockerConfig } from './C2D/C2D'
 import { FeeStrategy } from './Fees'
 import { Schema } from '../components/database'
@@ -82,6 +82,7 @@ export interface AccessListContract {
 }
 export interface OceanNodeConfig {
+  dockerComputeEnvironments: C2DDockerConfig[]
   authorizedDecrypters: string[]
   authorizedDecryptersList: AccessListContract | null
   allowedValidators: string[]
@@ -97,13 +98,14 @@
   dbConfig?: OceanNodeDBConfig
   httpPort: number
   feeStrategy: FeeStrategy
+  ipfsGateway?: string | null
+  arweaveGateway?: string | null
   supportedNetworks?: RPCS
   claimDurationTimeout: number
   indexingNetworks?: RPCS
   c2dClusters: C2DClusterInfo[]
-  c2dNodeUri: string
-  accountPurgatoryUrl: string
-  assetPurgatoryUrl: string
+  accountPurgatoryUrl: string | null
+  assetPurgatoryUrl: string | null
   allowedAdmins?: string[]
   allowedAdminsList?: AccessListContract | null
   codeHash?: string
diff --git a/src/@types/commands.ts b/src/@types/commands.ts
index f00cdfc40..767836057 100644
--- a/src/@types/commands.ts
+++ b/src/@types/commands.ts
@@ -160,6 +160,12 @@ export interface AdminReindexChainCommand extends AdminCommand {
   block?: number
 }
+export interface AdminFetchConfigCommand extends AdminCommand {}
+
+export interface AdminPushConfigCommand extends AdminCommand {
+  config: Record<string, any>
+}
+
 export interface ICommandHandler {
   handle(command: Command): Promise
   verifyParamsAndRateLimits(task: Command): Promise
 }
@@ -288,3 +294,9 @@ export interface InvalidateAuthTokenCommand extends Command {
   signature: string
   token: string
 }
+
+export interface GetJobsCommand extends Command {
+  environments?: string[]
+  fromTimestamp?: string
+  consumerAddrs?: string[]
+}
diff --git a/src/components/Indexer/crawlerThread.ts b/src/components/Indexer/crawlerThread.ts
index 13b35b0cc..7e1a059f2 100644
--- a/src/components/Indexer/crawlerThread.ts
+++ b/src/components/Indexer/crawlerThread.ts
@@ -127,6 +127,7 @@ export async function processNetworkData(
   // we can override the default value of 30 secs, by setting process.env.INDEXER_INTERVAL
   const interval = getCrawlingInterval()
   let { chunkSize } = rpcDetails
+  let successfulRetrievalCount = 0
   let lockProccessing = false
   while (true) {
@@ -166,6 +167,7 @@
         startBlock,
         blocksToProcess
       )
+      successfulRetrievalCount++
     } catch (error) {
       INDEXER_LOGGER.log(
         LOG_LEVELS_STR.LEVEL_WARN,
         true
       )
       chunkSize = Math.floor(chunkSize / 2) < 1 ? 1 : Math.floor(chunkSize / 2)
+      successfulRetrievalCount = 0
       INDEXER_LOGGER.logMessage(
         `network: ${rpcDetails.network} Reducing chunk size ${chunkSize} `,
         true
       )
@@ -202,11 +205,20 @@
       currentBlock = lastIndexedBlock
     }
     checkNewlyIndexedAssets(processedBlocks.foundEvents)
-    chunkSize = chunkSize !== 1 ?
chunkSize : rpcDetails.chunkSize + // Revert to original chunk size after 3 successful retrieveChunkEvents calls + if (successfulRetrievalCount >= 3 && chunkSize < rpcDetails.chunkSize) { + chunkSize = rpcDetails.chunkSize + successfulRetrievalCount = 0 + INDEXER_LOGGER.logMessage( + `network: ${rpcDetails.network} Reverting chunk size back to original ${chunkSize} after 3 successful calls`, + true + ) + } } catch (error) { INDEXER_LOGGER.error( `Processing event from network failed network: ${rpcDetails.network} Error: ${error.message} ` ) + successfulRetrievalCount = 0 // since something went wrong, we will not update the last indexed block // so we will try to process the same chunk again // after some sleep diff --git a/src/components/Indexer/index.ts b/src/components/Indexer/index.ts index f76e551cd..462d43dea 100644 --- a/src/components/Indexer/index.ts +++ b/src/components/Indexer/index.ts @@ -214,8 +214,18 @@ export class OceanIndexer { } private setupEventListeners(worker: Worker, chainId: number) { - worker.on('message', (event: any) => { - if (event.data) { + worker.on('message', async (event: any) => { + try { + if (!event.data) { + INDEXER_LOGGER.log( + LOG_LEVELS_STR.LEVEL_ERROR, + `Missing event data (ddo) on postMessage. Something is wrong! Event: ${JSON.stringify( + event + )}`, + true + ) + } + if ( [ EVENTS.METADATA_CREATED, @@ -230,21 +240,21 @@ export class OceanIndexer { EVENTS.EXCHANGE_RATE_CHANGED ].includes(event.method) ) { - // will emit the metadata created/updated event and advertise it to the other peers (on create only) INDEXER_LOGGER.logMessage( `Emiting "${event.method}" for DDO : ${event.data.id} from network: ${chainId} ` ) - INDEXER_DDO_EVENT_EMITTER.emit(event.method, event.data.id) - // remove from indexing list + await Promise.resolve( + INDEXER_DDO_EVENT_EMITTER.emit(event.method, event.data.id) + ) } else if (event.method === INDEXER_CRAWLING_EVENTS.REINDEX_QUEUE_POP) { - // remove this one from the queue (means we processed the reindex for this tx) INDEXING_QUEUE = INDEXING_QUEUE.filter( (task) => task.txId !== event.data.txId && task.chainId !== event.data.chainId ) - // reindex tx successfully done - INDEXER_CRAWLING_EVENT_EMITTER.emit( - INDEXER_CRAWLING_EVENTS.REINDEX_TX, // explicitly set constant value for readability - event.data + await Promise.resolve( + INDEXER_CRAWLING_EVENT_EMITTER.emit( + INDEXER_CRAWLING_EVENTS.REINDEX_TX, + event.data + ) ) this.updateJobStatus( PROTOCOL_COMMANDS.REINDEX_TX, @@ -252,10 +262,11 @@ export class OceanIndexer { CommandStatus.SUCCESS ) } else if (event.method === INDEXER_CRAWLING_EVENTS.REINDEX_CHAIN) { - // we should listen to this on the dashboard for instance - INDEXER_CRAWLING_EVENT_EMITTER.emit( - INDEXER_CRAWLING_EVENTS.REINDEX_CHAIN, - event.data + await Promise.resolve( + INDEXER_CRAWLING_EVENT_EMITTER.emit( + INDEXER_CRAWLING_EVENTS.REINDEX_CHAIN, + event.data + ) ) this.updateJobStatus( PROTOCOL_COMMANDS.REINDEX_CHAIN, @@ -263,12 +274,14 @@ export class OceanIndexer { event.data.result ? CommandStatus.SUCCESS : CommandStatus.FAILURE ) } else if (event.method === INDEXER_CRAWLING_EVENTS.CRAWLING_STARTED) { - INDEXER_CRAWLING_EVENT_EMITTER.emit(event.method, event.data) + await Promise.resolve( + INDEXER_CRAWLING_EVENT_EMITTER.emit(event.method, event.data) + ) } - } else { + } catch (err) { INDEXER_LOGGER.log( LOG_LEVELS_STR.LEVEL_ERROR, - 'Missing event data (ddo) on postMessage. Something is wrong!', + `Worker handler failed after retries: ${err?.message ?? 
err}`, true ) } diff --git a/src/components/Indexer/processor.ts b/src/components/Indexer/processor.ts index de6e55d10..0be240106 100644 --- a/src/components/Indexer/processor.ts +++ b/src/components/Indexer/processor.ts @@ -107,7 +107,7 @@ export const processChunkLogs = async ( if (!allowed.length) { INDEXER_LOGGER.log( LOG_LEVELS_STR.LEVEL_ERROR, - `Metadata Proof validators list is empty`, + `Metadata Proof validators list is empty. Block/event for tx ${log.transactionHash} was NOT processed due to no allowed validators.`, true ) continue @@ -150,6 +150,11 @@ export const processChunkLogs = async ( } // move on to the next (do not process this event) if (isAllowed === false) { + INDEXER_LOGGER.log( + LOG_LEVELS_STR.LEVEL_ERROR, + `Block/event for tx ${log.transactionHash} was NOT processed because none of the metadata validators are part of the access list group(s) for chain ${chainId}.`, + true + ) continue } } // end if (allowedValidatorsList) { diff --git a/src/components/Indexer/processors/BaseProcessor.ts b/src/components/Indexer/processors/BaseProcessor.ts index 20bb341bc..4c0b976b1 100644 --- a/src/components/Indexer/processors/BaseProcessor.ts +++ b/src/components/Indexer/processors/BaseProcessor.ts @@ -9,8 +9,7 @@ import { toUtf8Bytes, hexlify, getBytes, - toUtf8String, - getAddress + toUtf8String } from 'ethers' import { Readable } from 'winston-transport' import { DecryptDDOCommand } from '../../../@types/commands.js' @@ -20,15 +19,15 @@ import { timestampToDateTime } from '../../../utils/conversions.js' import { getConfiguration } from '../../../utils/config.js' import { create256Hash } from '../../../utils/crypt.js' import { getDatabase } from '../../../utils/database.js' -import { CORE_LOGGER, INDEXER_LOGGER } from '../../../utils/logging/common.js' +import { INDEXER_LOGGER } from '../../../utils/logging/common.js' import { LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' import { URLUtils } from '../../../utils/url.js' import { streamToString } from '../../../utils/util.js' import ERC721Template from '@oceanprotocol/contracts/artifacts/contracts/templates/ERC721Template.sol/ERC721Template.json' assert { type: 'json' } import { toString as uint8ArrayToString } from 'uint8arrays/to-string' import ERC20Template from '@oceanprotocol/contracts/artifacts/contracts/templates/ERC20TemplateEnterprise.sol/ERC20TemplateEnterprise.json' assert { type: 'json' } -import { createHash } from 'node:crypto' -import { AbstractDdoDatabase } from '../../database/BaseDatabase.js' +import { fetchTransactionReceipt } from '../../core/utils/validateOrders.js' +import { withRetrial } from '../utils.js' export abstract class BaseEventProcessor { protected networkId: number @@ -91,39 +90,47 @@ export abstract class BaseEventProcessor { eventType: string ): Promise { const iface = new Interface(abi) - const receipt = await provider.getTransactionReceipt(transactionHash) - - let eventHash: string - for (const [key, value] of Object.entries(EVENT_HASHES)) { - if (value.type === eventType) { - eventHash = key - break - } - } - if (eventHash === '') { - INDEXER_LOGGER.error(`Event hash couldn't be found!`) - return null + let receipt: ethers.TransactionReceipt + try { + receipt = await fetchTransactionReceipt(transactionHash, provider) + } catch (e) { + INDEXER_LOGGER.error(`Error retrieving receipt: ${e.message}`) } + if (receipt) { + let eventHash: string + for (const [key, value] of Object.entries(EVENT_HASHES)) { + if (value.type === eventType) { + eventHash = key + break + } + } + if 
(eventHash === '') { + INDEXER_LOGGER.error(`Event hash couldn't be found!`) + return null + } - let eventObj: any - for (const log of receipt.logs) { - if (log.topics[0] === eventHash) { - eventObj = { - topics: log.topics, - data: log.data + let eventObj: any + for (const log of receipt.logs) { + if (log.topics[0] === eventHash) { + eventObj = { + topics: log.topics, + data: log.data + } + break } - break } - } - if (!eventObj) { - INDEXER_LOGGER.error( - `Event object couldn't be retrieved! Event hash not present in logs topics` - ) - return null - } + if (!eventObj) { + INDEXER_LOGGER.error( + `Event object couldn't be retrieved! Event hash not present in logs topics` + ) + return null + } - return iface.parseLog(eventObj) + return iface.parseLog(eventObj) + } else { + INDEXER_LOGGER.error('Receipt could not be fetched') + } } protected async getNFTInfo( @@ -158,6 +165,7 @@ export abstract class BaseEventProcessor { return saveDDO } + const saveDDO = await ddoDatabase.update({ ...ddo.getDDOData() }) await ddoState.update( this.networkId, @@ -197,37 +205,6 @@ export abstract class BaseEventProcessor { return true } - protected async getDDO( - ddoDatabase: AbstractDdoDatabase, - nftAddress: string, - chainId: number - ): Promise { - const did = - 'did:op:' + - createHash('sha256') - .update(getAddress(nftAddress) + chainId.toString(10)) - .digest('hex') - const didOpe = - 'did:ope:' + - createHash('sha256') - .update(getAddress(nftAddress) + chainId.toString(10)) - .digest('hex') - - let ddo = await ddoDatabase.retrieve(did) - if (!ddo) { - INDEXER_LOGGER.logMessage( - `Detected OrderStarted changed for ${did}, but it does not exists, try with ddo:ope.` - ) - ddo = await ddoDatabase.retrieve(didOpe) - if (!ddo) { - INDEXER_LOGGER.logMessage( - `Detected OrderStarted changed for ${didOpe}, but it does not exists.` - ) - } - } - return ddo - } - protected async decryptDDO( decryptorURL: string, flag: string, @@ -239,25 +216,59 @@ export abstract class BaseEventProcessor { metadata: any ): Promise { let ddo - if (parseInt(flag) === 2) { + // Log the flag value + INDEXER_LOGGER.logMessage(`decryptDDO: flag=${flag}`) + if ((parseInt(flag) & 2) !== 0) { INDEXER_LOGGER.logMessage( `Decrypting DDO from network: ${this.networkId} created by: ${eventCreator} encrypted by: ${decryptorURL}` ) - const nonce = Math.floor(Date.now() / 1000).toString() const config = await getConfiguration() const { keys } = config + let nonce: string + try { + if (URLUtils.isValidUrl(decryptorURL)) { + INDEXER_LOGGER.logMessage( + `decryptDDO: Making HTTP request for nonce. DecryptorURL: ${decryptorURL}` + ) + const nonceResponse = await axios.get( + `${decryptorURL}/api/services/nonce?userAddress=${keys.ethAddress}`, + { timeout: 20000 } + ) + nonce = + nonceResponse.status === 200 && nonceResponse.data + ? 
String(parseInt(nonceResponse.data.nonce) + 1)
+              : Date.now().toString()
+        } else {
+          nonce = Date.now().toString()
+        }
+      } catch (err) {
+        INDEXER_LOGGER.log(
+          LOG_LEVELS_STR.LEVEL_ERROR,
+          `decryptDDO: Error getting nonce, using timestamp: ${err.message}`
+        )
+        nonce = Date.now().toString()
+      }
     const nodeId = keys.peerId.toString()
     const wallet: ethers.Wallet = new ethers.Wallet(process.env.PRIVATE_KEY as string)
+    const useTxIdOrContractAddress = txId || contractAddress
     const message = String(
-      txId + contractAddress + keys.ethAddress + chainId.toString() + nonce
+      useTxIdOrContractAddress + keys.ethAddress + chainId.toString() + nonce
     )
-    const consumerMessage = ethers.solidityPackedKeccak256(
+
+    const messageHash = ethers.solidityPackedKeccak256(
       ['bytes'],
       [ethers.hexlify(ethers.toUtf8Bytes(message))]
     )
-    const signature = await wallet.signMessage(consumerMessage)
+
+    const messageHashBytes = ethers.getBytes(messageHash)
+    const signature = await wallet.signMessage(messageHashBytes)
+
+    const recoveredAddress = ethers.verifyMessage(messageHashBytes, signature)
+    INDEXER_LOGGER.logMessage(
+      `decryptDDO: recovered address: ${recoveredAddress}, expected: ${keys.ethAddress}`
+    )
     if (URLUtils.isValidUrl(decryptorURL)) {
       try {
@@ -269,16 +280,38 @@
           signature,
           nonce
         }
-        const response = await axios({
-          method: 'post',
-          url: `${decryptorURL}/api/services/decrypt`,
-          data: payload
+        const response = await withRetrial(async () => {
+          try {
+            const res = await axios({
+              method: 'post',
+              url: `${decryptorURL}/api/services/decrypt`,
+              data: payload,
+              timeout: 30000
+            })
+
+            if (res.status !== 200 && res.status !== 201) {
+              const message = `Provider exception on decrypt DDO. Status: ${res.status}, ${res.statusText}`
+              INDEXER_LOGGER.log(LOG_LEVELS_STR.LEVEL_ERROR, message)
+              throw new Error(message) // do NOT retry
+            }
+            return res
+          } catch (err: any) {
+            // Retry ONLY on ECONNREFUSED
+            if (
+              err.code === 'ECONNREFUSED' ||
+              (err.message && err.message.includes('ECONNREFUSED'))
+            ) {
+              INDEXER_LOGGER.log(
+                LOG_LEVELS_STR.LEVEL_ERROR,
+                `Decrypt request failed with ECONNREFUSED, retrying...`,
+                true
+              )
+              throw err
+            }
+
+            throw err
+          }
         })
-        if (response.status !== 200) {
-          const message = `bProvider exception on decrypt DDO. Status: ${response.status}, ${response.statusText}`
-          INDEXER_LOGGER.log(LOG_LEVELS_STR.LEVEL_ERROR, message)
-          throw new Error(message)
-        }
         let responseHash
         if (response.data instanceof Object) {
@@ -294,7 +327,6 @@
           throw new Error(msg)
         }
       } catch (err) {
-        CORE_LOGGER.error(`Error on decrypting DDO: ${JSON.stringify(err)}`)
         const message = `Provider exception on decrypt DDO.
Status: ${err.message}`
        INDEXER_LOGGER.log(LOG_LEVELS_STR.LEVEL_ERROR, message)
        throw new Error(message)
@@ -399,36 +431,6 @@
     return ddo
   }
-  protected decryptDDOIPFS(
-    decryptorURL: string,
-    eventCreator: string,
-    metadata: any
-  ): Promise {
-    INDEXER_LOGGER.logMessage(
-      `Decompressing DDO from network: ${this.networkId} created by: ${eventCreator} ecnrypted by: ${decryptorURL}`
-    )
-    const byteArray = getBytes(metadata)
-    const utf8String = toUtf8String(byteArray)
-    const proof = JSON.parse(utf8String)
-    return proof
-  }
-
-  protected getDataFromProof(
-    proof: any
-  ): { header: any; ddoObj: Record; signature: string } | null {
-    INDEXER_LOGGER.logMessage(`Decompressing JWT`)
-    const data = proof.split('.')
-    if (data.length > 2) {
-      const header = JSON.parse(Buffer.from(data[0], 'base64').toString('utf-8'))
-      let ddoObj = JSON.parse(Buffer.from(data[1], 'base64').toString('utf-8'))
-      if (ddoObj.vc) ddoObj = ddoObj.vc
-      const signature = data[2]
-
-      return { header, ddoObj, signature }
-    }
-    return null
-  }
-
   public abstract processEvent(
     event: ethers.Log,
     chainId: number,
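The signing change in `decryptDDO` above is easy to miss: the node now signs the raw 32 bytes of the keccak digest (an EIP-191 personal message over the hash bytes) instead of the hex string, so `verifyMessage` over the same bytes recovers the node's address. A condensed, self-contained restatement of that flow — the function name is illustrative, the ethers calls mirror the diff:

```typescript
import {
  Wallet,
  solidityPackedKeccak256,
  toUtf8Bytes,
  hexlify,
  getBytes,
  verifyMessage
} from 'ethers'

// Mirrors the decryptDDO change: hash the packed UTF-8 message, then sign the
// 32 raw hash bytes (not the hex string) so the counterparty can recover the
// node's address with verifyMessage over the same bytes.
async function signDecryptRequest(
  wallet: Wallet,
  txIdOrContract: string,
  ethAddress: string,
  chainId: number,
  nonce: string
): Promise<string> {
  const message = String(txIdOrContract + ethAddress + chainId.toString() + nonce)
  const messageHash = solidityPackedKeccak256(['bytes'], [hexlify(toUtf8Bytes(message))])
  const signature = await wallet.signMessage(getBytes(messageHash))
  // sanity check mirroring the indexer's "recovered address" log line
  if (verifyMessage(getBytes(messageHash), signature) !== wallet.address) {
    throw new Error('signature does not recover to the signing address')
  }
  return signature
}
```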
diff --git a/src/components/Indexer/processors/DispenserActivatedEventProcessor.ts b/src/components/Indexer/processors/DispenserActivatedEventProcessor.ts
index 87223b102..34a919256 100644
--- a/src/components/Indexer/processors/DispenserActivatedEventProcessor.ts
+++ b/src/components/Indexer/processors/DispenserActivatedEventProcessor.ts
@@ -1,5 +1,5 @@
 import { DDOManager } from '@oceanprotocol/ddo-js'
-import { ethers, Signer, JsonRpcApiProvider } from 'ethers'
+import { ethers, Signer, JsonRpcApiProvider, ZeroAddress } from 'ethers'
 import { EVENTS } from '../../../utils/constants.js'
 import { getDatabase } from '../../../utils/database.js'
 import { INDEXER_LOGGER } from '../../../utils/logging/common.js'
@@ -9,7 +9,9 @@ import {
   getDid,
   doesDispenserAlreadyExist,
   findServiceIdByDatatoken,
-  getPricesByDt
+  getPricesByDt,
+  isValidDispenserContract,
+  getDidOpe
 } from '../utils.js'
 import { BaseEventProcessor } from './BaseProcessor.js'
 import Dispenser from '@oceanprotocol/contracts/artifacts/contracts/pools/dispenser/Dispenser.sol/Dispenser.json' assert { type: 'json' }
@@ -28,18 +30,46 @@
       EVENTS.DISPENSER_ACTIVATED
     )
     const datatokenAddress = decodedEventData.args[0].toString()
+    if (!datatokenAddress) {
+      INDEXER_LOGGER.error(
+        `Datatoken address is not found in decoded event. Decoded event: ${JSON.stringify(
+          decodedEventData
+        )}`
+      )
+      return null
+    }
+    if (datatokenAddress === ZeroAddress) {
+      INDEXER_LOGGER.error(
+        `Datatoken address is ZERO ADDRESS.
+        Cannot find DDO by ZERO ADDRESS contract.`
+      )
+      return null
+    }
     const datatokenContract = getDtContract(signer, datatokenAddress)
     const nftAddress = await datatokenContract.getERC721Address()
-    const did = getDid(nftAddress, chainId)
+    let did = getDidOpe(nftAddress, chainId)
     try {
       const { ddo: ddoDatabase } = await getDatabase()
-      const ddo = await ddoDatabase.retrieve(did)
+      let ddo = await ddoDatabase.retrieve(did)
       if (!ddo) {
         INDEXER_LOGGER.logMessage(
           `Detected DispenserActivated changed for ${did}, but it does not exists.`
         )
-        return
+        did = getDid(nftAddress, chainId)
+        ddo = await ddoDatabase.retrieve(did)
+        if (!ddo) {
+          INDEXER_LOGGER.logMessage(
+            `Detected DispenserActivated changed for ${did}, but it does not exist`
+          )
+          return
+        }
+      }
+      if (!(await isValidDispenserContract(event.address, chainId, signer))) {
+        INDEXER_LOGGER.warn(
+          `Dispenser contract ${event.address} is not approved by Router.
+          Abort updating DDO pricing! Returning the existing DDO...`
+        )
+        return ddo
       }
       const ddoInstance = DDOManager.getDDOClass(ddo)
       if (!ddoInstance.getAssetFields().indexedMetadata) {
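Every processor below repeats the same lookup: derive the `did:ope` form first, then fall back to the legacy `did:op` form before giving up. A small helper expressing the shared pattern — a sketch only, since the PR keeps this logic inline in each processor:

```typescript
import { getDid, getDidOpe } from '../utils.js'

// Sketch of the lookup pattern repeated across these processors: try the
// did:ope derivation first, then fall back to the classic did:op form.
async function retrieveByEitherDid(
  ddoDatabase: { retrieve(did: string): Promise<any> },
  nftAddress: string,
  chainId: number
): Promise<{ did: string; ddo: any } | null> {
  let did = getDidOpe(nftAddress, chainId)
  let ddo = await ddoDatabase.retrieve(did)
  if (!ddo) {
    did = getDid(nftAddress, chainId)
    ddo = await ddoDatabase.retrieve(did)
  }
  return ddo ? { did, ddo } : null
}
```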
diff --git a/src/components/Indexer/processors/DispenserCreatedEventProcessor.ts b/src/components/Indexer/processors/DispenserCreatedEventProcessor.ts
index 1651911c1..b5027e261 100644
--- a/src/components/Indexer/processors/DispenserCreatedEventProcessor.ts
+++ b/src/components/Indexer/processors/DispenserCreatedEventProcessor.ts
@@ -1,5 +1,5 @@
 import { DDOManager, PriceType } from '@oceanprotocol/ddo-js'
-import { ethers, Signer, JsonRpcApiProvider } from 'ethers'
+import { ethers, Signer, JsonRpcApiProvider, ZeroAddress } from 'ethers'
 import { EVENTS } from '../../../utils/constants.js'
 import { getDatabase } from '../../../utils/database.js'
 import { INDEXER_LOGGER } from '../../../utils/logging/common.js'
@@ -9,7 +9,9 @@ import {
   getDid,
   doesDispenserAlreadyExist,
   findServiceIdByDatatoken,
-  getPricesByDt
+  getPricesByDt,
+  isValidDispenserContract,
+  getDidOpe
 } from '../utils.js'
 import { BaseEventProcessor } from './BaseProcessor.js'
 import Dispenser from '@oceanprotocol/contracts/artifacts/contracts/pools/dispenser/Dispenser.sol/Dispenser.json' assert { type: 'json' }
@@ -28,18 +30,46 @@
       EVENTS.DISPENSER_CREATED
     )
     const datatokenAddress = decodedEventData.args[0].toString()
+    if (!datatokenAddress) {
+      INDEXER_LOGGER.error(
+        `Datatoken address is not found in decoded event. Decoded event: ${JSON.stringify(
+          decodedEventData
+        )}`
+      )
+      return null
+    }
+    if (datatokenAddress === ZeroAddress) {
+      INDEXER_LOGGER.error(
+        `Datatoken address is ZERO ADDRESS.
+        Cannot find DDO by ZERO ADDRESS contract.`
+      )
+      return null
+    }
     const datatokenContract = getDtContract(signer, datatokenAddress)
     const nftAddress = await datatokenContract.getERC721Address()
-    const did = getDid(nftAddress, chainId)
+    let did = getDidOpe(nftAddress, chainId)
     try {
       const { ddo: ddoDatabase } = await getDatabase()
-      const ddo = await ddoDatabase.retrieve(did)
+      let ddo = await ddoDatabase.retrieve(did)
       if (!ddo) {
         INDEXER_LOGGER.logMessage(
           `Detected DispenserCreated changed for ${did}, but it does not exists.`
         )
-        return
+        did = getDid(nftAddress, chainId)
+        ddo = await ddoDatabase.retrieve(did)
+        if (!ddo) {
+          INDEXER_LOGGER.logMessage(
+            `Detected DispenserCreated changed for ${did}, but it does not exist`
+          )
+          return
+        }
+      }
+      if (!(await isValidDispenserContract(event.address, chainId, signer))) {
+        INDEXER_LOGGER.warn(
+          `Dispenser contract ${event.address} is not approved by Router.
+          Abort updating DDO pricing! Returning the existing DDO...`
+        )
+        return ddo
       }
       const ddoInstance = DDOManager.getDDOClass(ddo)
       if (!ddoInstance.getAssetFields().indexedMetadata) {
diff --git a/src/components/Indexer/processors/DispenserDeactivatedEventProcessor.ts b/src/components/Indexer/processors/DispenserDeactivatedEventProcessor.ts
index 3c800e634..0b7d751ea 100644
--- a/src/components/Indexer/processors/DispenserDeactivatedEventProcessor.ts
+++ b/src/components/Indexer/processors/DispenserDeactivatedEventProcessor.ts
@@ -1,5 +1,5 @@
 import { DDOManager } from '@oceanprotocol/ddo-js'
-import { ethers, Signer, JsonRpcApiProvider } from 'ethers'
+import { ethers, Signer, JsonRpcApiProvider, ZeroAddress } from 'ethers'
 import { EVENTS } from '../../../utils/constants.js'
 import { getDatabase } from '../../../utils/database.js'
 import { INDEXER_LOGGER } from '../../../utils/logging/common.js'
@@ -9,7 +9,9 @@ import {
   getDid,
   doesDispenserAlreadyExist,
   findServiceIdByDatatoken,
-  getPricesByDt
+  getPricesByDt,
+  isValidDispenserContract,
+  getDidOpe
 } from '../utils.js'
 import { BaseEventProcessor } from './BaseProcessor.js'
 import Dispenser from '@oceanprotocol/contracts/artifacts/contracts/pools/dispenser/Dispenser.sol/Dispenser.json' assert { type: 'json' }
@@ -28,18 +30,46 @@
       EVENTS.DISPENSER_DEACTIVATED
     )
     const datatokenAddress = decodedEventData.args[0].toString()
+    if (!datatokenAddress) {
+      INDEXER_LOGGER.error(
+        `Datatoken address is not found in decoded event. Decoded event: ${JSON.stringify(
+          decodedEventData
+        )}`
+      )
+      return null
+    }
+    if (datatokenAddress === ZeroAddress) {
+      INDEXER_LOGGER.error(
+        `Datatoken address is ZERO ADDRESS.
+        Cannot find DDO by ZERO ADDRESS contract.`
+      )
+      return null
+    }
     const datatokenContract = getDtContract(signer, datatokenAddress)
     const nftAddress = await datatokenContract.getERC721Address()
-    const did = getDid(nftAddress, chainId)
+    let did = getDidOpe(nftAddress, chainId)
     try {
       const { ddo: ddoDatabase } = await getDatabase()
-      const ddo = await ddoDatabase.retrieve(did)
+      let ddo = await ddoDatabase.retrieve(did)
       if (!ddo) {
         INDEXER_LOGGER.logMessage(
           `Detected DispenserDeactivated changed for ${did}, but it does not exists.`
         )
-        return
+        did = getDid(nftAddress, chainId)
+        ddo = await ddoDatabase.retrieve(did)
+        if (!ddo) {
+          INDEXER_LOGGER.logMessage(
+            `Detected DispenserDeactivated changed for ${did}, but it does not exist`
+          )
+          return
+        }
+      }
+      if (!(await isValidDispenserContract(event.address, chainId, signer))) {
+        INDEXER_LOGGER.warn(
+          `Dispenser contract ${event.address} is not approved by Router.
+          Abort updating DDO pricing! Returning the existing DDO...`
+        )
+        return ddo
       }
       const ddoInstance = DDOManager.getDDOClass(ddo)
       if (!ddoInstance.getAssetFields().indexedMetadata) {
diff --git a/src/components/Indexer/processors/ExchangeActivatedEventProcessor.ts b/src/components/Indexer/processors/ExchangeActivatedEventProcessor.ts
index 28fa7a429..6e59908fe 100644
--- a/src/components/Indexer/processors/ExchangeActivatedEventProcessor.ts
+++ b/src/components/Indexer/processors/ExchangeActivatedEventProcessor.ts
@@ -1,5 +1,5 @@
 import { DDOManager } from '@oceanprotocol/ddo-js'
-import { ethers, Signer, JsonRpcApiProvider } from 'ethers'
+import { ethers, Signer, JsonRpcApiProvider, ZeroAddress } from 'ethers'
 import { EVENTS } from '../../../utils/constants.js'
 import { getDatabase } from '../../../utils/database.js'
 import { INDEXER_LOGGER } from '../../../utils/logging/common.js'
@@ -9,7 +9,9 @@ import {
   getDid,
   doesFreAlreadyExist,
   findServiceIdByDatatoken,
-  getPricesByDt
+  getPricesByDt,
+  isValidFreContract,
+  getDidOpe
 } from '../utils.js'
 import { BaseEventProcessor } from './BaseProcessor.js'
 import FixedRateExchange from '@oceanprotocol/contracts/artifacts/contracts/pools/fixedRate/FixedRateExchange.sol/FixedRateExchange.json' assert { type: 'json' }
@@ -22,6 +24,12 @@
     provider: JsonRpcApiProvider
   ): Promise {
     try {
+      if (!(await isValidFreContract(event.address, chainId, signer))) {
+        INDEXER_LOGGER.error(
+          `Fixed Rate Exchange contract ${event.address} is not approved by Router. Abort updating DDO pricing!`
+        )
+        return null
+      }
       const decodedEventData = await this.getEventData(
         provider,
         event.transactionHash,
@@ -41,17 +49,30 @@
       const exchange = await freContract.getExchange(exchangeId)
       const datatokenAddress = exchange[1]
+      if (datatokenAddress === ZeroAddress) {
+        INDEXER_LOGGER.error(
+          `Datatoken address is ZERO ADDRESS.
Cannot find DDO by ZERO ADDRESS contract.` + ) + return null + } const datatokenContract = getDtContract(signer, datatokenAddress) const nftAddress = await datatokenContract.getERC721Address() - const did = getDid(nftAddress, chainId) + let did = getDidOpe(nftAddress, chainId) const { ddo: ddoDatabase } = await getDatabase() - const ddo = await ddoDatabase.retrieve(did) + let ddo = await ddoDatabase.retrieve(did) if (!ddo) { INDEXER_LOGGER.logMessage( `Detected ExchangeActivated changed for ${did}, but it does not exists.` ) - return + did = getDid(nftAddress, chainId) + ddo = await ddoDatabase.retrieve(did) + if (!ddo) { + INDEXER_LOGGER.logMessage( + `Detected ExchangeActivated changed for ${did}, but it does not exists` + ) + return + } } const ddoInstance = DDOManager.getDDOClass(ddo) @@ -87,7 +108,7 @@ export class ExchangeActivatedEventProcessor extends BaseEventProcessor { INDEXER_LOGGER.logMessage( `[ExchangeActivated] - This datatoken does not contain this service. Invalid service id!` ) - return + return null } const { stats } = ddoInstance.getAssetFields().indexedMetadata stats.push({ diff --git a/src/components/Indexer/processors/ExchangeCreatedEventProcessor.ts b/src/components/Indexer/processors/ExchangeCreatedEventProcessor.ts index b9a02c53f..61143cf82 100644 --- a/src/components/Indexer/processors/ExchangeCreatedEventProcessor.ts +++ b/src/components/Indexer/processors/ExchangeCreatedEventProcessor.ts @@ -1,5 +1,5 @@ import { DDOManager } from '@oceanprotocol/ddo-js' -import { ethers, Signer, JsonRpcApiProvider } from 'ethers' +import { ethers, Signer, JsonRpcApiProvider, ZeroAddress } from 'ethers' import { EVENTS } from '../../../utils/constants.js' import { getDatabase } from '../../../utils/database.js' import { INDEXER_LOGGER } from '../../../utils/logging/common.js' @@ -9,7 +9,9 @@ import { getDid, doesFreAlreadyExist, findServiceIdByDatatoken, - getPricesByDt + getPricesByDt, + isValidFreContract, + getDidOpe } from '../utils.js' import { BaseEventProcessor } from './BaseProcessor.js' import FixedRateExchange from '@oceanprotocol/contracts/artifacts/contracts/pools/fixedRate/FixedRateExchange.sol/FixedRateExchange.json' assert { type: 'json' } @@ -22,6 +24,12 @@ export class ExchangeCreatedEventProcessor extends BaseEventProcessor { provider: JsonRpcApiProvider ): Promise { try { + if (!(await isValidFreContract(event.address, chainId, signer))) { + INDEXER_LOGGER.error( + `Fixed Rate Exhange contract ${event.address} is not approved by Router. Abort updating DDO pricing!` + ) + return null + } const decodedEventData = await this.getEventData( provider, event.transactionHash, @@ -37,17 +45,30 @@ export class ExchangeCreatedEventProcessor extends BaseEventProcessor { const exchange = await freContract.getExchange(exchangeId) const datatokenAddress = exchange[1] + if (datatokenAddress === ZeroAddress) { + INDEXER_LOGGER.error( + `Datatoken address is ZERO ADDRESS. 
Cannot find DDO by ZERO ADDRESS contract.` + ) + return null + } const datatokenContract = getDtContract(signer, datatokenAddress) const nftAddress = await datatokenContract.getERC721Address() - const did = getDid(nftAddress, chainId) + let did = getDidOpe(nftAddress, chainId) const { ddo: ddoDatabase } = await getDatabase() - const ddo = await ddoDatabase.retrieve(did) + let ddo = await ddoDatabase.retrieve(did) if (!ddo) { INDEXER_LOGGER.logMessage( `Detected ExchangeCreated changed for ${did}, but it does not exists.` ) - return + did = getDid(nftAddress, chainId) + ddo = await ddoDatabase.retrieve(did) + if (!ddo) { + INDEXER_LOGGER.logMessage( + `Detected ExchangeCreated changed for ${did}, but it does not exists` + ) + return + } } const ddoInstance = DDOManager.getDDOClass(ddo) @@ -84,7 +105,7 @@ export class ExchangeCreatedEventProcessor extends BaseEventProcessor { INDEXER_LOGGER.logMessage( `[ExchangeCreated] - This datatoken does not contain this service. Invalid service id!` ) - return + return null } const { stats } = ddoInstance.getAssetFields().indexedMetadata @@ -100,10 +121,7 @@ export class ExchangeCreatedEventProcessor extends BaseEventProcessor { ddoInstance.updateFields({ indexedMetadata: { stats } }) } - const savedDDO = await this.createOrUpdateDDO( - ddoInstance, - EVENTS.EXCHANGE_ACTIVATED - ) + const savedDDO = await this.createOrUpdateDDO(ddoInstance, EVENTS.EXCHANGE_CREATED) return savedDDO } catch (err) { INDEXER_LOGGER.log(LOG_LEVELS_STR.LEVEL_ERROR, `Error retrieving DDO: ${err}`, true) diff --git a/src/components/Indexer/processors/ExchangeDeactivatedEventProcessor.ts b/src/components/Indexer/processors/ExchangeDeactivatedEventProcessor.ts index 7b1200812..74d6ede30 100644 --- a/src/components/Indexer/processors/ExchangeDeactivatedEventProcessor.ts +++ b/src/components/Indexer/processors/ExchangeDeactivatedEventProcessor.ts @@ -1,5 +1,5 @@ import { DDOManager } from '@oceanprotocol/ddo-js' -import { ethers, Signer, JsonRpcApiProvider } from 'ethers' +import { ethers, Signer, JsonRpcApiProvider, ZeroAddress } from 'ethers' import { EVENTS } from '../../../utils/constants.js' import { getDatabase } from '../../../utils/database.js' import { INDEXER_LOGGER } from '../../../utils/logging/common.js' @@ -9,7 +9,9 @@ import { getDid, doesFreAlreadyExist, findServiceIdByDatatoken, - getPricesByDt + getPricesByDt, + isValidFreContract, + getDidOpe } from '../utils.js' import { BaseEventProcessor } from './BaseProcessor.js' import FixedRateExchange from '@oceanprotocol/contracts/artifacts/contracts/pools/fixedRate/FixedRateExchange.sol/FixedRateExchange.json' assert { type: 'json' } @@ -21,6 +23,12 @@ export class ExchangeDeactivatedEventProcessor extends BaseEventProcessor { signer: Signer, provider: JsonRpcApiProvider ): Promise { + if (!(await isValidFreContract(event.address, chainId, signer))) { + INDEXER_LOGGER.error( + `Fixed Rate Exhange contract ${event.address} is not approved by Router. 
+        Abort updating DDO pricing!`
+      )
+      return null
+    }
     const decodedEventData = await this.getEventData(
       provider,
       event.transactionHash,
@@ -29,19 +37,43 @@
     )
     const exchangeId = decodedEventData.args[0].toString()
     const freContract = new ethers.Contract(event.address, FixedRateExchange.abi, signer)
-    const exchange = await freContract.getExchange(exchangeId)
+    let exchange
+    try {
+      exchange = await freContract.getExchange(exchangeId)
+    } catch (e) {
+      INDEXER_LOGGER.error(`Could not fetch exchange details: ${e.message}`)
+    }
+    if (!exchange) {
+      INDEXER_LOGGER.error(
+        `Exchange not found... Aborting processing of the ExchangeDeactivated event`
+      )
+      return null
+    }
     const datatokenAddress = exchange[1]
+    if (datatokenAddress === ZeroAddress) {
+      INDEXER_LOGGER.error(
+        `Datatoken address is ZERO ADDRESS. Cannot find DDO by ZERO ADDRESS contract.`
+      )
+      return null
+    }
     const datatokenContract = getDtContract(signer, datatokenAddress)
     const nftAddress = await datatokenContract.getERC721Address()
-    const did = getDid(nftAddress, chainId)
+    let did = getDidOpe(nftAddress, chainId)
     try {
       const { ddo: ddoDatabase } = await getDatabase()
-      const ddo = await ddoDatabase.retrieve(did)
+      let ddo = await ddoDatabase.retrieve(did)
       if (!ddo) {
         INDEXER_LOGGER.logMessage(
           `Detected ExchangeDeactivated changed for ${did}, but it does not exists.`
         )
-        return
+        did = getDid(nftAddress, chainId)
+        ddo = await ddoDatabase.retrieve(did)
+        if (!ddo) {
+          INDEXER_LOGGER.logMessage(
+            `Detected ExchangeDeactivated changed for ${did}, but it does not exist`
+          )
+          return
+        }
       }
       const ddoInstance = DDOManager.getDDOClass(ddo)
@@ -81,7 +113,7 @@
         INDEXER_LOGGER.logMessage(
           `[ExchangeDeactivated] - This datatoken does not contain this service. Invalid service id!`
         )
-        return
+        return null
       }
       const { stats } = ddoInstance.getAssetFields().indexedMetadata
       stats.push({
diff --git a/src/components/Indexer/processors/ExchangeRateChangedEventProcessor.ts b/src/components/Indexer/processors/ExchangeRateChangedEventProcessor.ts
index 8c82d5ef8..ec1d70c15 100644
--- a/src/components/Indexer/processors/ExchangeRateChangedEventProcessor.ts
+++ b/src/components/Indexer/processors/ExchangeRateChangedEventProcessor.ts
@@ -1,5 +1,5 @@
 import { DDOManager } from '@oceanprotocol/ddo-js'
-import { ethers, Signer, JsonRpcApiProvider } from 'ethers'
+import { ethers, Signer, JsonRpcApiProvider, ZeroAddress } from 'ethers'
 import { EVENTS } from '../../../utils/constants.js'
 import { getDatabase } from '../../../utils/database.js'
 import { INDEXER_LOGGER } from '../../../utils/logging/common.js'
@@ -9,7 +9,9 @@ import {
   getDid,
   doesFreAlreadyExist,
   findServiceIdByDatatoken,
-  getPricesByDt
+  getPricesByDt,
+  isValidFreContract,
+  getDidOpe
 } from '../utils.js'
 import { BaseEventProcessor } from './BaseProcessor.js'
 import FixedRateExchange from '@oceanprotocol/contracts/artifacts/contracts/pools/fixedRate/FixedRateExchange.sol/FixedRateExchange.json' assert { type: 'json' }
@@ -22,6 +24,12 @@
     provider: JsonRpcApiProvider
   ): Promise {
     try {
+      if (!(await isValidFreContract(event.address, chainId, signer))) {
+        INDEXER_LOGGER.error(
+          `Fixed Rate Exchange contract ${event.address} is not approved by Router.
Abort updating DDO pricing!` + ) + return null + } const decodedEventData = await this.getEventData( provider, event.transactionHash, @@ -37,17 +45,30 @@ export class ExchangeRateChangedEventProcessor extends BaseEventProcessor { ) const exchange = await freContract.getExchange(exchangeId) const datatokenAddress = exchange[1] + if (datatokenAddress === ZeroAddress) { + INDEXER_LOGGER.error( + `Datatoken address is ZERO ADDRESS. Cannot find DDO by ZERO ADDRESS contract.` + ) + return null + } const datatokenContract = getDtContract(signer, datatokenAddress) const nftAddress = await datatokenContract.getERC721Address() - const did = getDid(nftAddress, chainId) + let did = getDidOpe(nftAddress, chainId) const { ddo: ddoDatabase } = await getDatabase() - const ddo = await ddoDatabase.retrieve(did) + let ddo = await ddoDatabase.retrieve(did) if (!ddo) { INDEXER_LOGGER.logMessage( `Detected ExchangeRateChanged changed for ${did}, but it does not exists.` ) - return + did = getDid(nftAddress, chainId) + ddo = await ddoDatabase.retrieve(did) + if (!ddo) { + INDEXER_LOGGER.logMessage( + `Detected ExchangeRateChanged changed for ${did}, but it does not exists` + ) + return + } } const ddoInstance = DDOManager.getDDOClass(ddo) diff --git a/src/components/Indexer/processors/MetadataEventProcessor.ts b/src/components/Indexer/processors/MetadataEventProcessor.ts index 34115bcb4..e523e6dd2 100644 --- a/src/components/Indexer/processors/MetadataEventProcessor.ts +++ b/src/components/Indexer/processors/MetadataEventProcessor.ts @@ -11,15 +11,12 @@ import { checkCredentialOnAccessList } from '../../../utils/credentials.js' import { getDatabase } from '../../../utils/database.js' import { INDEXER_LOGGER } from '../../../utils/logging/common.js' import { LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' -import { asyncCallWithTimeout, streamToString } from '../../../utils/util.js' +import { asyncCallWithTimeout } from '../../../utils/util.js' import { PolicyServer } from '../../policyServer/index.js' import { wasNFTDeployedByOurFactory, getPricingStatsForDddo } from '../utils.js' import { BaseEventProcessor } from './BaseProcessor.js' import ERC721Template from '@oceanprotocol/contracts/artifacts/contracts/templates/ERC721Template.sol/ERC721Template.json' assert { type: 'json' } import { Purgatory } from '../purgatory.js' -import { isRemoteDDO } from '../../core/utils/validateDdoHandler.js' -import { Storage } from '../../storage/index.js' -import { Readable } from 'stream' export class MetadataEventProcessor extends BaseEventProcessor { async processEvent( @@ -56,7 +53,7 @@ export class MetadataEventProcessor extends BaseEventProcessor { const metadataHash = decodedEventData.args[5] const flag = decodedEventData.args[3] const owner = decodedEventData.args[0] - const decryptedDDO = await this.decryptDDO( + const ddo = await this.decryptDDO( decodedEventData.args[2], flag, owner, @@ -66,26 +63,6 @@ export class MetadataEventProcessor extends BaseEventProcessor { metadataHash, metadata ) - let ddo = await this.processDDO(decryptedDDO) - if ( - !isRemoteDDO(decryptedDDO) && - parseInt(flag) !== 2 && - !this.checkDdoHash(ddo, metadataHash) - ) { - return - } - if (ddo.encryptedData) { - const proof = await this.decryptDDOIPFS( - decodedEventData.args[2], - owner, - ddo.encryptedData - ) - const data = this.getDataFromProof(proof) - const ddoInstance = DDOManager.getDDOClass(data.ddoObj) - ddo = ddoInstance.updateFields({ - proof: { signature: data.signature, header: data.header } - }) - } const clonedDdo = 
structuredClone(ddo) const updatedDdo = deleteIndexedMetadataIfExists(clonedDdo) const ddoInstance = DDOManager.getDDOClass(updatedDdo) @@ -93,10 +70,27 @@ export class MetadataEventProcessor extends BaseEventProcessor { INDEXER_LOGGER.error( `Decrypted DDO ID is not matching the generated hash for DID.` ) + await ddoState.update( + this.networkId, + did, + event.address, + event.transactionHash, + false, + 'Decrypted DDO ID does not match generated DID.' + ) return } // for unencrypted DDOs - if (parseInt(flag) !== 2 && !this.checkDdoHash(updatedDdo, metadataHash)) { + if ((parseInt(flag) & 2) === 0 && !this.checkDdoHash(updatedDdo, metadataHash)) { + INDEXER_LOGGER.error('Unencrypted DDO hash does not match metadata hash.') + await ddoState.update( + this.networkId, + did, + event.address, + event.transactionHash, + false, + 'Unencrypted DDO hash does not match metadata hash.' + ) return } @@ -112,6 +106,14 @@ export class MetadataEventProcessor extends BaseEventProcessor { INDEXER_LOGGER.error( `DDO owner ${owner} is NOT part of the ${ENVIRONMENT_VARIABLES.AUTHORIZED_PUBLISHERS.name} group.` ) + await ddoState.update( + this.networkId, + did, + event.address, + event.transactionHash, + false, + `DDO owner ${owner} is NOT part of the ${ENVIRONMENT_VARIABLES.AUTHORIZED_PUBLISHERS.name} group.` + ) return } } @@ -127,6 +129,14 @@ export class MetadataEventProcessor extends BaseEventProcessor { INDEXER_LOGGER.error( `DDO owner ${owner} is NOT part of the ${ENVIRONMENT_VARIABLES.AUTHORIZED_PUBLISHERS_LIST.name} access group.` ) + await ddoState.update( + this.networkId, + did, + event.address, + event.transactionHash, + false, + `DDO owner ${owner} is NOT part of the ${ENVIRONMENT_VARIABLES.AUTHORIZED_PUBLISHERS_LIST.name} access group.` + ) return } } @@ -152,14 +162,35 @@ export class MetadataEventProcessor extends BaseEventProcessor { if (previousDdo) { previousDdoInstance = DDOManager.getDDOClass(previousDdo) } + if (eventName === EVENTS.METADATA_CREATED) { if ( previousDdoInstance && previousDdoInstance.getAssetFields().indexedMetadata.nft.state === MetadataStates.ACTIVE ) { + const previousTxId = + previousDdoInstance.getAssetFields().indexedMetadata?.event?.txid + // If it's the same transaction being reprocessed, just skip (idempotent) + if (previousTxId === event.transactionHash) { + INDEXER_LOGGER.logMessage( + `DDO ${ddoInstance.getDid()} already indexed from same transaction ${ + event.transactionHash + }. 
Skipping reprocessing.`, + true + ) + await ddoState.update( + this.networkId, + did, + event.address, + event.transactionHash, + true, + ' ' + ) + return + } INDEXER_LOGGER.logMessage( - `DDO ${ddoInstance.getDid()} is already registered as active`, + `DDO ${ddoInstance.getDid()} is already registered as active from different transaction ${previousTxId}`, true ) await ddoState.update( @@ -168,7 +199,7 @@ export class MetadataEventProcessor extends BaseEventProcessor { event.address, event.transactionHash, false, - `DDO ${ddoInstance.getDid()} is already registered as active` + `DDO ${ddoInstance.getDid()} is already registered as active from transaction ${previousTxId}` ) return } @@ -212,6 +243,7 @@ export class MetadataEventProcessor extends BaseEventProcessor { } const from = decodedEventData.args[0].toString() let ddoUpdatedWithPricing + // we need to store the event data (either metadata created or update and is updatable) if ( [EVENTS.METADATA_CREATED, EVENTS.METADATA_UPDATED].includes(eventName) && @@ -284,12 +316,12 @@ export class MetadataEventProcessor extends BaseEventProcessor { // always call, but only create instance once const purgatory = await Purgatory.getInstance() // if purgatory is disabled just return false - const state = await this.getPurgatoryState(ddo, from, purgatory) - - ddoUpdatedWithPricing.updateFields({ - indexedMetadata: { purgatory: { state } } - }) - if (state === false) { + const updatedDDO = await this.updatePurgatoryStateDdo( + ddoUpdatedWithPricing, + from, + purgatory + ) + if (updatedDDO.getAssetFields().indexedMetadata.purgatory.state === false) { // TODO: insert in a different collection for purgatory DDOs const saveDDO = await this.createOrUpdateDDO(ddoUpdatedWithPricing, eventName) INDEXER_LOGGER.logMessage(`saved DDO: ${JSON.stringify(saveDDO)}`) @@ -307,51 +339,41 @@ export class MetadataEventProcessor extends BaseEventProcessor { ) INDEXER_LOGGER.log( LOG_LEVELS_STR.LEVEL_ERROR, - `Error processMetadataEvents: ${error}`, + `Error processMetadataEvents for did: ${did} and txHash: ${event.transactionHash} and error: ${error}`, true ) } } - async getPurgatoryState( - ddo: any, - owner: string, - purgatory: Purgatory - ): Promise { - if (purgatory.isEnabled()) { - const state: boolean = - (await purgatory.isBannedAsset(ddo.id)) || - (await purgatory.isBannedAccount(owner)) - return state - } - return false - } - async updatePurgatoryStateDdo( ddo: VersionedDDO, owner: string, purgatory: Purgatory - ): Promise> { + ): Promise { if (!purgatory.isEnabled()) { - return ddo.updateFields({ + ddo.updateFields({ indexedMetadata: { purgatory: { state: false } } }) + + return ddo } const state: boolean = (await purgatory.isBannedAsset(ddo.getDid())) || (await purgatory.isBannedAccount(owner)) - return ddo.updateFields({ + ddo.updateFields({ indexedMetadata: { purgatory: { state } } }) + + return ddo } isUpdateable( @@ -377,18 +399,4 @@ export class MetadataEventProcessor extends BaseEventProcessor { return [true, ''] } - - async processDDO(ddo: any) { - if (isRemoteDDO(ddo)) { - INDEXER_LOGGER.logMessage('DDO is remote', true) - - const storage = Storage.getStorageClass(ddo.remote, await getConfiguration()) - const result = await storage.getReadableStream() - const streamToStringDDO = await streamToString(result.stream as Readable) - - return JSON.parse(streamToStringDDO) - } - - return ddo - } } diff --git a/src/components/Indexer/processors/OrderReusedEventProcessor.ts b/src/components/Indexer/processors/OrderReusedEventProcessor.ts index 
c47c4c00a..0fe715ced 100644 --- a/src/components/Indexer/processors/OrderReusedEventProcessor.ts +++ b/src/components/Indexer/processors/OrderReusedEventProcessor.ts @@ -8,7 +8,8 @@ import { getDtContract, getDid, findServiceIdByDatatoken, - getPricesByDt + getPricesByDt, + getDidOpe } from '../utils.js' import { BaseEventProcessor } from './BaseProcessor.js' import ERC20Template from '@oceanprotocol/contracts/artifacts/contracts/templates/ERC20TemplateEnterprise.sol/ERC20TemplateEnterprise.json' assert { type: 'json' } @@ -34,15 +35,22 @@ export class OrderReusedEventProcessor extends BaseEventProcessor { const datatokenContract = getDtContract(signer, event.address) const nftAddress = await datatokenContract.getERC721Address() - const did = getDid(nftAddress, chainId) + let did = getDidOpe(nftAddress, chainId) try { const { ddo: ddoDatabase, order: orderDatabase } = await getDatabase() - const ddo = await ddoDatabase.retrieve(did) + let ddo = await ddoDatabase.retrieve(did) if (!ddo) { INDEXER_LOGGER.logMessage( `Detected OrderReused changed for ${did}, but it does not exists.` ) - return + did = getDid(nftAddress, chainId) + ddo = await ddoDatabase.retrieve(did) + if (!ddo) { + INDEXER_LOGGER.logMessage( + `Detected OrderReused changed for ${did}, but it does not exists` + ) + return + } } const ddoInstance = DDOManager.getDDOClass(ddo) if (!ddoInstance.getAssetFields().indexedMetadata) { diff --git a/src/components/Indexer/processors/OrderStartedEventProcessor.ts b/src/components/Indexer/processors/OrderStartedEventProcessor.ts index 73f9ade9e..0d2263650 100644 --- a/src/components/Indexer/processors/OrderStartedEventProcessor.ts +++ b/src/components/Indexer/processors/OrderStartedEventProcessor.ts @@ -4,7 +4,7 @@ import { EVENTS } from '../../../utils/constants.js' import { getDatabase } from '../../../utils/database.js' import { INDEXER_LOGGER } from '../../../utils/logging/common.js' import { LOG_LEVELS_STR } from '../../../utils/logging/Logger.js' -import { getDtContract, getDid, getPricesByDt } from '../utils.js' +import { getDtContract, getDid, getPricesByDt, getDidOpe } from '../utils.js' import { BaseEventProcessor } from './BaseProcessor.js' import ERC20Template from '@oceanprotocol/contracts/artifacts/contracts/templates/ERC20TemplateEnterprise.sol/ERC20TemplateEnterprise.json' assert { type: 'json' } @@ -32,38 +32,47 @@ export class OrderStartedEventProcessor extends BaseEventProcessor { const datatokenContract = getDtContract(signer, event.address) const nftAddress = await datatokenContract.getERC721Address() - const did = getDid(nftAddress, chainId) + let did = getDidOpe(nftAddress, chainId) try { const { ddo: ddoDatabase, order: orderDatabase } = await getDatabase() - const ddo = await this.getDDO(ddoDatabase, nftAddress, chainId) + let ddo = await ddoDatabase.retrieve(did) if (!ddo) { INDEXER_LOGGER.logMessage( - `Detected OrderStarted changed for ${did}, but it does not exists.` + `Detected OrderStarted changed for ${did}, but it does not exists. 
diff --git a/src/components/Indexer/processors/OrderStartedEventProcessor.ts b/src/components/Indexer/processors/OrderStartedEventProcessor.ts
index 73f9ade9e..0d2263650 100644
--- a/src/components/Indexer/processors/OrderStartedEventProcessor.ts
+++ b/src/components/Indexer/processors/OrderStartedEventProcessor.ts
@@ -4,7 +4,7 @@ import { EVENTS } from '../../../utils/constants.js'
 import { getDatabase } from '../../../utils/database.js'
 import { INDEXER_LOGGER } from '../../../utils/logging/common.js'
 import { LOG_LEVELS_STR } from '../../../utils/logging/Logger.js'
-import { getDtContract, getDid, getPricesByDt } from '../utils.js'
+import { getDtContract, getDid, getPricesByDt, getDidOpe } from '../utils.js'
 import { BaseEventProcessor } from './BaseProcessor.js'
 import ERC20Template from '@oceanprotocol/contracts/artifacts/contracts/templates/ERC20TemplateEnterprise.sol/ERC20TemplateEnterprise.json' assert { type: 'json' }
@@ -32,38 +32,47 @@ export class OrderStartedEventProcessor extends BaseEventProcessor {
     const datatokenContract = getDtContract(signer, event.address)

     const nftAddress = await datatokenContract.getERC721Address()
-    const did = getDid(nftAddress, chainId)
+    let did = getDidOpe(nftAddress, chainId)
     try {
       const { ddo: ddoDatabase, order: orderDatabase } = await getDatabase()
-      const ddo = await this.getDDO(ddoDatabase, nftAddress, chainId)
+      let ddo = await ddoDatabase.retrieve(did)
       if (!ddo) {
         INDEXER_LOGGER.logMessage(
-          `Detected OrderStarted changed for ${did}, but it does not exists.`
+          `Detected OrderStarted changed for ${did}, but it does not exist; retrying with the did:op prefix`
         )
-        return
+        did = getDid(nftAddress, chainId)
+        ddo = await ddoDatabase.retrieve(did)
+        if (!ddo) {
+          INDEXER_LOGGER.logMessage(
+            `Detected OrderStarted changed for ${did}, but it does not exist`
+          )
+          return
+        }
       }
       const ddoInstance = DDOManager.getDDOClass(ddo)
-      if (!ddoInstance.getDDOData().indexedMetadata) {
+      if (!ddoInstance.getAssetFields().indexedMetadata) {
         ddoInstance.updateFields({ indexedMetadata: {} })
       }
-      if (!Array.isArray(ddoInstance.getDDOData().indexedMetadata.stats)) {
+
+      if (!Array.isArray(ddoInstance.getAssetFields().indexedMetadata.stats)) {
         ddoInstance.updateFields({ indexedMetadata: { stats: [] } })
       }
+
       if (
-        ddoInstance.getDDOData().indexedMetadata.stats.length !== 0 &&
+        ddoInstance.getAssetFields().indexedMetadata.stats.length !== 0 &&
         ddoInstance
           .getDDOFields()
           .services[serviceIndex].datatokenAddress?.toLowerCase() ===
           event.address?.toLowerCase()
       ) {
-        for (const stat of ddoInstance.getDDOData().indexedMetadata.stats) {
+        for (const stat of ddoInstance.getAssetFields().indexedMetadata.stats) {
           if (stat.datatokenAddress.toLowerCase() === event.address?.toLowerCase()) {
             stat.orders += 1
             break
           }
         }
-      } else if (ddoInstance.getDDOData().indexedMetadata.stats.length === 0) {
-        const existingStats = ddoInstance.getDDOData().indexedMetadata.stats
+      } else if (ddoInstance.getAssetFields().indexedMetadata.stats.length === 0) {
+        const existingStats = ddoInstance.getAssetFields().indexedMetadata.stats
         existingStats.push({
           datatokenAddress: event.address,
           name: await datatokenContract.name(),
diff --git a/src/components/Indexer/utils.ts b/src/components/Indexer/utils.ts
index 4a1fbb6c0..e4535c53f 100644
--- a/src/components/Indexer/utils.ts
+++ b/src/components/Indexer/utils.ts
@@ -1,11 +1,7 @@
 import { JsonRpcApiProvider, Signer, ethers, getAddress } from 'ethers'
 import ERC721Factory from '@oceanprotocol/contracts/artifacts/contracts/ERC721Factory.sol/ERC721Factory.json' assert { type: 'json' }
 import ERC721Template from '@oceanprotocol/contracts/artifacts/contracts/templates/ERC721Template.sol/ERC721Template.json' assert { type: 'json' }
-import {
-  ENVIRONMENT_VARIABLES,
-  EVENT_HASHES,
-  existsEnvironmentVariable
-} from '../../utils/index.js'
+import { EVENT_HASHES, isDefined } from '../../utils/index.js'
 import { NetworkEvent } from '../../@types/blockchain.js'
 import { INDEXER_LOGGER } from '../../utils/logging/common.js'
 import ERC20Template from '@oceanprotocol/contracts/artifacts/contracts/templates/ERC20TemplateEnterprise.sol/ERC20TemplateEnterprise.json' assert { type: 'json' }
@@ -17,6 +13,7 @@ import FixedRateExchange from '@oceanprotocol/contracts/artifacts/contracts/pool
 import { createHash } from 'crypto'
 import { ServicePrice } from '../../@types/IndexedMetadata.js'
 import { VersionedDDO } from '@oceanprotocol/ddo-js'
+import FactoryRouter from '@oceanprotocol/contracts/artifacts/contracts/pools/FactoryRouter.sol/FactoryRouter.json' assert { type: 'json' }

 export const getContractAddress = (chainId: number, contractName: string): string => {
   const addressFile = getOceanArtifactsAdressesByChainId(chainId)
@@ -26,6 +23,34 @@ export const getContractAddress = (chainId: number, contractName: string): strin
   return ''
 }

+export const isValidFreContract = async (
+  address: string,
+  chainId: number,
+  signer: Signer
+) => {
+  const router = getContractAddress(chainId, 'Router')
+  const routerContract = new ethers.Contract(router, FactoryRouter.abi, signer)
+  try {
+    return await routerContract.isFixedRateContract(address)
+  } catch (e) {
+    INDEXER_LOGGER.error(`Could not fetch FRE contract status: ${e.message}`)
+  }
+}
+
+export const isValidDispenserContract = async (
+  address: string,
+  chainId: number,
+  signer: Signer
+) => {
+  const router = getContractAddress(chainId, 'Router')
+  const routerContract = new ethers.Contract(router, FactoryRouter.abi, signer)
+  try {
+    return await routerContract.isDispenserContract(address)
+  } catch (e) {
+    INDEXER_LOGGER.error(`Could not fetch dispenser contract status: ${e.message}`)
+  }
+}
+
 export const getDeployedContractBlock = (network: number) => {
   let deployedBlock: number
   const addressFile = getOceanArtifactsAdressesByChainId(network)
@@ -144,7 +169,7 @@ export async function wasNFTDeployedByOurFactory(
 // default in seconds
 const DEFAULT_INDEXER_CRAWLING_INTERVAL = 1000 * 30 // 30 seconds
 export const getCrawlingInterval = (): number => {
-  if (existsEnvironmentVariable(ENVIRONMENT_VARIABLES.INDEXER_INTERVAL)) {
+  if (isDefined(process.env.INDEXER_INTERVAL)) {
     const number: any = process.env.INDEXER_INTERVAL
     if (!isNaN(number) && number > 0) {
       return number
@@ -404,3 +429,34 @@ export function getDid(nftAddress: string, chainId: number): string {
       .digest('hex')
   )
 }
+export function getDidOpe(nftAddress: string, chainId: number): string {
+  return (
+    'did:ope:' +
+    createHash('sha256')
+      .update(getAddress(nftAddress) + chainId.toString(10))
+      .digest('hex')
+  )
+}
+export async function withRetrial<T>(
+  fn: () => Promise<T>,
+  maxRetries: number = 5,
+  delay: number = 2000
+): Promise<T> {
+  let lastError: Error
+
+  for (let attempt = 0; attempt < maxRetries; attempt++) {
+    try {
+      return await fn()
+    } catch (error) {
+      lastError = error
+
+      if (attempt === maxRetries - 1) {
+        throw lastError
+      }
+
+      await new Promise((resolve) => setTimeout(resolve, delay))
+    }
+  }
+
+  throw lastError
+}
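`withRetrial` above is a generic retry wrapper: up to `maxRetries` attempts with a fixed `delay` between them, rethrowing the last error once the attempts are exhausted. A hedged usage sketch; the RPC endpoint and retry numbers are illustrative only, not from this diff:

```typescript
import { ethers } from 'ethers'
import { withRetrial } from './utils.js'

// Illustrative: retry a flaky RPC read up to 3 times, 1s apart.
// If every attempt throws, the final error propagates to the caller.
const provider = new ethers.JsonRpcProvider('http://127.0.0.1:8545') // assumed local node
const blockNumber = await withRetrial(() => provider.getBlockNumber(), 3, 1000)
```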
diff --git a/src/components/c2d/compute_engine_base.ts b/src/components/c2d/compute_engine_base.ts
index c50bfcead..63f64034b 100644
--- a/src/components/c2d/compute_engine_base.ts
+++ b/src/components/c2d/compute_engine_base.ts
@@ -14,7 +14,8 @@ import type {
   DBComputeJobPayment,
   DBComputeJob,
   dockerDeviceRequest,
-  DBComputeJobMetadata
+  DBComputeJobMetadata,
+  ComputeEnvFees
 } from '../../@types/C2D/C2D.js'
 import { C2DClusterType } from '../../@types/C2D/C2D.js'
 import { C2DDatabase } from '../database/C2DDatabase.js'
@@ -310,16 +311,33 @@ export abstract class C2DEngine {
   public getDockerDeviceRequest(
     requests: ComputeResourceRequest[],
     resources: ComputeResource[]
-  ) {
+  ): dockerDeviceRequest[] | null {
     if (!resources) return null
-    const ret: dockerDeviceRequest[] = []
+
+    const grouped: Record<string, dockerDeviceRequest> = {}
+
     for (const resource of requests) {
       const res = this.getResource(resources, resource.id)
-      if (res.init && res.init.deviceRequests) {
-        ret.push(res.init.deviceRequests)
+      const init = res?.init?.deviceRequests
+      if (!init) continue
+
+      const key = `${init.Driver}-${JSON.stringify(init.Capabilities)}`
+      if (!grouped[key]) {
+        grouped[key] = {
+          Driver: init.Driver,
+          Capabilities: init.Capabilities,
+          DeviceIDs: [],
+          Options: init.Options ?? null,
+          Count: undefined
+        }
+      }
+
+      if (init.DeviceIDs?.length) {
+        grouped[key].DeviceIDs!.push(...init.DeviceIDs)
+      }
     }
-    return ret
+
+    return Object.values(grouped)
   }
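The rewritten `getDockerDeviceRequest` no longer emits one Docker device request per resource; entries are grouped by `Driver` plus serialized `Capabilities`, and their `DeviceIDs` are merged. An input/output sketch with made-up resource ids (object shapes taken from this diff):

```typescript
// Two GPU resources that share a driver and capability set...
const resources = [
  { id: 'gpu-0', init: { deviceRequests: { Driver: 'nvidia', Capabilities: [['gpu']], DeviceIDs: ['0'] } } },
  { id: 'gpu-1', init: { deviceRequests: { Driver: 'nvidia', Capabilities: [['gpu']], DeviceIDs: ['1'] } } }
]
const requests = [
  { id: 'gpu-0', amount: 1 },
  { id: 'gpu-1', amount: 1 }
]

// ...now collapse into a single merged request instead of two:
// engine.getDockerDeviceRequest(requests, resources) ===
//   [{ Driver: 'nvidia', Capabilities: [['gpu']], DeviceIDs: ['0', '1'], Options: null, Count: undefined }]
```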
 public getDockerAdvancedConfig(
@@ -427,11 +445,15 @@ export abstract class C2DEngine {
   public getTotalCostOfJob(
     resources: ComputeResourceRequestWithPrice[],
-    duration: number
+    duration: number,
+    fee: ComputeEnvFees
   ) {
     let cost: number = 0
     for (const request of resources) {
-      if (request.price) cost += request.price * request.amount * Math.ceil(duration / 60)
+      const price = fee.prices.find((p) => p.id === request.id)?.price
+      if (price) {
+        cost += price * request.amount * Math.ceil(duration / 60)
+      }
     }
     return cost
   }
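With the new signature, `getTotalCostOfJob` prices each requested resource from the environment's fee schedule rather than from the request itself: per-minute price times requested amount, with the duration rounded up to whole minutes. A worked sketch under assumed prices:

```typescript
// Assumed fee entry: 2 tokens per cpu-minute, 1 token per ram-minute.
const fee = { prices: [{ id: 'cpu', price: 2 }, { id: 'ram', price: 1 }] }
const resources = [
  { id: 'cpu', amount: 2 }, // two cores
  { id: 'ram', amount: 1 } // one unit of RAM
]
const duration = 90 // seconds; Math.ceil(90 / 60) = 2 billable minutes

// Minimal re-statement of the formula above, for illustration:
const cost = resources.reduce((sum, r) => {
  const price = fee.prices.find((p) => p.id === r.id)?.price ?? 0
  return sum + price * r.amount * Math.ceil(duration / 60)
}, 0)
console.log(cost) // 2*2*2 (cpu) + 1*1*2 (ram) = 10 tokens
```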
diff --git a/src/components/c2d/compute_engine_docker.ts b/src/components/c2d/compute_engine_docker.ts
index 693e3c73c..17b04f64f 100644
--- a/src/components/c2d/compute_engine_docker.ts
+++ b/src/components/c2d/compute_engine_docker.ts
@@ -169,8 +169,14 @@ export class C2DEngineDocker extends C2DEngine {
         architecture: sysinfo.Architecture,
         os: sysinfo.OSType
       },
+      access: {
+        addresses: [],
+        accessLists: []
+      },
       fees
     })
+    if (`access` in envConfig) this.envs[0].access = envConfig.access
+
     if (`storageExpiry` in envConfig) this.envs[0].storageExpiry = envConfig.storageExpiry
     if (`maxJobDuration` in envConfig) this.envs[0].maxJobDuration = envConfig.maxJobDuration
@@ -235,7 +241,13 @@ export class C2DEngineDocker extends C2DEngine {
      */
     // limits for free env
     if ('free' in envConfig) {
-      this.envs[0].free = {}
+      this.envs[0].free = {
+        access: {
+          addresses: [],
+          accessLists: []
+        }
+      }
+
+      if (`access` in envConfig.free) this.envs[0].free.access = envConfig.free.access
       if (`storageExpiry` in envConfig.free)
         this.envs[0].free.storageExpiry = envConfig.free.storageExpiry
       if (`maxJobDuration` in envConfig.free)
@@ -428,7 +440,8 @@ export class C2DEngineDocker extends C2DEngine {
       payment,
       metadata,
       additionalViewers,
-      terminationDetails: { exitCode: null, OOMKilled: null }
+      terminationDetails: { exitCode: null, OOMKilled: null },
+      algoDuration: 0
     }

     if (algorithm.meta.container && algorithm.meta.container.dockerfile) {
@@ -859,9 +872,8 @@ export class C2DEngineDocker extends C2DEngine {
     }
     const cpus = this.getResourceRequest(job.resources, 'cpu')
     if (cpus && cpus > 0) {
-      const systemInfo = this.docker ? await this.docker.info() : null
       hostConfig.CpuPeriod = 100000 // 100 miliseconds is usually the default
-      hostConfig.CpuQuota = Math.floor((cpus / systemInfo.NCPU) * hostConfig.CpuPeriod)
+      hostConfig.CpuQuota = Math.floor(cpus * hostConfig.CpuPeriod)
     }
     const containerInfo: ContainerCreateOptions = {
       name: job.jobId + '-algoritm',
@@ -1116,6 +1128,11 @@ export class C2DEngineDocker extends C2DEngine {
     this.jobImageSizes.delete(job.jobId)

     // payments
+    const algoDuration =
+      parseFloat(job.algoStopTimestamp) - parseFloat(job.algoStartTimestamp)
+
+    job.algoDuration = algoDuration
+    await this.db.updateJob(job)
     if (!job.isFree && job.payment) {
       let txId = null
       const env = await this.getComputeEnvironment(job.payment.chainId, job.environment)
@@ -1123,13 +1140,16 @@ export class C2DEngineDocker extends C2DEngine {
       if (env && `minJobDuration` in env && env.minJobDuration) {
         minDuration = env.minJobDuration
       }
-      const algoRunnedTime =
-        parseFloat(job.algoStopTimestamp) - parseFloat(job.algoStartTimestamp)
-      if (algoRunnedTime < 0) minDuration += algoRunnedTime * -1
-      else minDuration += algoRunnedTime
+
+      if (algoDuration < 0) minDuration += algoDuration * -1
+      else minDuration += algoDuration
+
+      let cost = 0
       if (minDuration > 0) {
         // we need to claim
-        const cost = this.getTotalCostOfJob(job.resources, minDuration)
+        const fee = env.fees[job.payment.chainId].find(
+          (fee) => fee.feeToken === job.payment.token
+        )
+        cost = this.getTotalCostOfJob(job.resources, minDuration, fee)
         const proof = JSON.stringify(omitDBComputeFieldsFromComputeJob(job))
         try {
           txId = await this.escrow.claimLock(
@@ -1158,6 +1178,7 @@ export class C2DEngineDocker extends C2DEngine {
       }
       if (txId) {
         job.payment.claimTx = txId
+        job.payment.cost = cost
         await this.db.updateJob(job)
       }
     }
diff --git a/src/components/core/admin/fetchConfigHandler.ts b/src/components/core/admin/fetchConfigHandler.ts
new file mode 100644
index 000000000..7f1f005de
--- /dev/null
+++ b/src/components/core/admin/fetchConfigHandler.ts
@@ -0,0 +1,43 @@
+import { AdminCommandHandler } from './adminHandler.js'
+import { AdminFetchConfigCommand } from '../../../@types/commands.js'
+import { P2PCommandResponse } from '../../../@types/OceanNode.js'
+import {
+  ValidateParams,
+  buildInvalidParametersResponse
+} from '../../httpRoutes/validateCommands.js'
+import { ReadableString } from '../../P2P/handleProtocolCommands.js'
+import { loadConfigFromFile } from '../../../utils/config/index.js'
+
+export class FetchConfigHandler extends AdminCommandHandler {
+  async validate(command: AdminFetchConfigCommand): Promise<ValidateParams> {
+    return await super.validate(command)
+  }
+
+  async handle(task: AdminFetchConfigCommand): Promise<P2PCommandResponse> {
+    const validation = await this.validate(task)
+    if (!validation.valid) {
+      return new Promise((resolve) => {
+        resolve(buildInvalidParametersResponse(validation))
+      })
+    }
+
+    try {
+      const config = loadConfigFromFile()
+      config.keys.privateKey = '[*** HIDDEN CONTENT ***]'
+
+      return new Promise((resolve) => {
+        resolve({
+          status: { httpStatus: 200 },
+          stream: new ReadableString(JSON.stringify(config))
+        })
+      })
+    } catch (error) {
+      return new Promise((resolve) => {
+        resolve({
+          status: { httpStatus: 500, error: `Error fetching config: ${error.message}` },
+          stream: null
+        })
+      })
+    }
+  }
+}
diff --git a/src/components/core/admin/pushConfigHandler.ts b/src/components/core/admin/pushConfigHandler.ts
new file mode 100644
index 000000000..14d0ecb6d
--- /dev/null
+++ b/src/components/core/admin/pushConfigHandler.ts
@@ -0,0 +1,88 @@
+import {
AdminCommandHandler } from './adminHandler.js' +import { AdminPushConfigCommand } from '../../../@types/commands.js' +import { P2PCommandResponse } from '../../../@types/OceanNode.js' +import { + ValidateParams, + buildInvalidParametersResponse, + buildInvalidRequestMessage +} from '../../httpRoutes/validateCommands.js' +import { CORE_LOGGER } from '../../../utils/logging/common.js' +import { ReadableString } from '../../P2P/handleProtocolCommands.js' +import { getConfiguration, getConfigFilePath } from '../../../utils/config/index.js' +import { OceanNodeConfigSchema } from '../../../utils/config/schemas.js' +import fs from 'fs' + +export class PushConfigHandler extends AdminCommandHandler { + async validate(command: AdminPushConfigCommand): Promise { + const baseValidation = await super.validate(command) + if (!baseValidation.valid) { + return baseValidation + } + + if (!command.config || typeof command.config !== 'object') { + return buildInvalidRequestMessage('Config must be a valid object') + } + + // Pre-validate the config fields using Zod schema + try { + const currentConfig = await getConfiguration() + const mergedConfig = { ...currentConfig, ...command.config } + + OceanNodeConfigSchema.parse(mergedConfig) + } catch (error) { + if (error.name === 'ZodError') { + const issues = error.issues + .map((issue: any) => `${issue.path.join('.')}: ${issue.message}`) + .join(', ') + return buildInvalidRequestMessage(`Config validation failed: ${issues}`) + } + return buildInvalidRequestMessage(`Config validation error: ${error.message}`) + } + + return { valid: true } + } + + async handle(task: AdminPushConfigCommand): Promise { + const validation = await this.validate(task) + if (!validation.valid) { + return new Promise((resolve) => { + resolve(buildInvalidParametersResponse(validation)) + }) + } + + try { + const configPath = getConfigFilePath() + const configContent = await fs.promises.readFile(configPath, 'utf-8') + const currentConfig = JSON.parse(configContent) + + const mergedConfig = { ...currentConfig, ...task.config } + await this.saveConfigToFile(mergedConfig) + + const newConfig = await getConfiguration(true, false) + newConfig.keys.privateKey = '[*** HIDDEN CONTENT ***]' + CORE_LOGGER.logMessage('Configuration reloaded successfully') + + return new Promise((resolve) => { + resolve({ + status: { httpStatus: 200 }, + stream: new ReadableString(JSON.stringify(newConfig)) + }) + }) + } catch (error) { + CORE_LOGGER.error(`Error pushing config: ${error.message}`) + return new Promise((resolve) => { + resolve({ + status: { httpStatus: 500, error: `Error pushing config: ${error.message}` }, + stream: null + }) + }) + } + } + + private async saveConfigToFile(config: Record): Promise { + const configPath = getConfigFilePath() + const content = JSON.stringify(config, null, 4) + await fs.promises.writeFile(configPath, content, 'utf-8') + CORE_LOGGER.logMessage(`Config saved to: ${configPath}`) + } +} diff --git a/src/components/core/compute/initialize.ts b/src/components/core/compute/initialize.ts index e6274cb30..855f62611 100644 --- a/src/components/core/compute/initialize.ts +++ b/src/components/core/compute/initialize.ts @@ -74,6 +74,7 @@ export class ComputeInitializeHandler extends CommandHandler { let resourcesNeeded try { const node = this.getOceanNode() + const config = await getConfiguration() try { // split compute env (which is already in hash-envId format) and get the hash // then get env which might contain dashes as well @@ -93,7 +94,8 @@ export class 
ComputeInitializeHandler extends CommandHandler { const algoChecksums = await getAlgoChecksums( task.algorithm.documentId, task.algorithm.serviceId, - node + node, + config ) const isRawCodeAlgorithm = task.algorithm.meta?.rawcode @@ -257,7 +259,7 @@ export class ComputeInitializeHandler extends CommandHandler { httpStatus: 400, error: `Algorithm ${ task.algorithm.documentId - } with serviceId ${task.algorithm.serviceId} not allowed to run on the dataset: ${ddoInstance.getDid()} with serviceId: ${task.datasets[safeIndex].serviceId}` + } not allowed to run on the dataset: ${ddoInstance.getDid()}` } } } diff --git a/src/components/core/compute/startCompute.ts b/src/components/core/compute/startCompute.ts index 53911c24f..fa352bb32 100644 --- a/src/components/core/compute/startCompute.ts +++ b/src/components/core/compute/startCompute.ts @@ -21,7 +21,10 @@ import { isERC20Template4Active } from '../../../utils/asset.js' import { EncryptMethod } from '../../../@types/fileObject.js' -import { ComputeResourceRequestWithPrice } from '../../../@types/C2D/C2D.js' +import { + ComputeAccessList, + ComputeResourceRequestWithPrice +} from '../../../@types/C2D/C2D.js' import { decrypt } from '../../../utils/crypt.js' // import { verifyProviderFees } from '../utils/feesHandler.js' import { Blockchain } from '../../../utils/blockchain.js' @@ -34,8 +37,11 @@ import { isOrderingAllowedForAsset } from '../handler/downloadHandler.js' import { Credentials, DDOManager } from '@oceanprotocol/ddo-js' import { getNonceAsNumber } from '../utils/nonceHandler.js' import { PolicyServer } from '../../policyServer/index.js' -import { areKnownCredentialTypes, checkCredentials } from '../../../utils/credentials.js' - +import { + areKnownCredentialTypes, + checkCredentials, + findAccessListCredentials +} from '../../../utils/credentials.js' export class PaidComputeStartHandler extends CommandHandler { validate(command: PaidComputeStartCommand): ValidateParams { const commandValidation = validateCommandParameters(command, [ @@ -125,11 +131,24 @@ export class PaidComputeStartHandler extends CommandHandler { } } const { algorithm } = task + const config = await getConfiguration() + + const accessGranted = await validateAccess(task.consumerAddress, env.access) + if (!accessGranted) { + return { + stream: null, + status: { + httpStatus: 403, + error: 'Access denied' + } + } + } const algoChecksums = await getAlgoChecksums( task.algorithm.documentId, task.algorithm.serviceId, - node + node, + config ) const isRawCodeAlgorithm = task.algorithm.meta?.rawcode @@ -484,7 +503,8 @@ export class PaidComputeStartHandler extends CommandHandler { chainId: task.payment.chainId, token: task.payment.token, lockTx: agreementId, - claimTx: null + claimTx: null, + cost: 0 }, jobId, task.metadata, @@ -707,6 +727,17 @@ export class FreeComputeStartHandler extends CommandHandler { } } + const accessGranted = await validateAccess(task.consumerAddress, env.free.access) + if (!accessGranted) { + return { + stream: null, + status: { + httpStatus: 403, + error: 'Access denied' + } + } + } + task.resources = await engine.checkAndFillMissingResources( task.resources, env, @@ -783,3 +814,51 @@ export class FreeComputeStartHandler extends CommandHandler { } } } + +async function validateAccess( + consumerAddress: string, + access: ComputeAccessList | undefined +): Promise { + if (!access) { + return true + } + + if (access.accessLists.length === 0 && access.addresses.length === 0) { + return true + } + + if (access.addresses.includes(consumerAddress)) { + 
return true + } + + if (access.accessLists.length > 0) { + const config = await getConfiguration() + const { supportedNetworks } = config + + for (const accessListAddress of access.accessLists) { + for (const chainIdStr of Object.keys(supportedNetworks)) { + const { rpc, network, chainId, fallbackRPCs } = supportedNetworks[chainIdStr] + try { + const blockchain = new Blockchain(rpc, network, chainId, fallbackRPCs) + const signer = blockchain.getSigner() + + const hasAccess = await findAccessListCredentials( + signer, + accessListAddress, + consumerAddress + ) + if (hasAccess) { + return true + } + } catch (error) { + CORE_LOGGER.logMessage( + `Failed to check access list ${accessListAddress} on chain ${chainIdStr}: ${error.message}`, + true + ) + } + } + } + } + + return false +} diff --git a/src/components/core/compute/utils.ts b/src/components/core/compute/utils.ts index 6966a3e3c..fe92e3737 100644 --- a/src/components/core/compute/utils.ts +++ b/src/components/core/compute/utils.ts @@ -1,5 +1,6 @@ import { OceanNode } from '../../../OceanNode.js' import { AlgoChecksums } from '../../../@types/C2D/C2D.js' +import { OceanNodeConfig } from '../../../@types/OceanNode.js' import { ArweaveFileObject, IpfsFileObject, @@ -27,7 +28,8 @@ export function generateUniqueID(jobStructure: any): string { export async function getAlgoChecksums( algoDID: string, algoServiceId: string, - oceanNode: OceanNode + oceanNode: OceanNode, + config: OceanNodeConfig ): Promise { const checksums: AlgoChecksums = { files: '', @@ -46,12 +48,9 @@ export async function getAlgoChecksums( file.type === 'url' ? (file as UrlFileObject).url : file.type === 'arweave' - ? urlJoin( - process.env.ARWEAVE_GATEWAY, - (file as ArweaveFileObject).transactionId - ) + ? urlJoin(config.arweaveGateway, (file as ArweaveFileObject).transactionId) : file.type === 'ipfs' - ? urlJoin(process.env.IPFS_GATEWAY, (file as IpfsFileObject).hash) + ? 
urlJoin(config.ipfsGateway, (file as IpfsFileObject).hash) : null const { contentChecksum } = await fetchFileMetadata(url, 'get', false) @@ -120,14 +119,9 @@ export async function validateAlgoForDataset( if ('serviceId' in algo) { const serviceIdMatch = algo.serviceId === '*' || algo.serviceId === algoChecksums.serviceId - CORE_LOGGER.info( - `didMatch: ${didMatch}, filesMatch: ${filesMatch}, containerMatch: ${containerMatch}, serviceIdMatch: ${serviceIdMatch}` - ) return didMatch && filesMatch && containerMatch && serviceIdMatch } - CORE_LOGGER.info( - `didMatch: ${didMatch}, filesMatch: ${filesMatch}, containerMatch: ${containerMatch}` - ) + return didMatch && filesMatch && containerMatch }) diff --git a/src/components/core/handler/coreHandlersRegistry.ts b/src/components/core/handler/coreHandlersRegistry.ts index 9f76a6b60..f84357160 100644 --- a/src/components/core/handler/coreHandlersRegistry.ts +++ b/src/components/core/handler/coreHandlersRegistry.ts @@ -35,6 +35,8 @@ import { ReindexTxHandler } from '../admin/reindexTxHandler.js' import { ReindexChainHandler } from '../admin/reindexChainHandler.js' import { IndexingThreadHandler } from '../admin/IndexingThreadHandler.js' import { CollectFeesHandler } from '../admin/collectFeesHandler.js' +import { FetchConfigHandler } from '../admin/fetchConfigHandler.js' +import { PushConfigHandler } from '../admin/pushConfigHandler.js' import { AdminCommandHandler } from '../admin/adminHandler.js' import { GetP2PPeerHandler, @@ -43,6 +45,7 @@ import { FindPeerHandler } from './p2p.js' import { CreateAuthTokenHandler, InvalidateAuthTokenHandler } from './authHandler.js' +import { GetJobsHandler } from './getJobs.js' export type HandlerRegistry = { handlerName: string // name of the handler @@ -159,6 +162,9 @@ export class CoreHandlersRegistry { PROTOCOL_COMMANDS.INVALIDATE_AUTH_TOKEN, new InvalidateAuthTokenHandler(node) ) + this.registerCoreHandler(PROTOCOL_COMMANDS.FETCH_CONFIG, new FetchConfigHandler(node)) + this.registerCoreHandler(PROTOCOL_COMMANDS.PUSH_CONFIG, new PushConfigHandler(node)) + this.registerCoreHandler(PROTOCOL_COMMANDS.JOBS, new GetJobsHandler(node)) } public static getInstance( diff --git a/src/components/core/handler/ddoHandler.ts b/src/components/core/handler/ddoHandler.ts index 3c2117bc4..2e88a258c 100644 --- a/src/components/core/handler/ddoHandler.ts +++ b/src/components/core/handler/ddoHandler.ts @@ -309,7 +309,7 @@ export class DecryptDdoHandler extends CommandHandler { let decryptedDocument: Buffer // check if DDO is ECIES encrypted - if (flags & 2) { + if ((flags & 2) !== 0) { try { decryptedDocument = await decrypt(encryptedDocument, EncryptMethod.ECIES) } catch (error) { @@ -321,8 +321,7 @@ export class DecryptDdoHandler extends CommandHandler { } } } - } - if (flags & 1) { + } else { try { decryptedDocument = lzmajs.decompressFile(decryptedDocument) /* @@ -380,15 +379,26 @@ export class DecryptDdoHandler extends CommandHandler { // check signature try { + const useTxIdOrContractAddress = transactionId || dataNftAddress + const message = String( - transactionId + dataNftAddress + decrypterAddress + chainId + nonce + useTxIdOrContractAddress + decrypterAddress + chainId + nonce ) const messageHash = ethers.solidityPackedKeccak256( ['bytes'], [ethers.hexlify(ethers.toUtf8Bytes(message))] ) - const addressFromSignature = ethers.verifyMessage(messageHash, task.signature) - if (addressFromSignature?.toLowerCase() !== decrypterAddress?.toLowerCase()) { + const messageHashBytes = ethers.getBytes(messageHash) + const 
addressFromHashSignature = ethers.verifyMessage(messageHash, task.signature) + const addressFromBytesSignature = ethers.verifyMessage( + messageHashBytes, + task.signature + ) + + if ( + addressFromHashSignature?.toLowerCase() !== decrypterAddress?.toLowerCase() && + addressFromBytesSignature?.toLowerCase() !== decrypterAddress?.toLowerCase() + ) { throw new Error('address does not match') } } catch (error) { @@ -855,9 +865,14 @@ export function validateDdoSignedByPublisher( ['bytes'], [ethers.hexlify(ethers.toUtf8Bytes(message))] ) - const messageHashBytes = ethers.toBeArray(messageHash) - const recoveredAddress = ethers.verifyMessage(messageHashBytes, signature) - return recoveredAddress === publisherAddress + const messageHashBytes = ethers.getBytes(messageHash) + // Try both verification methods for backward compatibility + const addressFromHashSignature = ethers.verifyMessage(messageHash, signature) + const addressFromBytesSignature = ethers.verifyMessage(messageHashBytes, signature) + return ( + addressFromHashSignature?.toLowerCase() === publisherAddress?.toLowerCase() || + addressFromBytesSignature?.toLowerCase() === publisherAddress?.toLowerCase() + ) } catch (error) { CORE_LOGGER.logMessage(`Error: ${error}`, true) return false diff --git a/src/components/core/handler/downloadHandler.ts b/src/components/core/handler/downloadHandler.ts index d17b366dd..2c8846448 100644 --- a/src/components/core/handler/downloadHandler.ts +++ b/src/components/core/handler/downloadHandler.ts @@ -1,9 +1,5 @@ import { CommandHandler } from './handler.js' -import { - ENVIRONMENT_VARIABLES, - MetadataStates, - PROTOCOL_COMMANDS -} from '../../../utils/constants.js' +import { MetadataStates, PROTOCOL_COMMANDS } from '../../../utils/constants.js' import { P2PCommandResponse } from '../../../@types/OceanNode.js' import { verifyProviderFees } from '../utils/feesHandler.js' import { decrypt } from '../../../utils/crypt.js' @@ -19,10 +15,9 @@ import { isDataTokenTemplate4, isERC20Template4Active } from '../../../utils/asset.js' -import { ArweaveStorage, IpfsStorage, Storage } from '../../storage/index.js' +import { Storage } from '../../storage/index.js' import { Blockchain, - existsEnvironmentVariable, getConfiguration, isPolicyServerConfigured } from '../../../utils/index.js' @@ -75,29 +70,12 @@ export async function handleDownloadUrlCommand( try { // Determine the type of storage and get a readable stream const storage = Storage.getStorageClass(task.fileObject, config) - if ( - storage instanceof ArweaveStorage && - !existsEnvironmentVariable(ENVIRONMENT_VARIABLES.ARWEAVE_GATEWAY) - ) { - CORE_LOGGER.logMessageWithEmoji( - 'Failure executing downloadURL task: Oean-node does not support arweave storage type files! ', - true, - GENERIC_EMOJIS.EMOJI_CROSS_MARK, - LOG_LEVELS_STR.LEVEL_ERROR - ) - return { - stream: null, - status: { - httpStatus: 501, - error: 'Error: Oean-node does not support arweave storage type files!' - } - } - } else if ( - storage instanceof IpfsStorage && - !existsEnvironmentVariable(ENVIRONMENT_VARIABLES.IPFS_GATEWAY) - ) { + + // Validate storage configuration (checks if gateways are configured) + const [isValid, validationError] = storage.validate() + if (!isValid) { CORE_LOGGER.logMessageWithEmoji( - 'Failure executing downloadURL task: Oean-node does not support ipfs storage type files! 
', + `Failure executing downloadURL task: ${validationError}`, true, GENERIC_EMOJIS.EMOJI_CROSS_MARK, LOG_LEVELS_STR.LEVEL_ERROR @@ -106,7 +84,7 @@ export async function handleDownloadUrlCommand( stream: null, status: { httpStatus: 501, - error: 'Error: Oean-node does not support ipfs storage type files!' + error: `Error: ${validationError}` } } } diff --git a/src/components/core/handler/fileInfoHandler.ts b/src/components/core/handler/fileInfoHandler.ts index 19c457c96..4159b2920 100644 --- a/src/components/core/handler/fileInfoHandler.ts +++ b/src/components/core/handler/fileInfoHandler.ts @@ -6,6 +6,7 @@ import { IpfsFileObject, UrlFileObject } from '../../../@types/fileObject.js' +import { OceanNodeConfig } from '../../../@types/OceanNode.js' import { FileInfoCommand } from '../../../@types/commands.js' import { CORE_LOGGER } from '../../../utils/logging/common.js' import { Storage } from '../../storage/index.js' @@ -19,14 +20,17 @@ import { } from '../../httpRoutes/validateCommands.js' import { getFile } from '../../../utils/file.js' import { getConfiguration } from '../../../utils/index.js' -async function formatMetadata(file: ArweaveFileObject | IpfsFileObject | UrlFileObject) { +async function formatMetadata( + file: ArweaveFileObject | IpfsFileObject | UrlFileObject, + config: OceanNodeConfig +) { const url = file.type === 'url' ? (file as UrlFileObject).url : file.type === 'arweave' - ? urlJoin(process.env.ARWEAVE_GATEWAY, (file as ArweaveFileObject).transactionId) + ? urlJoin(config.arweaveGateway, (file as ArweaveFileObject).transactionId) : file.type === 'ipfs' - ? urlJoin(process.env.IPFS_GATEWAY, (file as IpfsFileObject).hash) + ? urlJoin(config.ipfsGateway, (file as IpfsFileObject).hash) : null const { contentLength, contentType, contentChecksum } = await fetchFileMetadata( @@ -75,10 +79,11 @@ export class FileInfoHandler extends CommandHandler { } try { const oceanNode = this.getOceanNode() + const config = await getConfiguration() let fileInfo = [] if (task.file && task.type) { - const storage = Storage.getStorageClass(task.file, await getConfiguration()) + const storage = Storage.getStorageClass(task.file, config) fileInfo = await storage.getFileInfo({ type: task.type, @@ -87,11 +92,11 @@ export class FileInfoHandler extends CommandHandler { } else if (task.did && task.serviceId) { const fileArray = await getFile(task.did, task.serviceId, oceanNode) if (task.fileIndex) { - const fileMetadata = await formatMetadata(fileArray[task.fileIndex]) + const fileMetadata = await formatMetadata(fileArray[task.fileIndex], config) fileInfo.push(fileMetadata) } else { for (const file of fileArray) { - const fileMetadata = await formatMetadata(file) + const fileMetadata = await formatMetadata(file, config) fileInfo.push(fileMetadata) } } diff --git a/src/components/core/handler/getJobs.ts b/src/components/core/handler/getJobs.ts new file mode 100644 index 000000000..e50f5c4b1 --- /dev/null +++ b/src/components/core/handler/getJobs.ts @@ -0,0 +1,54 @@ +import { Readable } from 'stream' +import { GetJobsCommand } from '../../../@types/commands.js' +import { CORE_LOGGER } from '../../../utils/logging/common.js' +import { buildInvalidRequestMessage } from '../../httpRoutes/validateCommands.js' +import { CommandHandler } from './handler.js' +import { P2PCommandResponse } from '../../../@types/OceanNode.js' + +export class GetJobsHandler extends CommandHandler { + validate(command: GetJobsCommand) { + if (command.fromTimestamp && typeof command.fromTimestamp !== 'string') { + return 
buildInvalidRequestMessage( + 'Parameter : "fromTimestamp" is not a valid string' + ) + } + return { valid: true } + } + + async handle(task: GetJobsCommand): Promise { + const validationResponse = await this.verifyParamsAndRateLimits(task) + if (this.shouldDenyTaskHandling(validationResponse)) { + return validationResponse + } + + try { + const { c2d } = this.getOceanNode().getDatabase() + if (!c2d) { + throw new Error('C2D database not initialized') + } + + const jobs = await c2d.getJobs( + task.environments, + task.fromTimestamp, + task.consumerAddrs + ) + return { + stream: Readable.from(JSON.stringify(jobs)), + status: { + httpStatus: 200, + error: null + } + } + } catch (error) { + const message = error instanceof Error ? error.message : String(error) + CORE_LOGGER.error('Error retrieving node jobs: ' + message) + return { + status: { + httpStatus: 500, + error: message + }, + stream: null + } + } + } +} diff --git a/src/components/core/handler/queryHandler.ts b/src/components/core/handler/queryHandler.ts index 50d7c7b12..dea8c616c 100644 --- a/src/components/core/handler/queryHandler.ts +++ b/src/components/core/handler/queryHandler.ts @@ -46,6 +46,17 @@ export class QueryDdoStateHandler extends QueryHandler { } try { const result = await this.getOceanNode().getDatabase().ddoState.search(task.query) + + CORE_LOGGER.debug(`DDO State search result: ${JSON.stringify(result)}`) + + if (result === null) { + CORE_LOGGER.error('Database search returned null') + return { + stream: null, + status: { httpStatus: 500, error: 'Database search failed' } + } + } + return { stream: Readable.from(JSON.stringify(result)), status: { httpStatus: 200 } diff --git a/src/components/core/utils/feesHandler.ts b/src/components/core/utils/feesHandler.ts index 88f62fc60..12ebc33f1 100644 --- a/src/components/core/utils/feesHandler.ts +++ b/src/components/core/utils/feesHandler.ts @@ -46,7 +46,8 @@ async function calculateProviderFeeAmount( // it's a download provider fee // we should get asset file size, and do a proper fee management according to time // something like estimated 3 downloads per day - const providerFeeAmount = (await getConfiguration()).feeStrategy.feeAmount.amount + const config = await getConfiguration() + const providerFeeAmount = config?.feeStrategy?.feeAmount?.amount || 0 return providerFeeAmount } @@ -468,13 +469,9 @@ export async function getProviderKey(): Promise { * @returns the token address */ export async function getProviderFeeToken(chainId: number): Promise { - const feeToken = (await getConfiguration()).feeStrategy.feeTokens - CORE_LOGGER.info(`feeToken: ${JSON.stringify(feeToken)}`) - CORE_LOGGER.info(`chainiD: ${chainId}`) - const result = (await getConfiguration()).feeStrategy.feeTokens.filter( - (token: FeeTokens) => Number(token.chain) === chainId - ) - CORE_LOGGER.info(`feeToken result: ${JSON.stringify(result)}`) + const config = await getConfiguration() + const feeTokens = config?.feeStrategy?.feeTokens || [] + const result = feeTokens.filter((token: FeeTokens) => Number(token.chain) === chainId) if (result.length === 0 && chainId === 8996) { const localOceanToken = getOceanArtifactsAdresses().development.Ocean return localOceanToken || ethers.ZeroAddress @@ -487,7 +484,8 @@ export async function getProviderFeeToken(chainId: number): Promise { * @returns amount */ export async function getProviderFeeAmount(): Promise { - return (await getConfiguration()).feeStrategy.feeAmount.amount + const config = await getConfiguration() + return 
config?.feeStrategy?.feeAmount?.amount || 0 } // https://github.com/oceanprotocol/contracts/blob/main/contracts/templates/ERC20Template.sol#L65-L74 // https://github.com/oceanprotocol/contracts/blob/main/contracts/templates/ERC20Template.sol#L447-L508 diff --git a/src/components/core/utils/statusHandler.ts b/src/components/core/utils/statusHandler.ts index ce9016840..c07c4e046 100644 --- a/src/components/core/utils/statusHandler.ts +++ b/src/components/core/utils/statusHandler.ts @@ -7,8 +7,7 @@ import { StorageTypes, OceanNodeConfig } from '../../../@types/OceanNode.js' -import { existsEnvironmentVariable, getConfiguration } from '../../../utils/index.js' -import { ENVIRONMENT_VARIABLES } from '../../../utils/constants.js' +import { getConfiguration } from '../../../utils/index.js' import { CORE_LOGGER } from '../../../utils/logging/common.js' import { OceanNode } from '../../../OceanNode.js' import { typesenseSchemas } from '../../database/TypesenseSchemas.js' @@ -16,10 +15,12 @@ import { SupportedNetwork } from '../../../@types/blockchain.js' import { getAdminAddresses } from '../../../utils/auth.js' import HumanHasher from 'humanhash' -const supportedStorageTypes: StorageTypes = { - url: true, - arwave: existsEnvironmentVariable(ENVIRONMENT_VARIABLES.ARWEAVE_GATEWAY), - ipfs: existsEnvironmentVariable(ENVIRONMENT_VARIABLES.IPFS_GATEWAY) +function getSupportedStorageTypes(config: OceanNodeConfig): StorageTypes { + return { + url: true, + arwave: !!config.arweaveGateway, + ipfs: !!config.ipfsGateway + } } // platform information @@ -118,7 +119,7 @@ export async function status( p2p: config.hasP2P, provider: [], indexer: [], - supportedStorage: supportedStorageTypes, + supportedStorage: getSupportedStorageTypes(config), // uptime: process.uptime(), platform: platformInfo, codeHash: config.codeHash, diff --git a/src/components/database/C2DDatabase.ts b/src/components/database/C2DDatabase.ts index b7b57c897..20ac3d7e5 100644 --- a/src/components/database/C2DDatabase.ts +++ b/src/components/database/C2DDatabase.ts @@ -67,14 +67,22 @@ export class C2DDatabase extends AbstractDatabase { return await this.provider.getRunningJobs(engine, environment) } - async getAllFinishedJobs(): Promise { - return await this.provider.getAllFinishedJobs() - } - async deleteJob(jobId: string): Promise { return await this.provider.deleteJob(jobId) } + async getFinishedJobs(environments?: string[]): Promise { + return await this.provider.getFinishedJobs(environments) + } + + async getJobs( + environments?: string[], + fromTimestamp?: string, + consumerAddrs?: string[] + ): Promise { + return await this.provider.getJobs(environments, fromTimestamp, consumerAddrs) + } + /** * * @param environment compute environment to check for @@ -97,8 +105,9 @@ export class C2DDatabase extends AbstractDatabase { for (const engine of allEngines) { const allEnvironments = await engine.getComputeEnvironments() for (const computeEnvironment of allEnvironments) { - const finishedOrExpired: DBComputeJob[] = - await this.provider.getFinishedJobs(computeEnvironment) + const finishedOrExpired: DBComputeJob[] = await this.provider.getFinishedJobs([ + computeEnvironment.id + ]) for (const job of finishedOrExpired) { if ( computeEnvironment && @@ -129,7 +138,7 @@ export class C2DDatabase extends AbstractDatabase { .map((env: any) => env.id) // Get all finished jobs from DB, not just from known environments - const allJobs: DBComputeJob[] = await c2dDatabase.getAllFinishedJobs() + const allJobs: DBComputeJob[] = await 
c2dDatabase.getFinishedJobs() for (const job of allJobs) { if (!job.environment || !envIds.includes(job.environment)) { diff --git a/src/components/database/ElasticSearchDatabase.ts b/src/components/database/ElasticSearchDatabase.ts index 9fb75a060..4282b527c 100644 --- a/src/components/database/ElasticSearchDatabase.ts +++ b/src/components/database/ElasticSearchDatabase.ts @@ -239,6 +239,7 @@ export class ElasticsearchDdoStateDatabase extends AbstractDdoStateDatabase { index: this.index, query }) + console.log('Query result: ', result) return result.hits.hits.map((hit: any) => { return normalizeDocumentId(hit._source, hit._id) }) diff --git a/src/components/database/sqliteCompute.ts b/src/components/database/sqliteCompute.ts index ad65e4b65..75b7c5716 100644 --- a/src/components/database/sqliteCompute.ts +++ b/src/components/database/sqliteCompute.ts @@ -2,7 +2,6 @@ import { typesenseSchemas, TypesenseSchema } from './TypesenseSchemas.js' import { C2DStatusNumber, C2DStatusText, - ComputeEnvironment, type DBComputeJob } from '../../@types/C2D/C2D.js' import sqlite3, { RunResult } from 'sqlite3' @@ -14,7 +13,12 @@ interface ComputeDatabaseProvider { updateJob(job: DBComputeJob): Promise getRunningJobs(engine?: string, environment?: string): Promise deleteJob(jobId: string): Promise - getFinishedJobs(): Promise + getFinishedJobs(environments?: string[]): Promise + getJobs( + environments?: string[], + fromTimestamp?: string, + consumerAddrs?: string[] + ): Promise } function getInternalStructure(job: DBComputeJob): any { @@ -36,7 +40,9 @@ function getInternalStructure(job: DBComputeJob): any { algoStopTimestamp: job.algoStopTimestamp, metadata: job.metadata, additionalViewers: job.additionalViewers, - terminationDetails: job.terminationDetails + terminationDetails: job.terminationDetails, + payment: job.payment, + algoDuration: job.algoDuration } return internalBlob } @@ -308,17 +314,27 @@ export class SQLiteCompute implements ComputeDatabaseProvider { }) } - getAllFinishedJobs(): Promise { - const selectSQL = ` - SELECT * FROM ${this.schema.name} WHERE dateFinished IS NOT NULL OR results IS NOT NULL - ` + getFinishedJobs(environments?: string[]): Promise { + let selectSQL = ` + SELECT * FROM ${this.schema.name} WHERE (dateFinished IS NOT NULL OR results IS NOT NULL) + ` + const params: string[] = [] + if (environments && environments.length > 0) { + const placeholders = environments.map(() => '?').join(',') + selectSQL += ` AND environment IN (${placeholders})` + params.push(...environments) + } + + selectSQL += ` ORDER BY dateFinished DESC` return new Promise((resolve, reject) => { - this.db.all(selectSQL, (err, rows: any[] | undefined) => { + this.db.all(selectSQL, params, (err, rows: any[] | undefined) => { if (err) { DATABASE_LOGGER.error(err.message) reject(err) } else { + // also decode the internal data into job data + // get them all running if (rows && rows.length > 0) { const all: DBComputeJob[] = rows.map((row) => { const body = generateJSONFromBlob(row.body) @@ -330,7 +346,11 @@ export class SQLiteCompute implements ComputeDatabaseProvider { }) resolve(all) } else { - DATABASE_LOGGER.info('Could not find any running C2D jobs!') + environments + ? 
DATABASE_LOGGER.info(
+                'No jobs found for the specified environments: ' + environments.join(',')
+              )
+            : DATABASE_LOGGER.info('No jobs found')
           resolve([])
         }
       }
@@ -338,13 +358,40 @@
     })
   }

-  getFinishedJobs(environment?: ComputeEnvironment): Promise<DBComputeJob[]> {
-    // get jobs that already finished (have results), for this environment, and clear storage + job if expired
-    const selectSQL = `
-    SELECT * FROM ${this.schema.name} WHERE environment = ? AND dateFinished IS NOT NULL OR results IS NOT NULL
-    `
+  getJobs(
+    environments?: string[],
+    fromTimestamp?: string,
+    consumerAddrs?: string[]
+  ): Promise<DBComputeJob[]> {
+    let selectSQL = `SELECT * FROM ${this.schema.name}`
+
+    const params: string[] = []
+    const conditions: string[] = []
+
+    if (environments && environments.length > 0) {
+      const placeholders = environments.map(() => '?').join(',')
+      conditions.push(`environment IN (${placeholders})`)
+      params.push(...environments)
+    }
+
+    if (fromTimestamp) {
+      conditions.push(`dateFinished >= ?`)
+      params.push(fromTimestamp)
+    }
+
+    if (consumerAddrs && consumerAddrs.length > 0) {
+      const placeholders = consumerAddrs.map(() => '?').join(',')
+      conditions.push(`owner NOT IN (${placeholders})`)
+      params.push(...consumerAddrs)
+    }
+
+    if (conditions.length > 0) {
+      selectSQL += ` WHERE ${conditions.join(' AND ')}`
+    }
+    selectSQL += ` ORDER BY dateCreated DESC`
+
     return new Promise((resolve, reject) => {
-      this.db.all(selectSQL, [environment.id], (err, rows: any[] | undefined) => {
+      this.db.all(selectSQL, params, (err, rows: any[] | undefined) => {
         if (err) {
           DATABASE_LOGGER.error(err.message)
           reject(err)
@@ -360,18 +407,13 @@
             const job: DBComputeJob = { ...row, ...body, maxJobDuration }
             return job
           })
-          if (!environment) {
-            resolve(all)
-          }
-          // filter them out
-          const filtered = all.filter((job) => {
-            return environment && environment.id === job.environment
-          })
-          resolve(filtered)
+          resolve(all)
         } else {
-          DATABASE_LOGGER.info(
-            'Could not find any jobs for the specified enviroment: ' + environment.id
-          )
+          environments
+            ? DATABASE_LOGGER.info(
+                'No jobs found for the specified environments: ' + environments.join(',')
+              )
+            : DATABASE_LOGGER.info('No jobs found')
           resolve([])
         }
       }
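`getJobs` assembles its WHERE clause dynamically: environment ids become an `IN (...)` filter, `fromTimestamp` a lower bound on `dateFinished`, and `consumerAddrs` a `NOT IN` exclusion on `owner` (note: an exclusion, not a match), with results ordered by `dateCreated DESC`. A hedged call-site sketch with placeholder values:

```typescript
// Placeholder ids/addresses; a real environment id is in `hash-envId` form.
const jobs = await c2dDatabase.getJobs(
  ['somehash-env1'], // only jobs from these environments
  '1700000000', // only jobs with dateFinished at or after this timestamp
  ['0x0000000000000000000000000000000000000000'] // exclude jobs owned by these addresses
)
console.log(`matched ${jobs.length} jobs`)
```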
diff --git a/src/components/httpRoutes/adminConfig.ts b/src/components/httpRoutes/adminConfig.ts
new file mode 100644
index 000000000..ed254c267
--- /dev/null
+++ b/src/components/httpRoutes/adminConfig.ts
@@ -0,0 +1,56 @@
+import express from 'express'
+import { HTTP_LOGGER } from '../../utils/logging/common.js'
+import { FetchConfigHandler } from '../core/admin/fetchConfigHandler.js'
+import { PushConfigHandler } from '../core/admin/pushConfigHandler.js'
+import { PROTOCOL_COMMANDS } from '../../utils/constants.js'
+import { Readable } from 'stream'
+import { streamToObject } from '../../utils/util.js'
+
+export const adminConfigRoutes = express.Router()
+
+adminConfigRoutes.get('/api/admin/config', express.json(), async (req, res) => {
+  try {
+    const { expiryTimestamp, signature } = req.body
+
+    const response = await new FetchConfigHandler(req.oceanNode).handle({
+      command: PROTOCOL_COMMANDS.FETCH_CONFIG,
+      expiryTimestamp,
+      signature
+    })
+
+    if (response.status.httpStatus === 200) {
+      const result = await streamToObject(response.stream as Readable)
+      res.status(200).json(result)
+    } else {
+      HTTP_LOGGER.log('LEVEL_ERROR', `Error fetching config: ${response.status.error}`)
+      res.status(response.status.httpStatus).json({ error: response.status.error })
+    }
+  } catch (error) {
+    HTTP_LOGGER.error(`Error fetching config: ${error.message}`)
+    res.status(500).send(`Internal Server Error: ${error.message}`)
+  }
+})
+
+adminConfigRoutes.post('/api/admin/config/update', express.json(), async (req, res) => {
+  try {
+    const { expiryTimestamp, signature, config } = req.body
+
+    const response = await new PushConfigHandler(req.oceanNode).handle({
+      command: PROTOCOL_COMMANDS.PUSH_CONFIG,
+      expiryTimestamp,
+      signature,
+      config
+    })
+
+    if (response.status.httpStatus === 200) {
+      const result = await streamToObject(response.stream as Readable)
+      res.status(200).json(result)
+    } else {
+      HTTP_LOGGER.log('LEVEL_ERROR', `Error pushing config: ${response.status.error}`)
+      res.status(response.status.httpStatus).json({ error: response.status.error })
+    }
+  } catch (error) {
+    HTTP_LOGGER.error(`Error pushing config: ${error.message}`)
+    res.status(500).send(`Internal Server Error: ${error.message}`)
+  }
+})
diff --git a/src/components/httpRoutes/aquarius.ts b/src/components/httpRoutes/aquarius.ts
index 0905b0aed..688813ff7 100644
--- a/src/components/httpRoutes/aquarius.ts
+++ b/src/components/httpRoutes/aquarius.ts
@@ -119,9 +119,28 @@ aquariusRoutes.get(`${AQUARIUS_API_BASE_PATH}/state/ddo`, async (req, res) => {

     if (result.stream) {
       const queryResult = JSON.parse(await streamToString(result.stream as Readable))
-      if (queryResult[0].found) {
-        res.json(queryResult[0].hits[0])
+
+      if (
+        queryResult &&
+        typeof queryResult === 'object' &&
+        queryResult.found !== undefined
+      ) {
+        if (queryResult.found > 0 && queryResult.hits && queryResult.hits.length > 0) {
+          res.json(queryResult.hits[0].document || queryResult.hits[0])
+        } else {
+          res.status(404).send('Not found')
+        }
+      } else if (Array.isArray(queryResult)) {
+        if (queryResult.length > 0) {
+          res.json(queryResult[0])
+        } else {
+          res.status(404).send('Not found')
+        }
       } else {
+        HTTP_LOGGER.log(
+          LOG_LEVELS_STR.LEVEL_DEBUG,
+          `Query result structure (not found): ${JSON.stringify(queryResult)}`
+        )
         res.status(404).send('Not found')
       }
     } else {
diff --git a/src/components/httpRoutes/index.ts
b/src/components/httpRoutes/index.ts index 4941ff579..cf5530c5f 100644 --- a/src/components/httpRoutes/index.ts +++ b/src/components/httpRoutes/index.ts @@ -14,6 +14,7 @@ import { jobsRoutes } from './jobs.js' import { addMapping, allRoutesMapping, findPathName } from './routeUtils.js' import { PolicyServerPassthroughRoute } from './policyServer.js' import { authRoutes } from './auth.js' +import { adminConfigRoutes } from './adminConfig.js' export * from './getOceanPeers.js' export * from './auth.js' @@ -59,6 +60,8 @@ httpRoutes.use(jobsRoutes) httpRoutes.use(PolicyServerPassthroughRoute) // auth routes httpRoutes.use(authRoutes) +// admin config routes +httpRoutes.use(adminConfigRoutes) export function getAllServiceEndpoints() { httpRoutes.stack.forEach(addMapping.bind(null, [])) diff --git a/src/components/httpRoutes/requestValidator.ts b/src/components/httpRoutes/requestValidator.ts index e8f8f613e..dd505fca4 100644 --- a/src/components/httpRoutes/requestValidator.ts +++ b/src/components/httpRoutes/requestValidator.ts @@ -46,10 +46,10 @@ function checkIP( ): CommonValidation { let onDenyList = false if (!Array.isArray(requestIP)) { - onDenyList = configuration.denyList?.ips.includes(requestIP) + onDenyList = configuration.denyList?.ips?.includes(requestIP) } else { for (const ip of requestIP) { - if (configuration.denyList?.ips.includes(ip)) { + if (configuration.denyList?.ips?.includes(ip)) { onDenyList = true break } diff --git a/src/components/storage/index.ts b/src/components/storage/index.ts index aa394fb6a..09991da7a 100644 --- a/src/components/storage/index.ts +++ b/src/components/storage/index.ts @@ -53,7 +53,8 @@ export abstract class Storage { const response = await axios({ method: 'get', url: input, - responseType: 'stream' + responseType: 'stream', + timeout: 30000 }) CORE_LOGGER.info(`Successfully fetched the file from ${input}`) @@ -278,7 +279,8 @@ export class UrlStorage extends Storage { const response = await axios({ url: file.url, method: file.method || 'get', - headers: file.headers + headers: file.headers, + timeout: 30000 }) return await encryptData(response.data, encryptionType) } @@ -295,8 +297,8 @@ export class ArweaveStorage extends Storage { } validate(): [boolean, string] { - if (!process.env.ARWEAVE_GATEWAY) { - return [false, 'Arweave gateway is not provided!'] + if (!this.config.arweaveGateway) { + return [false, 'Arweave gateway is not configured!'] } const file: ArweaveFileObject = this.getFile() as ArweaveFileObject if (!file.transactionId) { @@ -325,14 +327,14 @@ export class ArweaveStorage extends Storage { } getDownloadUrl(): string { - return urlJoin(process.env.ARWEAVE_GATEWAY, this.getFile().transactionId) + return urlJoin(this.config.arweaveGateway, this.getFile().transactionId) } async fetchSpecificFileMetadata( fileObject: ArweaveFileObject, forceChecksum: boolean ): Promise { - const url = urlJoin(process.env.ARWEAVE_GATEWAY, fileObject.transactionId) + const url = urlJoin(this.config.arweaveGateway, fileObject.transactionId) const { contentLength, contentType, contentChecksum } = await fetchFileMetadata( url, 'get', @@ -355,7 +357,7 @@ export class ArweaveStorage extends Storage { ): Promise { const file = this.getFile() const response = await axios({ - url: urlJoin(process.env.ARWEAVE_GATEWAY, file.transactionId), + url: urlJoin(this.config.arweaveGateway, file.transactionId), method: 'get' }) return await encryptData(response.data, encryptionType) @@ -373,8 +375,8 @@ export class IpfsStorage extends Storage { } validate(): [boolean, string] 
{ - if (!process.env.IPFS_GATEWAY) { - return [false, 'IPFS gateway is not provided!'] + if (!this.config.ipfsGateway) { + return [false, 'IPFS gateway is not configured!'] } const file: IpfsFileObject = this.getFile() as IpfsFileObject if (!file.hash) { @@ -397,14 +399,14 @@ export class IpfsStorage extends Storage { } getDownloadUrl(): string { - return urlJoin(process.env.IPFS_GATEWAY, urlJoin('/ipfs', this.getFile().hash)) + return urlJoin(this.config.ipfsGateway, urlJoin('/ipfs', this.getFile().hash)) } async fetchSpecificFileMetadata( fileObject: IpfsFileObject, forceChecksum: boolean ): Promise { - const url = urlJoin(process.env.IPFS_GATEWAY, urlJoin('/ipfs', fileObject.hash)) + const url = urlJoin(this.config.ipfsGateway, urlJoin('/ipfs', fileObject.hash)) const { contentLength, contentType, contentChecksum } = await fetchFileMetadata( url, 'get', diff --git a/src/test/config.json b/src/test/config.json index 95d0a5a77..e053f06db 100644 --- a/src/test/config.json +++ b/src/test/config.json @@ -1,96 +1,170 @@ { - "authorizedDecrypters": [], - "authorizedDecryptersList": [], - "allowedValidators": [], - "allowedValidatorsList": [], - "authorizedPublishers": [], - "authorizedPublishersList": [], - "keys": {}, - "hasIndexer": true, - "hasHttp": true, - "hasP2P": true, - "p2pConfig": { - "bootstrapNodes": [], - "bootstrapTimeout": 20000, - "bootstrapTagName": "bootstrap", - "bootstrapTagValue": 50, - "bootstrapTTL": 0, - "enableIPV4": true, - "enableIPV6": true, - "ipV4BindAddress": "0.0.0.0", - "ipV4BindTcpPort": 8000, - "ipV4BindWsPort": 0, - "ipV6BindAddress": "::1", - "ipV6BindTcpPort": 0, - "ipV6BindWsPort": 0, - "announceAddresses": [], - "pubsubPeerDiscoveryInterval": 10000, - "dhtMaxInboundStreams": 500, - "dhtMaxOutboundStreams": 500, - "dhtFilter": null, - "mDNSInterval": 20000, - "connectionsMaxParallelDials": 15, - "connectionsDialTimeout": 30000, - "upnp": true, - "autoNat": true, - "enableCircuitRelayServer": false, - "enableCircuitRelayClient": false, - "circuitRelays": 0, - "announcePrivateIp": false, - "filterAnnouncedAddresses": [ - "127.0.0.0/8", - "10.0.0.0/8", - "172.16.0.0/12", - "192.168.0.0/16", - "100.64.0.0/10", - "169.254.0.0/16", - "192.0.0.0/24", - "192.0.2.0/24", - "198.51.100.0/24", - "203.0.113.0/24", - "224.0.0.0/4", - "240.0.0.0/4" - ], - "minConnections": 1, - "maxConnections": 300, - "autoDialPeerRetryThreshold": 7200000, - "autoDialConcurrency": 5, - "maxPeerAddrsToDial": 5, - "autoDialInterval": 5000, - "enableNetworkStats": false - }, - "hasControlPanel": true, - "httpPort": 8001, - "dbConfig": { - "url": "http://localhost:9200", - "username": "", - "password": "", - "dbType": "elasticsearch" - }, - "supportedNetworks": { - "8996": { - "rpc": "http://127.0.0.1:8545", - "chainId": 8996, - "network": "development", - "chunkSize": 100 - } - }, - "indexingNetworks": [ - 8996 + "authorizedDecrypters": [], + "authorizedDecryptersList": [], + "allowedValidators": [], + "allowedValidatorsList": [], + "authorizedPublishers": [], + "authorizedPublishersList": [], + "keys": {}, + "hasIndexer": true, + "hasHttp": true, + "hasP2P": true, + "p2pConfig": { + "bootstrapNodes": [], + "bootstrapTimeout": 20000, + "bootstrapTagName": "bootstrap", + "bootstrapTagValue": 50, + "bootstrapTTL": 0, + "enableIPV4": true, + "enableIPV6": true, + "ipV4BindAddress": "0.0.0.0", + "ipV4BindTcpPort": 8000, + "ipV4BindWsPort": 0, + "ipV6BindAddress": "::1", + "ipV6BindTcpPort": 0, + "ipV6BindWsPort": 0, + "announceAddresses": [], + "pubsubPeerDiscoveryInterval": 10000, + 
"dhtMaxInboundStreams": 500, + "dhtMaxOutboundStreams": 500, + "dhtFilter": null, + "mDNSInterval": 20000, + "connectionsMaxParallelDials": 15, + "connectionsDialTimeout": 30000, + "upnp": true, + "autoNat": true, + "enableCircuitRelayServer": false, + "enableCircuitRelayClient": false, + "circuitRelays": 0, + "announcePrivateIp": false, + "filterAnnouncedAddresses": [ + "127.0.0.0/8", + "10.0.0.0/8", + "172.16.0.0/12", + "192.168.0.0/16", + "100.64.0.0/10", + "169.254.0.0/16", + "192.0.0.0/24", + "192.0.2.0/24", + "198.51.100.0/24", + "203.0.113.0/24", + "224.0.0.0/4", + "240.0.0.0/4" ], - "feeStrategy": {}, - "c2dClusters": [], - "c2dNodeUri": "", - "accountPurgatoryUrl": "", - "assetPurgatoryUrl": "", - "allowedAdmins": [], - "allowedAdminsList": [], - "rateLimit": {}, + "minConnections": 1, "maxConnections": 300, - "denyList": [], - "unsafeURLs": [], - "isBootstrap": false, - "claimDurationTimeout": 600, - "validateUnsignedDDO": true, - "jwtSecret": "ocean-node-secret" -} \ No newline at end of file + "autoDialPeerRetryThreshold": 7200000, + "autoDialConcurrency": 5, + "maxPeerAddrsToDial": 5, + "autoDialInterval": 5000, + "enableNetworkStats": false + }, + "hasControlPanel": true, + "httpPort": 8001, + "dbConfig": { + "url": "http://localhost:9200", + "username": "", + "password": "", + "dbType": "elasticsearch" + }, + "supportedNetworks": { + "8996": { + "rpc": "http://127.0.0.1:8545", + "chainId": 8996, + "network": "development", + "chunkSize": 100 + } + }, + "indexingNetworks": [8996], + "feeStrategy": {}, + "c2dClusters": [], + "c2dNodeUri": "", + "accountPurgatoryUrl": "", + "assetPurgatoryUrl": "", + "allowedAdmins": [], + "allowedAdminsList": [], + "rateLimit": 30, + "maxConnections": 30, + "denyList": { + "peers": [], + "ips": [] + }, + "unsafeURLs": [], + "isBootstrap": false, + "claimDurationTimeout": 600, + "validateUnsignedDDO": true, + "jwtSecret": "ocean-node-secret", + "dockerComputeEnvironments": [ + { + "socketPath": "/var/run/docker.sock", + "resources": [ + { + "id": "disk", + "total": 1000000000 + } + ], + "storageExpiry": 604800, + "maxJobDuration": 3600, + "fees": { + "11155111": [ + { + "feeToken": "0x1B083D8584dd3e6Ff37d04a6e7e82b5F622f3985", + "prices": [ + { + "id": "cpu", + "price": 1 + } + ] + }, + { + "feeToken": "0xfff9976782d46cc05630d1f6ebab18b2324d6b14", + "prices": [ + { + "id": "cpu", + "price": 1 + } + ] + } + ], + "11155420": [ + { + "feeToken": "0xf26c6C93f9f1d725e149d95f8E7B2334a406aD10", + "prices": [ + { + "id": "cpu", + "price": 1 + } + ] + }, + { + "feeToken": "0x4200000000000000000000000000000000000006", + "prices": [ + { + "id": "cpu", + "price": 1 + } + ] + } + ] + }, + "free": { + "maxJobDuration": 3600, + "maxJobs": 3, + "resources": [ + { + "id": "cpu", + "max": 1 + }, + { + "id": "ram", + "max": 1000000000 + }, + { + "id": "disk", + "max": 1000000000 + } + ] + } + } + ] +} diff --git a/src/test/integration/algorithmsAccess.test.ts b/src/test/integration/algorithmsAccess.test.ts index 45ed7e0bd..0d0f40e2d 100644 --- a/src/test/integration/algorithmsAccess.test.ts +++ b/src/test/integration/algorithmsAccess.test.ts @@ -235,15 +235,22 @@ describe('Trusted algorithms Flow', () => { console.log(resp) assert(resp, 'Failed to get response') assert(resp.status.httpStatus === 400, 'Failed to get 400 response') + assert( + resp.status.error === + `Algorithm ${publishedAlgoDataset.ddo.id} not allowed to run on the dataset: ${publishedComputeDataset.ddo.id}`, + 'Inconsistent error message' + ) assert(resp.stream === null, 'Failed to get stream') 
 })
 
   it('should add the algorithm to the dataset trusted algorithm list', async function () {
     this.timeout(DEFAULT_TEST_TIMEOUT * 5)
+    const config = await getConfiguration()
     const algoChecksums = await getAlgoChecksums(
       publishedAlgoDataset.ddo.id,
       publishedAlgoDataset.ddo.services[0].id,
-      oceanNode
+      oceanNode,
+      config
     )
     publishedComputeDataset.ddo.services[0].compute = {
       allowRawAlgorithm: false,
diff --git a/src/test/integration/compute.test.ts b/src/test/integration/compute.test.ts
index ebdb0d2d5..673a3bb42 100644
--- a/src/test/integration/compute.test.ts
+++ b/src/test/integration/compute.test.ts
@@ -219,10 +219,12 @@ describe('Compute', () => {
 
   it('should add the algorithm to the dataset trusted algorithm list', async function () {
     this.timeout(DEFAULT_TEST_TIMEOUT * 5)
+    const config = await getConfiguration()
     const algoChecksums = await getAlgoChecksums(
       publishedAlgoDataset.ddo.id,
       publishedAlgoDataset.ddo.services[0].id,
-      oceanNode
+      oceanNode,
+      config
     )
     publishedComputeDataset.ddo.services[0].compute = {
       allowRawAlgorithm: false,
@@ -1252,10 +1254,12 @@ describe('Compute', () => {
     )
     const algoDDOTest = ddo
     if (algoDDOTest) {
+      const config = await getConfiguration()
       const algoChecksums = await getAlgoChecksums(
         algoDDOTest.id,
         algoDDOTest.services[0].id,
-        oceanNode
+        oceanNode,
+        config
       )
       expect(algoChecksums.files).to.equal(
         'f6a7b95e4a2e3028957f69fdd2dac27bd5103986b2171bc8bfee68b52f874dcd'
@@ -1278,10 +1282,12 @@ describe('Compute', () => {
 
     const algoDDOTest = ddo
     if (algoDDOTest) {
+      const config = await getConfiguration()
       const algoChecksums = await getAlgoChecksums(
         algoDDOTest.id,
         algoDDOTest.services[0].id,
-        oceanNode
+        oceanNode,
+        config
       )
       const { ddo, wasTimeout } = await waitToIndex(
         datasetDDO.id,
@@ -1316,3 +1322,361 @@ describe('Compute', () => {
     indexer.stopAllThreads()
   })
 })
+
+describe('Compute Access Restrictions', () => {
+  let previousConfiguration: OverrideEnvConfig[]
+  let config: OceanNodeConfig
+  let dbconn: Database
+  let oceanNode: OceanNode
+  let provider: any
+  let publisherAccount: any
+  let computeEnvironments: any
+  let publishedComputeDataset: any
+  let publishedAlgoDataset: any
+  let paymentToken: any
+  let firstEnv: ComputeEnvironment
+  let accessListAddress: string
+
+  const wallet = new ethers.Wallet(
+    '0xef4b441145c1d0f3b4bc6d61d29f5c6e502359481152f869247c7a4244d45209'
+  )
+  const wallet2 = new ethers.Wallet(
+    '0xef4b441145c1d0f3b4bc6d61d29f5c6e502359481152f869247c7a4244d45210'
+  )
+  const wallet3 = new ethers.Wallet(
+    '0xef4b441145c1d0f3b4bc6d61d29f5c6e502359481152f869247c7a4244d4521A'
+  )
+  const mockSupportedNetworks: RPCS = getMockSupportedNetworks()
+  const computeJobDuration = 60 * 15
+
+  async function createPaidComputeCommand(
+    consumerAddr: string,
+    signerWallet: ethers.Wallet,
+    envId: string
+  ): Promise<ComputeStartCommand> {
+    const nonce = Date.now().toString()
+    const message = String(consumerAddr + publishedComputeDataset.ddo.id + nonce)
+    const consumerMessage = ethers.solidityPackedKeccak256(
+      ['bytes'],
+      [ethers.hexlify(ethers.toUtf8Bytes(message))]
+    )
+    const signature = await signerWallet.signMessage(ethers.toBeArray(consumerMessage))
+
+    return {
+      command: PROTOCOL_COMMANDS.COMPUTE_START,
+      consumerAddress: consumerAddr,
+      environment: envId,
+      signature,
+      nonce,
+      datasets: [
+        {
+          documentId: publishedComputeDataset.ddo.id,
+          serviceId: publishedComputeDataset.ddo.services[0].id,
+          transferTxId: '0x123'
+        }
+      ],
+      algorithm: {
+        documentId: publishedAlgoDataset.ddo.id,
+        serviceId: publishedAlgoDataset.ddo.services[0].id,
+        transferTxId: '0x123',
+        meta: publishedAlgoDataset.ddo.metadata.algorithm
+      },
+      payment: { chainId: DEVELOPMENT_CHAIN_ID, token: paymentToken },
+      maxJobDuration: computeJobDuration
+    }
+  }
+
+  async function createFreeComputeCommand(
+    consumerAddr: string,
+    signerWallet: ethers.Wallet,
+    envId: string
+  ): Promise<FreeComputeStartCommand> {
+    const nonce = Date.now().toString()
+    const consumerMessage = ethers.solidityPackedKeccak256(
+      ['bytes'],
+      [ethers.hexlify(ethers.toUtf8Bytes(nonce))]
+    )
+    const signature = await signerWallet.signMessage(ethers.toBeArray(consumerMessage))
+
+    return {
+      command: PROTOCOL_COMMANDS.FREE_COMPUTE_START,
+      consumerAddress: consumerAddr,
+      signature,
+      nonce,
+      environment: envId,
+      datasets: [
+        {
+          fileObject: computeAsset.services[0].files.files[0],
+          documentId: publishedComputeDataset.ddo.id,
+          serviceId: publishedComputeDataset.ddo.services[0].id
+        }
+      ],
+      algorithm: {
+        fileObject: algoAsset.services[0].files.files[0],
+        documentId: publishedAlgoDataset.ddo.id,
+        serviceId: publishedAlgoDataset.ddo.services[0].id,
+        meta: publishedAlgoDataset.ddo.metadata.algorithm
+      },
+      output: {}
+    }
+  }
+
+  describe('Address-based restrictions', () => {
+    before(async () => {
+      const artifactsAddresses = getOceanArtifactsAdresses()
+      paymentToken = artifactsAddresses.development.Ocean
+      const allowedAddress = await wallet.getAddress()
+      previousConfiguration = await setupEnvironment(
+        TEST_ENV_CONFIG_FILE,
+        buildEnvOverrideConfig(
+          [
+            ENVIRONMENT_VARIABLES.RPCS,
+            ENVIRONMENT_VARIABLES.INDEXER_NETWORKS,
+            ENVIRONMENT_VARIABLES.PRIVATE_KEY,
+            ENVIRONMENT_VARIABLES.AUTHORIZED_DECRYPTERS,
+            ENVIRONMENT_VARIABLES.ADDRESS_FILE,
+            ENVIRONMENT_VARIABLES.DOCKER_COMPUTE_ENVIRONMENTS
+          ],
+          [
+            JSON.stringify(mockSupportedNetworks),
+            JSON.stringify([DEVELOPMENT_CHAIN_ID]),
+            '0xc594c6e5def4bab63ac29eed19a134c130388f74f019bc74b8f4389df2837a58',
+            JSON.stringify(['0xe2DD09d719Da89e5a3D0F2549c7E24566e947260']),
+            `${homedir}/.ocean/ocean-contracts/artifacts/address.json`,
+            '[{"socketPath":"/var/run/docker.sock","resources":[{"id":"disk","total":10}],"storageExpiry":604800,"maxJobDuration":3600,"access":{"addresses":["' +
+              allowedAddress +
+              '"],"accessLists":[]},"fees":{"' +
+              DEVELOPMENT_CHAIN_ID +
+              '":[{"feeToken":"' +
+              paymentToken +
+              '","prices":[{"id":"cpu","price":1}]}]},"free":{"maxJobDuration":60,"maxJobs":3,"access":{"addresses":["' +
+              allowedAddress +
+              '"],"accessLists":[]},"resources":[{"id":"cpu","max":1},{"id":"ram","max":1},{"id":"disk","max":1}]}}]'
+          ]
+        )
+      )
+      config = await getConfiguration(true)
+      dbconn = await Database.init(config.dbConfig)
+      oceanNode = await OceanNode.getInstance(config, dbconn, null, null, null, true)
+      const indexer = new OceanIndexer(dbconn, config.indexingNetworks)
+      oceanNode.addIndexer(indexer)
+      oceanNode.addC2DEngines()
+
+      provider = new JsonRpcProvider('http://127.0.0.1:8545')
+      publisherAccount = await provider.getSigner(0)
+
+      publishedComputeDataset = await publishAsset(computeAsset, publisherAccount)
+      publishedAlgoDataset = await publishAsset(algoAsset, publisherAccount)
+
+      await waitToIndex(
+        publishedComputeDataset.ddo.id,
+        EVENTS.METADATA_CREATED,
+        DEFAULT_TEST_TIMEOUT
+      )
+      await waitToIndex(
+        publishedAlgoDataset.ddo.id,
+        EVENTS.METADATA_CREATED,
+        DEFAULT_TEST_TIMEOUT
+      )
+    })
+
+    it('Get compute environments with address restrictions', async () => {
+      const getEnvironmentsTask = { command: PROTOCOL_COMMANDS.COMPUTE_GET_ENVIRONMENTS }
+      const response = await new 
ComputeGetEnvironmentsHandler(oceanNode).handle( + getEnvironmentsTask + ) + computeEnvironments = await streamToObject(response.stream as Readable) + firstEnv = computeEnvironments[0] + assert(firstEnv.access, 'Access control should exist') + assert( + firstEnv.access.addresses.includes(await wallet.getAddress()), + 'Should have wallet address in allowed list' + ) + }) + + it('should deny access for paid compute when address not in allowed list', async () => { + const command = await createPaidComputeCommand( + await wallet3.getAddress(), + wallet3, + firstEnv.id + ) + const response = await new PaidComputeStartHandler(oceanNode).handle(command) + assert(response.status.httpStatus === 403, 'Should get 403 access denied') + }) + + it('should deny access for free compute when address not in allowed list', async () => { + const command = await createFreeComputeCommand( + await wallet3.getAddress(), + wallet3, + firstEnv.id + ) + const response = await new FreeComputeStartHandler(oceanNode).handle(command) + assert(response.status.httpStatus === 403, 'Should get 403 access denied') + }) + + after(async () => { + await tearDownEnvironment(previousConfiguration) + }) + }) + + describe('Access List restrictions', () => { + before(async () => { + const artifactsAddresses = getOceanArtifactsAdresses() + paymentToken = artifactsAddresses.development.Ocean + + provider = new JsonRpcProvider('http://127.0.0.1:8545') + publisherAccount = await provider.getSigner(0) + + const AccessListFactory = await import( + '@oceanprotocol/contracts/artifacts/contracts/accesslists/AccessListFactory.sol/AccessListFactory.json', + { assert: { type: 'json' } } + ) + + const factoryContract = new ethers.Contract( + artifactsAddresses.development.AccessListFactory, + AccessListFactory.default.abi, + publisherAccount + ) + + const tx = await factoryContract.deployAccessListContract( + 'ComputeAccessList', + 'CAL', + false, + await publisherAccount.getAddress(), + [await wallet.getAddress(), await wallet2.getAddress()], + ['https://oceanprotocol.com/nft/', 'https://oceanprotocol.com/nft/'] + ) + const txReceipt = await tx.wait() + const events = txReceipt?.logs?.filter((log: any) => { + return log.fragment?.name === 'NewAccessList' + }) + accessListAddress = events[0].args[0] + + const AccessListAbi = await import( + '@oceanprotocol/contracts/artifacts/contracts/accesslists/AccessList.sol/AccessList.json', + { assert: { type: 'json' } } + ) + const accessListContract = new ethers.Contract( + accessListAddress, + AccessListAbi.default.abi, + publisherAccount + ) + const wallet1Balance = await accessListContract.balanceOf(await wallet.getAddress()) + const wallet2Balance = await accessListContract.balanceOf( + await wallet2.getAddress() + ) + const wallet3Balance = await accessListContract.balanceOf( + await wallet3.getAddress() + ) + + if (Number(wallet1Balance) === 0 || Number(wallet2Balance) === 0) { + throw new Error('Access list tokens were not minted correctly') + } + + if (Number(wallet3Balance) > 0) { + throw new Error('Wallet3 should not have access list token') + } + + previousConfiguration = await setupEnvironment( + TEST_ENV_CONFIG_FILE, + buildEnvOverrideConfig( + [ + ENVIRONMENT_VARIABLES.RPCS, + ENVIRONMENT_VARIABLES.INDEXER_NETWORKS, + ENVIRONMENT_VARIABLES.PRIVATE_KEY, + ENVIRONMENT_VARIABLES.AUTHORIZED_DECRYPTERS, + ENVIRONMENT_VARIABLES.ADDRESS_FILE, + ENVIRONMENT_VARIABLES.DOCKER_COMPUTE_ENVIRONMENTS + ], + [ + JSON.stringify(mockSupportedNetworks), + JSON.stringify([DEVELOPMENT_CHAIN_ID]), + 
'0xc594c6e5def4bab63ac29eed19a134c130388f74f019bc74b8f4389df2837a58', + JSON.stringify(['0xe2DD09d719Da89e5a3D0F2549c7E24566e947260']), + `${homedir}/.ocean/ocean-contracts/artifacts/address.json`, + '[{"socketPath":"/var/run/docker.sock","resources":[{"id":"disk","total":10}],"storageExpiry":604800,"maxJobDuration":3600,"access":{"addresses":[],"accessLists":["' + + accessListAddress + + '"]},"fees":{"' + + DEVELOPMENT_CHAIN_ID + + '":[{"feeToken":"' + + paymentToken + + '","prices":[{"id":"cpu","price":1}]}]},"free":{"maxJobDuration":60,"maxJobs":3,"access":{"addresses":[],"accessLists":["' + + accessListAddress + + '"]},"resources":[{"id":"cpu","max":1},{"id":"ram","max":1},{"id":"disk","max":1}]}}]' + ] + ) + ) + config = await getConfiguration(true) + dbconn = await Database.init(config.dbConfig) + oceanNode = await OceanNode.getInstance(config, dbconn, null, null, null, true) + const indexer = new OceanIndexer(dbconn, config.indexingNetworks) + oceanNode.addIndexer(indexer) + oceanNode.addC2DEngines() + + publishedComputeDataset = await publishAsset(computeAsset, publisherAccount) + publishedAlgoDataset = await publishAsset(algoAsset, publisherAccount) + + await waitToIndex( + publishedComputeDataset.ddo.id, + EVENTS.METADATA_CREATED, + DEFAULT_TEST_TIMEOUT + ) + await waitToIndex( + publishedAlgoDataset.ddo.id, + EVENTS.METADATA_CREATED, + DEFAULT_TEST_TIMEOUT + ) + }) + + it('Get compute environments with access list restrictions', async () => { + const getEnvironmentsTask = { command: PROTOCOL_COMMANDS.COMPUTE_GET_ENVIRONMENTS } + const response = await new ComputeGetEnvironmentsHandler(oceanNode).handle( + getEnvironmentsTask + ) + computeEnvironments = await streamToObject(response.stream as Readable) + firstEnv = computeEnvironments[0] + assert(firstEnv.access, 'Access control should exist') + assert( + firstEnv.access.accessLists.includes(accessListAddress), + 'Should have access list address' + ) + }) + + it('should allow access for paid compute when address is in access list', async () => { + const command = await createPaidComputeCommand( + await wallet.getAddress(), + wallet, + firstEnv.id + ) + const response = await new PaidComputeStartHandler(oceanNode).handle(command) + expect(response.status.httpStatus).to.not.equal(403) + }) + + it('should deny access for paid compute when address not in access list', async () => { + const command = await createPaidComputeCommand( + await wallet3.getAddress(), + wallet3, + firstEnv.id + ) + const response = await new PaidComputeStartHandler(oceanNode).handle(command) + assert( + response.status.httpStatus === 403, + `Expected 403 but got ${response.status.httpStatus}: ${response.status.error}` + ) + }) + + it('should allow access for free compute when address is in access list', async () => { + const command = await createFreeComputeCommand( + await wallet2.getAddress(), + wallet2, + firstEnv.id + ) + const response = await new FreeComputeStartHandler(oceanNode).handle(command) + expect(response.status.httpStatus).to.not.equal(403) + }) + + after(async () => { + await tearDownEnvironment(previousConfiguration) + }) + }) +}) diff --git a/src/test/integration/configAdmin.test.ts b/src/test/integration/configAdmin.test.ts new file mode 100644 index 000000000..6b2700d6e --- /dev/null +++ b/src/test/integration/configAdmin.test.ts @@ -0,0 +1,371 @@ +import { Wallet } from 'ethers' +import { Database } from '../../components/database/index.js' +import { getConfiguration, loadConfigFromFile } from '../../utils/index.js' +import { + 
  DEFAULT_TEST_TIMEOUT,
+  OverrideEnvConfig,
+  TEST_ENV_CONFIG_FILE,
+  buildEnvOverrideConfig,
+  setupEnvironment,
+  tearDownEnvironment,
+  getMockSupportedNetworks
+} from '../utils/utils.js'
+import { ENVIRONMENT_VARIABLES, PROTOCOL_COMMANDS } from '../../utils/constants.js'
+import { OceanNodeConfig } from '../../@types/OceanNode.js'
+import { RPCS } from '../../@types/blockchain.js'
+import { OceanNode } from '../../OceanNode.js'
+import { FetchConfigHandler } from '../../components/core/admin/fetchConfigHandler.js'
+import { PushConfigHandler } from '../../components/core/admin/pushConfigHandler.js'
+import { streamToObject } from '../../utils/util.js'
+import { Readable } from 'stream'
+import { expect } from 'chai'
+
+describe('Config Admin Endpoints Integration Tests', () => {
+  let config: OceanNodeConfig
+  let database: Database
+  let adminAccount: Wallet
+  let previousConfiguration: OverrideEnvConfig[]
+  let oceanNode: OceanNode
+
+  const mockSupportedNetworks: RPCS = getMockSupportedNetworks()
+
+  before(async () => {
+    const adminPrivateKey =
+      '0xc594c6e5def4bab63ac29eed19a134c130388f74f019bc74b8f4389df2837a58'
+    adminAccount = new Wallet(adminPrivateKey)
+    const adminAddress = await adminAccount.getAddress()
+
+    previousConfiguration = await setupEnvironment(
+      TEST_ENV_CONFIG_FILE,
+      buildEnvOverrideConfig(
+        [
+          ENVIRONMENT_VARIABLES.RPCS,
+          ENVIRONMENT_VARIABLES.INDEXER_NETWORKS,
+          ENVIRONMENT_VARIABLES.ALLOWED_ADMINS
+        ],
+        [
+          JSON.stringify(mockSupportedNetworks),
+          JSON.stringify([8996]),
+          JSON.stringify([adminAddress])
+        ]
+      )
+    )
+
+    config = await getConfiguration(true)
+    database = await Database.init(config.dbConfig)
+    oceanNode = await OceanNode.getInstance(config, database)
+  })
+
+  after(async () => {
+    await tearDownEnvironment(previousConfiguration)
+  })
+
+  const getAdminSignature = async (expiryTimestamp: number): Promise<string> => {
+    const message = expiryTimestamp.toString()
+    return await adminAccount.signMessage(message)
+  }
+
+  describe('Fetch Config Tests', () => {
+    it('should fetch current config', async function () {
+      this.timeout(DEFAULT_TEST_TIMEOUT)
+
+      const expiryTimestamp = Date.now() + 60000
+      const signature = await getAdminSignature(expiryTimestamp)
+
+      const handlerResponse = await new FetchConfigHandler(oceanNode).handle({
+        command: PROTOCOL_COMMANDS.FETCH_CONFIG,
+        expiryTimestamp,
+        signature
+      })
+
+      expect(handlerResponse.status.httpStatus).to.equal(200)
+
+      const response = await streamToObject(handlerResponse.stream as Readable)
+      expect(response).to.be.an('object')
+      expect(response).to.have.property('hasHttp')
+      expect(response).to.have.property('hasP2P')
+    })
+
+    it('should hide private key in fetched config', async function () {
+      this.timeout(DEFAULT_TEST_TIMEOUT)
+
+      const expiryTimestamp = Date.now() + 60000
+      const signature = await getAdminSignature(expiryTimestamp)
+
+      const handlerResponse = await new FetchConfigHandler(oceanNode).handle({
+        command: PROTOCOL_COMMANDS.FETCH_CONFIG,
+        expiryTimestamp,
+        signature
+      })
+
+      expect(handlerResponse.status.httpStatus).to.equal(200)
+
+      const response = await streamToObject(handlerResponse.stream as Readable)
+      expect(response).to.have.property('keys')
+      expect(response.keys).to.have.property('privateKey')
+      expect(response.keys.privateKey).to.equal('[*** HIDDEN CONTENT ***]')
+    })
+
+    it('should reject fetch config with signature from non-admin', async function () {
+      this.timeout(DEFAULT_TEST_TIMEOUT)
+
+      const nonAdminPrivateKey =
'0xef4b441145c1d0f3b4bc6d61d29f5c6e502359481152f869247c7a4244d45209' + const nonAdminAccount = new Wallet(nonAdminPrivateKey) + + const expiryTimestamp = Date.now() + 60000 + const message = expiryTimestamp.toString() + const invalidSignature = await nonAdminAccount.signMessage(message) + + const handlerResponse = await new FetchConfigHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.FETCH_CONFIG, + expiryTimestamp, + signature: invalidSignature + }) + + expect(handlerResponse.status.httpStatus).to.not.equal(200) + }) + + it('should reject fetch config with expired timestamp', async function () { + this.timeout(DEFAULT_TEST_TIMEOUT) + + const expiryTimestamp = Date.now() - 60000 + const signature = await getAdminSignature(expiryTimestamp) + + const handlerResponse = await new FetchConfigHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.FETCH_CONFIG, + expiryTimestamp, + signature + }) + + expect(handlerResponse.status.httpStatus).to.not.equal(200) + }) + }) + + describe('Push Config Tests', () => { + it('should push config changes and reload node', async function () { + this.timeout(DEFAULT_TEST_TIMEOUT) + + const expiryTimestamp = Date.now() + 60000 + const signature = await getAdminSignature(expiryTimestamp) + + const newConfig = { + rateLimit: 100, + maxConnections: 200 + } + + const handlerResponse = await new PushConfigHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PUSH_CONFIG, + expiryTimestamp, + signature, + config: newConfig + }) + + expect(handlerResponse.status.httpStatus).to.equal(200) + + const response = await streamToObject(handlerResponse.stream as Readable) + expect(response).to.be.an('object') + expect(response.rateLimit).to.equal(100) + expect(response.maxConnections).to.equal(200) + + const savedConfig = loadConfigFromFile() + expect(savedConfig.rateLimit).to.equal(100) + expect(savedConfig.maxConnections).to.equal(200) + + const restoreConfig = { + rateLimit: 30, + maxConnections: 30 + } + + await new PushConfigHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PUSH_CONFIG, + expiryTimestamp: Date.now() + 60000, + signature: await getAdminSignature(Date.now() + 60000), + config: restoreConfig + }) + }) + + it('should merge new config with existing config', async function () { + this.timeout(DEFAULT_TEST_TIMEOUT) + + const expiryTimestamp = Date.now() + 60000 + const signature = await getAdminSignature(expiryTimestamp) + + const fetchResponse = await new FetchConfigHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.FETCH_CONFIG, + expiryTimestamp, + signature + }) + + const currentConfig = await streamToObject(fetchResponse.stream as Readable) + + const partialConfig = { + rateLimit: 75 + } + + const pushResponse = await new PushConfigHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PUSH_CONFIG, + expiryTimestamp: Date.now() + 60000, + signature: await getAdminSignature(Date.now() + 60000), + config: partialConfig + }) + + const updatedConfig = await streamToObject(pushResponse.stream as Readable) + + expect(updatedConfig.rateLimit).to.equal(75) + expect(updatedConfig.maxConnections).to.equal(currentConfig.maxConnections) + + await new PushConfigHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PUSH_CONFIG, + expiryTimestamp: Date.now() + 60000, + signature: await getAdminSignature(Date.now() + 60000), + config: { rateLimit: currentConfig.rateLimit } + }) + }) + + it('should hide private key in push config response', async function () { + this.timeout(DEFAULT_TEST_TIMEOUT) + + const expiryTimestamp = Date.now() + 60000 
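+      // 60s validity window; the handlers reject any expiryTimestamp already in
+      // the past (cf. the expired-timestamp tests in this file)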
+ const signature = await getAdminSignature(expiryTimestamp) + + const response = await new PushConfigHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PUSH_CONFIG, + expiryTimestamp, + signature, + config: { rateLimit: 50 } + }) + + expect(response.status.httpStatus).to.equal(200) + + const updatedConfig = await streamToObject(response.stream as Readable) + expect(updatedConfig).to.have.property('keys') + expect(updatedConfig.keys).to.have.property('privateKey') + expect(updatedConfig.keys.privateKey).to.equal('[*** HIDDEN CONTENT ***]') + + await new PushConfigHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PUSH_CONFIG, + expiryTimestamp: Date.now() + 60000, + signature: await getAdminSignature(Date.now() + 60000), + config: { rateLimit: 30 } + }) + }) + + it('should reject push config with signature from non-admin', async function () { + this.timeout(DEFAULT_TEST_TIMEOUT) + + const nonAdminPrivateKey = + '0xef4b441145c1d0f3b4bc6d61d29f5c6e502359481152f869247c7a4244d45209' + const nonAdminAccount = new Wallet(nonAdminPrivateKey) + + const expiryTimestamp = Date.now() + 60000 + const message = expiryTimestamp.toString() + const invalidSignature = await nonAdminAccount.signMessage(message) + + const handlerResponse = await new PushConfigHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PUSH_CONFIG, + expiryTimestamp, + signature: invalidSignature, + config: { rateLimit: 100 } + }) + + expect(handlerResponse.status.httpStatus).to.not.equal(200) + }) + + it('should reject push config with expired timestamp', async function () { + this.timeout(DEFAULT_TEST_TIMEOUT) + + const expiryTimestamp = Date.now() - 60000 + const signature = await getAdminSignature(expiryTimestamp) + + const handlerResponse = await new PushConfigHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PUSH_CONFIG, + expiryTimestamp, + signature, + config: { rateLimit: 100 } + }) + + expect(handlerResponse.status.httpStatus).to.not.equal(200) + }) + + it('should reject push config with missing config parameter', async function () { + this.timeout(DEFAULT_TEST_TIMEOUT) + + const expiryTimestamp = Date.now() + 60000 + const signature = await getAdminSignature(expiryTimestamp) + + const handlerResponse = await new PushConfigHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PUSH_CONFIG, + expiryTimestamp, + signature, + config: undefined + }) + + expect(handlerResponse.status.httpStatus).to.equal(400) + }) + + it('should reject push config with invalid config type', async function () { + this.timeout(DEFAULT_TEST_TIMEOUT) + + const expiryTimestamp = Date.now() + 60000 + const signature = await getAdminSignature(expiryTimestamp) + + const handlerResponse = await new PushConfigHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PUSH_CONFIG, + expiryTimestamp, + signature, + config: 'invalid' as any + }) + + expect(handlerResponse.status.httpStatus).to.equal(400) + }) + + it('should reject push config with invalid field values (Zod validation)', async function () { + this.timeout(DEFAULT_TEST_TIMEOUT) + + const expiryTimestamp = Date.now() + 60000 + const signature = await getAdminSignature(expiryTimestamp) + + const handlerResponse = await new PushConfigHandler(oceanNode).handle({ + command: PROTOCOL_COMMANDS.PUSH_CONFIG, + expiryTimestamp, + signature, + config: { rateLimit: 'not-a-number' as any } + }) + + expect(handlerResponse.status.httpStatus).to.equal(400) + expect(handlerResponse.status.error).to.not.equal(undefined) + expect(handlerResponse.stream).to.equal(null) + }) + }) + + 
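+  // Recap of the admin update flow exercised above, as a minimal sketch only
+  // (adminWallet is a placeholder for any ethers Wallet listed in ALLOWED_ADMINS):
+  //
+  //   const expiry = Date.now() + 60000 // must be in the future
+  //   const res = await new PushConfigHandler(oceanNode).handle({
+  //     command: PROTOCOL_COMMANDS.PUSH_CONFIG,
+  //     expiryTimestamp: expiry,
+  //     signature: await adminWallet.signMessage(expiry.toString()),
+  //     config: { rateLimit: 60 } // partial config; unspecified fields keep their current values
+  //   })
+  //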
+  describe('Config Reload Tests', () => {
+    it('should reload node configuration after push', async function () {
+      this.timeout(DEFAULT_TEST_TIMEOUT)
+
+      const expiryTimestamp = Date.now() + 60000
+      const signature = await getAdminSignature(expiryTimestamp)
+
+      const configBefore = await getConfiguration()
+
+      await new PushConfigHandler(oceanNode).handle({
+        command: PROTOCOL_COMMANDS.PUSH_CONFIG,
+        expiryTimestamp,
+        signature,
+        config: { rateLimit: 999 }
+      })
+
+      const configAfter = await getConfiguration()
+
+      expect(configAfter.rateLimit).to.equal(999)
+      expect(configAfter.rateLimit).to.not.equal(configBefore.rateLimit)
+
+      await new PushConfigHandler(oceanNode).handle({
+        command: PROTOCOL_COMMANDS.PUSH_CONFIG,
+        expiryTimestamp: Date.now() + 60000,
+        signature: await getAdminSignature(Date.now() + 60000),
+        config: { rateLimit: configBefore.rateLimit }
+      })
+    })
+  })
+})
diff --git a/src/test/integration/encryptDecryptDDO.test.ts b/src/test/integration/encryptDecryptDDO.test.ts
index 3a25456f8..8ee484558 100644
--- a/src/test/integration/encryptDecryptDDO.test.ts
+++ b/src/test/integration/encryptDecryptDDO.test.ts
@@ -334,13 +334,7 @@ describe('Should encrypt and decrypt DDO', () => {
   it('should decrypt ddo with transactionId and return it', async () => {
     const nonce = Date.now().toString()
     const wallet = new ethers.Wallet(process.env.PRIVATE_KEY)
-    const message = String(
-      txReceiptEncryptDDO.hash +
-        dataNftAddress +
-        publisherAddress +
-        chainId.toString() +
-        nonce
-    )
+    const message = String(txReceiptEncryptDDO.hash + publisherAddress + chainId + nonce)
     const messageHash = ethers.solidityPackedKeccak256(
       ['bytes'],
       [ethers.hexlify(ethers.toUtf8Bytes(message))]
@@ -366,7 +360,7 @@ describe('Should encrypt and decrypt DDO', () => {
   it('should decrypt ddo with encryptedDocument, flags, documentHash and return it', async () => {
     const nonce = Date.now().toString()
     const wallet = new ethers.Wallet(process.env.PRIVATE_KEY)
-    const message = String(dataNftAddress + publisherAddress + chainId.toString() + nonce)
+    const message = String(dataNftAddress + publisherAddress + chainId + nonce)
     const messageHash = ethers.solidityPackedKeccak256(
       ['bytes'],
       [ethers.hexlify(ethers.toUtf8Bytes(message))]
diff --git a/src/test/integration/getJobs.test.ts b/src/test/integration/getJobs.test.ts
new file mode 100644
index 000000000..cc8ca53e6
--- /dev/null
+++ b/src/test/integration/getJobs.test.ts
@@ -0,0 +1,149 @@
+import { expect } from 'chai'
+import { Readable } from 'stream'
+import { Database } from '../../components/database/index.js'
+import { OceanNode } from '../../OceanNode.js'
+import { GetJobsHandler } from '../../components/core/handler/getJobs.js'
+import {
+  C2DStatusNumber,
+  C2DStatusText,
+  type DBComputeJob
+} from '../../@types/C2D/C2D.js'
+import { PROTOCOL_COMMANDS, getConfiguration } from '../../utils/index.js'
+import {
+  DEFAULT_TEST_TIMEOUT,
+  OverrideEnvConfig,
+  TEST_ENV_CONFIG_FILE,
+  setupEnvironment,
+  tearDownEnvironment
+} from '../utils/utils.js'
+import { streamToObject } from '../../utils/util.js'
+
+// Helper to create a minimal valid DBComputeJob
+function buildJob(overrides: Partial<DBComputeJob> = {}): DBComputeJob {
+  const nowSec = Math.floor(Date.now() / 1000).toString()
+  return {
+    owner: overrides.owner || '0xowner_test',
+    did: overrides.did,
+    jobId: overrides.jobId || `job-${Date.now()}-${Math.random().toString(36).slice(2)}`,
+    dateCreated: overrides.dateCreated || nowSec,
+    dateFinished: overrides.dateFinished || (null as unknown as string),
+    status: 
overrides.status ?? C2DStatusNumber.JobStarted, + statusText: overrides.statusText || C2DStatusText.JobStarted, + results: overrides.results || [], + inputDID: overrides.inputDID, + algoDID: overrides.algoDID, + maxJobDuration: overrides.maxJobDuration, + agreementId: overrides.agreementId, + environment: overrides.environment || 'env-default', + metadata: overrides.metadata, + terminationDetails: overrides.terminationDetails, + + clusterHash: overrides.clusterHash || '', + configlogURL: overrides.configlogURL || '', + publishlogURL: overrides.publishlogURL || '', + algologURL: overrides.algologURL || '', + outputsURL: overrides.outputsURL || '', + stopRequested: overrides.stopRequested ?? false, + algorithm: overrides.algorithm as any, + assets: overrides.assets || [], + isRunning: overrides.isRunning ?? false, + isStarted: overrides.isStarted ?? true, + containerImage: overrides.containerImage || '', + isFree: overrides.isFree ?? true, + algoStartTimestamp: overrides.algoStartTimestamp || nowSec, + algoStopTimestamp: overrides.algoStopTimestamp || nowSec, + resources: overrides.resources || [], + payment: overrides.payment, + additionalViewers: overrides.additionalViewers || [], + algoDuration: overrides.algoDuration || 0 + } +} + +describe('GetJobsHandler integration', () => { + let previousConfiguration: OverrideEnvConfig[] + let oceanNode: OceanNode + let db: Database + let handler: GetJobsHandler + + const uniqueEnv = `env-it-${Date.now()}` + const ownerA = '0xAa0000000000000000000000000000000000000' + const ownerB = '0xBb0000000000000000000000000000000000000' + + before(async () => { + previousConfiguration = await setupEnvironment(TEST_ENV_CONFIG_FILE) + const config = await getConfiguration(true) + db = await Database.init(config.dbConfig) + oceanNode = await OceanNode.getInstance(config, db) + + handler = new GetJobsHandler(oceanNode) + + const jobA = buildJob({ owner: ownerA, environment: uniqueEnv }) + const jobB = buildJob({ owner: ownerB, environment: uniqueEnv }) + + await db.c2d.newJob(jobA) + await db.c2d.newJob(jobB) + + const finishedAt = Math.floor(Date.now() / 1000).toString() + + jobA.status = C2DStatusNumber.JobFinished + jobA.statusText = C2DStatusText.JobFinished + jobA.dateFinished = finishedAt + jobA.isRunning = false + + jobB.status = C2DStatusNumber.JobFinished + jobB.statusText = C2DStatusText.JobFinished + jobB.dateFinished = finishedAt + jobB.isRunning = false + + await db.c2d.updateJob(jobA) + await db.c2d.updateJob(jobB) + }) + + after(async () => { + await tearDownEnvironment(previousConfiguration) + }) + + it('validate should fail when fromTimestamp is not a string', async () => { + const validation = await handler.validate({ + command: PROTOCOL_COMMANDS.JOBS, + fromTimestamp: 12345 + } as any) + expect(validation.valid).to.be.equal(false) + expect(validation.reason).to.contain('fromTimestamp') + }) + + it('should return finished jobs for a specific environment since timestamp', async function () { + this.timeout(DEFAULT_TEST_TIMEOUT) + + const fromTs = Math.floor(Date.now() / 1000 - 10).toString() + const resp = await handler.handle({ + command: PROTOCOL_COMMANDS.JOBS, + environments: [uniqueEnv], + fromTimestamp: fromTs + }) + + expect(resp.status.httpStatus).to.equal(200) + const jobs = (await streamToObject(resp.stream as Readable)) as any[] + + const filtered = jobs.filter((j) => j.environment === uniqueEnv) + expect(filtered.length).to.be.greaterThanOrEqual(2) + expect(filtered.every((j) => Number(j.dateFinished) >= Number(fromTs))).to.equal(true) 
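+    // both jobs seeded in before() target uniqueEnv and were marked finished,
+    // hence the lower bound of 2 rather than an exact count (other suites may share the DB)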
+ }) + + it('should exclude jobs owned by specified consumer addresses', async function () { + this.timeout(DEFAULT_TEST_TIMEOUT) + + const resp = await handler.handle({ + command: PROTOCOL_COMMANDS.JOBS, + environments: [uniqueEnv], + consumerAddrs: [ownerA] + }) + + expect(resp.status.httpStatus).to.equal(200) + const jobs = (await streamToObject(resp.stream as Readable)) as any[] + + const owners = jobs.filter((j) => j.environment === uniqueEnv).map((j) => j.owner) + expect(owners.includes(ownerA)).to.equal(false) + expect(owners.includes(ownerB)).to.equal(true) + }) +}) diff --git a/src/test/integration/indexer.test.ts b/src/test/integration/indexer.test.ts index c3f3c7ca8..0f1f19a2b 100644 --- a/src/test/integration/indexer.test.ts +++ b/src/test/integration/indexer.test.ts @@ -15,6 +15,7 @@ import ERC721Factory from '@oceanprotocol/contracts/artifacts/contracts/ERC721Fa import ERC721Template from '@oceanprotocol/contracts/artifacts/contracts/templates/ERC721Template.sol/ERC721Template.json' assert { type: 'json' } import ERC20Template from '@oceanprotocol/contracts/artifacts/contracts/templates/ERC20TemplateEnterprise.sol/ERC20TemplateEnterprise.json' assert { type: 'json' } import { Database } from '../../components/database/index.js' +import { DatabaseFactory } from '../../components/database/DatabaseFactory.js' import { INDEXER_CRAWLING_EVENT_EMITTER, OceanIndexer @@ -269,12 +270,10 @@ describe('Indexer stores a new metadata events and orders.', () => { it('should find the state of the ddo using query ddo state handler', async function () { const queryDdoStateHandler = new QueryDdoStateHandler(oceanNode) - // query using the did + const config = await getConfiguration(true) + const queryStrategy = await DatabaseFactory.createDdoStateQuery(config.dbConfig) const queryDdoState: QueryCommand = { - query: { - q: resolvedDDO.id, - query_by: 'did' - }, + query: queryStrategy.buildQuery(resolvedDDO.id), command: PROTOCOL_COMMANDS.QUERY } const response = await queryDdoStateHandler.handle(queryDdoState) @@ -282,14 +281,24 @@ describe('Indexer stores a new metadata events and orders.', () => { assert(response.status.httpStatus === 200, 'Failed to get 200 response') assert(response.stream, 'Failed to get stream') const result = await streamToObject(response.stream as Readable) - if (result) { - // Elastic Search returns Array type - const ddoState = Array.isArray(result) ? 
result[0] : result.hits[0].document - expect(resolvedDDO.id).to.equal(ddoState.did) - expect(ddoState.valid).to.equal(true) - expect(ddoState.error).to.equal(' ') + assert(result, 'Failed to get result from stream') + + let ddoState + if (Array.isArray(result)) { + assert(result.length > 0, 'No ddo state found in results array') + ddoState = result[0] + } else if (result.hits && Array.isArray(result.hits)) { + assert(result.hits.length > 0, 'No ddo state found in results hits') + ddoState = result.hits[0].document + } else { + assert.fail('Unexpected result format from database') } + assert(ddoState, 'ddoState is undefined') + expect(resolvedDDO.id).to.equal(ddoState.did) + expect(ddoState.valid).to.equal(true) + expect(ddoState.error).to.equal(' ') + // add txId check once we have that as change merged and the event will be indexed }) diff --git a/src/test/unit/commands.test.ts b/src/test/unit/commands.test.ts index d0c79cf23..63bd57852 100644 --- a/src/test/unit/commands.test.ts +++ b/src/test/unit/commands.test.ts @@ -21,7 +21,8 @@ import { NonceCommand, QueryCommand, StatusCommand, - ValidateDDOCommand + ValidateDDOCommand, + GetJobsCommand } from '../../@types/commands.js' import { NonceHandler } from '../../components/core/handler/nonceHandler.js' import { DownloadHandler } from '../../components/core/handler/downloadHandler.js' @@ -48,6 +49,7 @@ import { StopNodeHandler } from '../../components/core/admin/stopNodeHandler.js' import { ReindexTxHandler } from '../../components/core/admin/reindexTxHandler.js' import { ReindexChainHandler } from '../../components/core/admin/reindexChainHandler.js' import { CollectFeesHandler } from '../../components/core/admin/collectFeesHandler.js' +import { GetJobsHandler } from '../../components/core/handler/getJobs.js' describe('Commands and handlers', () => { it('Check that all supported commands have registered handlers', () => { @@ -336,5 +338,15 @@ describe('Commands and handlers', () => { maxJobDuration: 60 } expect(initComputeHandler.validate(computeInitCommand).valid).to.be.equal(false) + // ----------------------------------------- + // JobsHandler + const jobsHandler: GetJobsHandler = CoreHandlersRegistry.getInstance(node).getHandler( + PROTOCOL_COMMANDS.JOBS + ) + const getJobsCommand: GetJobsCommand = { + command: PROTOCOL_COMMANDS.JOBS + } + expect(jobsHandler.validate(getJobsCommand).valid).to.be.equal(true) + // ----------------------------------------- }) }) diff --git a/src/test/unit/compute.test.ts b/src/test/unit/compute.test.ts index bc53cf9cf..31bd3b21e 100644 --- a/src/test/unit/compute.test.ts +++ b/src/test/unit/compute.test.ts @@ -30,7 +30,6 @@ import { import { OceanNodeConfig } from '../../@types/OceanNode.js' import { ENVIRONMENT_VARIABLES } from '../../utils/constants.js' import { dockerImageManifest } from '../data/assets.js' -// import { omitDBComputeFieldsFromComputeJob } from '../../components/c2d/index.js' import { checkManifestPlatform } from '../../components/c2d/compute_engine_docker.js' describe('Compute Jobs Database', () => { @@ -91,11 +90,13 @@ describe('Compute Jobs Database', () => { token: '0x123', lockTx: '0xe2DD09d719Da89e5a3D0F2549c7E24566e947260fdc', claimTx: '0xe2DD09d719Da89e5a3D0F2549c7E24566e947260fdc', - chainId: 8996 + chainId: 8996, + cost: 0 }, isFree: false, algoStartTimestamp: '0', - algoStopTimestamp: '0' + algoStopTimestamp: '0', + algoDuration: 0 } jobId = await db.newJob(job) @@ -159,11 +160,13 @@ describe('Compute Jobs Database', () => { token: '0x123', lockTx: 
'0xe2DD09d719Da89e5a3D0F2549c7E24566e947260fdc', claimTx: '0xe2DD09d719Da89e5a3D0F2549c7E24566e947260fdc', - chainId: 8996 + chainId: 8996, + cost: 0 }, isFree: false, algoStartTimestamp: '0', - algoStopTimestamp: '0' + algoStopTimestamp: '0', + algoDuration: 0 } const jobId = await db.newJob(job) @@ -200,7 +203,7 @@ describe('Compute Jobs Database', () => { // it('should convert DBComputeJob to ComputeJob and omit internal DB data', () => { // const source: any = completeDBComputeJob // const output: ComputeJob = omitDBComputeFieldsFromComputeJob(source as DBComputeJob) - // console.log('output: ', JSON.stringify(output, null, 2)) + // expect(Object.prototype.hasOwnProperty.call(output, 'clusterHash')).to.be.equal(false) // expect(Object.prototype.hasOwnProperty.call(output, 'configlogURL')).to.be.equal( // false diff --git a/src/test/unit/config.test.ts b/src/test/unit/config.test.ts index fb83efbb6..87a859e4b 100644 --- a/src/test/unit/config.test.ts +++ b/src/test/unit/config.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai' import { OceanNodeConfig } from '../../@types/OceanNode.js' -import { getConfiguration, loadConfigFromEnv } from '../../utils/config.js' +import { getConfiguration, loadConfigFromFile } from '../../utils/config.js' import { OverrideEnvConfig, TEST_ENV_CONFIG_PATH, @@ -22,12 +22,12 @@ describe('Should validate configuration from JSON', () => { }) it('should get indexer networks from config', () => { - expect(config.indexingNetworks.length).to.be.equal(1) - expect(config.indexingNetworks[0]).to.be.equal(8996) - expect(config.supportedNetworks['8996'].chainId).to.be.equal(8996) - expect(config.supportedNetworks['8996'].rpc).to.be.equal('http://127.0.0.1:8545') - expect(config.supportedNetworks['8996'].network).to.be.equal('development') - expect(config.supportedNetworks['8996'].chunkSize).to.be.equal(100) + expect(Object.keys(config.indexingNetworks).length).to.be.equal(1) + expect(config.indexingNetworks['8996']).to.not.equal(undefined) + expect(config.indexingNetworks['8996'].chainId).to.be.equal(8996) + expect(config.indexingNetworks['8996'].rpc).to.be.equal('http://127.0.0.1:8545') + expect(config.indexingNetworks['8996'].network).to.be.equal('development') + expect(config.indexingNetworks['8996'].chunkSize).to.be.equal(100) }) it('should have indexer', () => { @@ -35,7 +35,7 @@ describe('Should validate configuration from JSON', () => { expect(config.dbConfig).to.not.be.equal(null) // it is exported in the env vars, so it should overwrite the config.json expect(config.dbConfig.dbType).to.be.equal('typesense') - const configFile = loadConfigFromEnv() + const configFile = loadConfigFromFile(process.env.CONFIG_PATH) expect(config.dbConfig.dbType).to.not.be.equal(configFile.dbConfig.dbType) expect(config.dbConfig.url).to.be.equal('http://localhost:8108/?apiKey=xyz') }) diff --git a/src/test/unit/indexer/indexer.test.ts b/src/test/unit/indexer/indexer.test.ts index 2b2ae6178..c8bba1120 100644 --- a/src/test/unit/indexer/indexer.test.ts +++ b/src/test/unit/indexer/indexer.test.ts @@ -95,4 +95,8 @@ describe('OceanIndexer', () => { await tearDownEnvironment(envOverrides) sandbox.restore() }) + after(async () => { + await tearDownEnvironment(envOverrides) + sandbox.restore() + }) }) diff --git a/src/test/unit/networking.test.ts b/src/test/unit/networking.test.ts index 3f6ff5310..d9c98da95 100644 --- a/src/test/unit/networking.test.ts +++ b/src/test/unit/networking.test.ts @@ -72,7 +72,7 @@ describe('Test available network interfaces', () => { null, 
buildEnvOverrideConfig( [ENVIRONMENT_VARIABLES.INTERFACES], - [JSON.stringify(['p2p'])] + [JSON.stringify(['P2P'])] ) ) const interfaces = JSON.parse(process.env.INTERFACES) as string[] @@ -88,7 +88,7 @@ describe('Test available network interfaces', () => { null, buildEnvOverrideConfig( [ENVIRONMENT_VARIABLES.INTERFACES], - [JSON.stringify(['http'])] + [JSON.stringify(['HTTP'])] ) ) const interfaces = JSON.parse(process.env.INTERFACES) as string[] diff --git a/src/utils/address.ts b/src/utils/address.ts index 9770a6bcc..d4b735ad9 100644 --- a/src/utils/address.ts +++ b/src/utils/address.ts @@ -1,7 +1,7 @@ import fs from 'fs' import addresses from '@oceanprotocol/contracts/addresses/address.json' assert { type: 'json' } import { CORE_LOGGER } from './logging/common.js' -import { ENVIRONMENT_VARIABLES, existsEnvironmentVariable } from './index.js' +import { isDefined } from './index.js' /** * Get the artifacts address from the address.json file @@ -10,9 +10,9 @@ import { ENVIRONMENT_VARIABLES, existsEnvironmentVariable } from './index.js' */ export function getOceanArtifactsAdresses(): any { try { - if (existsEnvironmentVariable(ENVIRONMENT_VARIABLES.ADDRESS_FILE)) { + if (isDefined(process.env.ADDRESS_FILE)) { // eslint-disable-next-line security/detect-non-literal-fs-filename - const data = fs.readFileSync(ENVIRONMENT_VARIABLES.ADDRESS_FILE.value, 'utf8') + const data = fs.readFileSync(process.env.ADDRESS_FILE, 'utf8') return JSON.parse(data) } return addresses @@ -41,10 +41,7 @@ export function getOceanArtifactsAdressesByChainId(chain: number): any { } } // just warn about this missing configuration if running locally - if ( - chain === DEVELOPMENT_CHAIN_ID && - !existsEnvironmentVariable(ENVIRONMENT_VARIABLES.ADDRESS_FILE, true) - ) { + if (chain === DEVELOPMENT_CHAIN_ID && !isDefined(process.env.ADDRESS_FILE)) { CORE_LOGGER.warn( 'Cannot find contract artifacts addresses for "development" chain. Please set the "ADDRESS_FILE" environmental variable!' 
) diff --git a/src/utils/asset.ts b/src/utils/asset.ts index 10f86abec..de19ec94b 100644 --- a/src/utils/asset.ts +++ b/src/utils/asset.ts @@ -52,7 +52,8 @@ export async function fetchFileMetadata( const response = await axios({ url, method: method || 'get', - responseType: 'stream' + responseType: 'stream', + timeout: 30000 }) contentType = response.headers['content-type'] let totalSize = 0 diff --git a/src/utils/config.ts b/src/utils/config.ts index 7c10b95ce..f8a538302 100644 --- a/src/utils/config.ts +++ b/src/utils/config.ts @@ -1,1285 +1,10 @@ -import type { - DenyList, - OceanNodeConfig, - OceanNodeKeys, - AccessListContract -} from '../@types/OceanNode' -import { dhtFilterMethod } from '../@types/OceanNode.js' -import type { C2DClusterInfo, C2DDockerConfig } from '../@types/C2D/C2D.js' -import { C2DClusterType } from '../@types/C2D/C2D.js' -import { createFromPrivKey } from '@libp2p/peer-id-factory' -import { keys } from '@libp2p/crypto' -import { - computeCodebaseHash, - DEFAULT_RATE_LIMIT_PER_MINUTE, - ENVIRONMENT_VARIABLES, - EnvVariable, - hexStringToByteArray -} from '../utils/index.js' -import { - DEFAULT_MAX_CONNECTIONS_PER_MINUTE, - defaultBootstrapAddresses, - knownUnsafeURLs -} from '../utils/constants.js' - -import { LOG_LEVELS_STR, GENERIC_EMOJIS, getLoggerLevelEmoji } from './logging/Logger.js' -import { RPCS } from '../@types/blockchain' -import { getAddress, Wallet } from 'ethers' -import { FeeAmount, FeeStrategy, FeeTokens } from '../@types/Fees' -import { - getOceanArtifactsAdresses, - OCEAN_ARTIFACTS_ADDRESSES_PER_CHAIN -} from '../utils/address.js' -import { CONFIG_LOGGER } from './logging/common.js' -import { create256Hash } from './crypt.js' import { isDefined } from './util.js' -import { fileURLToPath } from 'url' -import path from 'path' -import fs from 'fs' -import os from 'os' -import { z } from 'zod' - -const AccessListContractSchema = z.any() -const OceanNodeKeysSchema = z.any() - -const OceanNodeDBConfigSchema = z.any() -const FeeStrategySchema = z.any() -const RPCSSchema = z.any() -const C2DClusterInfoSchema = z.any() -const DenyListSchema = z.any() - -const OceanNodeP2PConfigSchema = z.object({ - bootstrapNodes: z.array(z.string()).optional().default(defaultBootstrapAddresses), - bootstrapTimeout: z.number().int().optional().default(2000), - bootstrapTagName: z.string().optional().default('bootstrap'), - bootstrapTagValue: z.number().int().optional().default(50), - enableIPV4: z.boolean().optional().default(true), - enableIPV6: z.boolean().optional().default(true), - ipV4BindAddress: z.string().optional().default('0.0.0.0'), - ipV4BindTcpPort: z.number().int().optional().default(0), - ipV4BindWsPort: z.number().int().optional().default(0), - ipV6BindAddress: z.string().optional().default('::1'), - ipV6BindTcpPort: z.number().int().optional().default(0), - ipV6BindWsPort: z.number().int().optional().default(0), - pubsubPeerDiscoveryInterval: z.number().int().optional().default(1000), - dhtMaxInboundStreams: z.number().int().optional().default(500), - dhtMaxOutboundStreams: z.number().int().optional().default(500), - mDNSInterval: z.number().int().optional().default(20e3), - connectionsMaxParallelDials: z.number().int().optional().default(15), - connectionsDialTimeout: z.number().int().optional().default(30e3), - upnp: z.boolean().optional().default(true), - autoNat: z.boolean().optional().default(true), - enableCircuitRelayServer: z.boolean().optional().default(false), - enableCircuitRelayClient: z.boolean().optional().default(false), - circuitRelays: 
z.number().int().optional().default(0),
-  announcePrivateIp: z.boolean().optional().default(false),
-  filterAnnouncedAddresses: z
-    .array(z.string())
-    .optional()
-    .default([
-      '127.0.0.0/8',
-      '10.0.0.0/8',
-      '172.16.0.0/12',
-      '192.168.0.0/16',
-      '100.64.0.0/10',
-      '169.254.0.0/16',
-      '192.0.0.0/24',
-      '192.0.2.0/24',
-      '198.51.100.0/24',
-      '203.0.113.0/24',
-      '224.0.0.0/4',
-      '240.0.0.0/4'
-    ]),
-  minConnections: z.number().int().optional().default(1),
-  maxConnections: z.number().int().optional().default(300),
-  autoDialPeerRetryThreshold: z.number().int().optional().default(120000),
-  autoDialConcurrency: z.number().int().optional().default(5),
-  maxPeerAddrsToDial: z.number().int().optional().default(5),
-  autoDialInterval: z.number().int().optional().default(5000),
-  enableNetworkStats: z.boolean().optional().default(false)
-})
-
-export const OceanNodeConfigSchema = z.object({
-  authorizedDecrypters: z.array(z.string()),
-  authorizedDecryptersList: AccessListContractSchema.nullable(),
-  allowedValidators: z.array(z.string()),
-  allowedValidatorsList: AccessListContractSchema.nullable(),
-  authorizedPublishers: z.array(z.string()),
-  authorizedPublishersList: AccessListContractSchema.nullable(),
-
-  keys: OceanNodeKeysSchema,
-
-  hasP2P: z.boolean(),
-  p2pConfig: OceanNodeP2PConfigSchema.nullable(),
-  hasIndexer: z.boolean(),
-  hasHttp: z.boolean(),
-  hasControlPanel: z.boolean(),
-
-  dbConfig: OceanNodeDBConfigSchema.optional(),
-
-  httpPort: z.number().int(),
-  rateLimit: z.union([z.number(), z.object({})]).optional(),
-  feeStrategy: FeeStrategySchema,
-
-  supportedNetworks: RPCSSchema.optional(),
-
-  claimDurationTimeout: z.number().int().default(600),
-  indexingNetworks: RPCSSchema.optional(),
-
-  c2dClusters: z.array(C2DClusterInfoSchema),
-  c2dNodeUri: z.string(),
-  accountPurgatoryUrl: z.string(),
-  assetPurgatoryUrl: z.string(),
-
-  allowedAdmins: z.array(z.string()).optional(),
-  allowedAdminsList: AccessListContractSchema.nullable().optional(),
-
-  codeHash: z.string().optional(),
-  maxConnections: z.number().optional(),
-  denyList: DenyListSchema.optional(),
-  unsafeURLs: z.array(z.string()).optional().default([
-    // AWS and GCP
-    '^.*(169.254.169.254).*',
-    // GCP
-    '^.*(metadata.google.internal).*',
-    '^.*(http://metadata).*',
-    // Azure
-    '^.*(http://169.254.169.254).*',
-    // Oracle Cloud
-    '^.*(http://192.0.0.192).*',
-    // Alibaba Cloud
-    '^.*(http://100.100.100.200).*',
-    // k8s ETCD
-    '^.*(127.0.0.1).*'
-  ]),
-  isBootstrap: z.boolean().optional().default(false),
-  validateUnsignedDDO: z.boolean().optional().default(true),
-  jwtSecret: z.string().optional()
-})
-
-export type OceanNodeConfigParsed = z.infer<typeof OceanNodeConfigSchema>
-// usefull for lazy loading and avoid boilerplate on other places
-let previousConfiguration: OceanNodeConfig = null
-
-export async function getPeerIdFromPrivateKey(
-  privateKey: string
-): Promise<OceanNodeKeys> {
-  const key = new keys.supportedKeys.secp256k1.Secp256k1PrivateKey(
-    hexStringToByteArray(privateKey.slice(2))
-  )
-
-  return {
-    peerId: await createFromPrivKey(key),
-    publicKey: key.public.bytes,
-    // Notes:
-    // using 'key.public.bytes' gives extra 4 bytes: 08021221
-    // using (key as any)._publicKey is stripping this same 4 bytes at the beginning: 08021221
-    // when getting the peer details with 'peerIdFromString(peerName)' it returns the version with the 4 extra bytes
-    // and we also need to send that to the client, so he can uncompress the public key correctly and perform the check and the encryption
-    // so it would make more sense to use this value on 
the configuration - privateKey: (key as any)._key, - ethAddress: new Wallet(privateKey.substring(2)).address - } -} - -function getEnvValue(env: any, defaultValue: any) { - /* Gets value for an ENV var, returning defaultValue if not defined */ - if (env === null || env === undefined || (env as string).length === 0) { - return defaultValue - } - return env as string -} - -function getIntEnvValue(env: any, defaultValue: number) { - /* Gets int value for an ENV var, returning defaultValue if not defined */ - const num = parseInt(env, 10) - return isNaN(num) ? defaultValue : num -} - -export function getBoolEnvValue(envName: string, defaultValue: boolean): boolean { - if (!(envName in process.env)) { - return defaultValue - } - if ( - process.env[envName] === 'true' || - process.env[envName] === '1' || - process.env[envName]?.toLowerCase() === 'yes' - ) { - return true - } - return false -} - -function getSupportedChains(): RPCS | null { - const logError = function (): null { - // missing or invalid RPC list - CONFIG_LOGGER.logMessageWithEmoji( - 'Missing or Invalid RPCS env variable format, Running node without the Indexer component...', - true, - GENERIC_EMOJIS.EMOJI_CROSS_MARK, - LOG_LEVELS_STR.LEVEL_ERROR - ) - return null - } - if (!process.env.RPCS) { - return logError() - } - let supportedNetworks: RPCS = null - try { - supportedNetworks = JSON.parse(process.env.RPCS) - } catch (e) { - return logError() - } - - return supportedNetworks -} - -function getIndexingNetworks(supportedNetworks: RPCS): RPCS | null { - const indexerNetworksEnv = process.env.INDEXER_NETWORKS - - const defaultErrorMsg = - 'Missing or invalid "INDEXER_NETWORKS" variable. Running Indexer with all supported networks defined in RPCS env variable...' - if (!indexerNetworksEnv) { - CONFIG_LOGGER.logMessageWithEmoji( - defaultErrorMsg, - true, - GENERIC_EMOJIS.EMOJI_CHECK_MARK, - LOG_LEVELS_STR.LEVEL_INFO - ) - return supportedNetworks - } - try { - const indexerNetworks: number[] = JSON.parse(indexerNetworksEnv) - - // env var exists but is wrong, so it does not index anything, but we still log the error - if (indexerNetworks.length === 0) { - CONFIG_LOGGER.logMessageWithEmoji( - '"INDEXER_NETWORKS" is an empty array, Running node without the Indexer component...', - true, - GENERIC_EMOJIS.EMOJI_CROSS_MARK, - LOG_LEVELS_STR.LEVEL_ERROR - ) - return null - } - - // Use reduce to filter supportedNetworks - const filteredNetworks = indexerNetworks.reduce((acc: RPCS, chainId) => { - if (supportedNetworks[chainId]) { - acc[chainId] = supportedNetworks[chainId] - } - return acc - }, {}) - - // if variables are not aligned we might end up not running indexer at all, so at least we should log a warning - if (Object.keys(filteredNetworks).length === 0) { - CONFIG_LOGGER.logMessageWithEmoji( - `"RPCS" chains: "${Object.keys( - supportedNetworks - )}" and "INDEXER_NETWORKS" chains: "${indexerNetworks}" mismatch! 
Running node without the Indexer component...`, - true, - GENERIC_EMOJIS.EMOJI_CROSS_MARK, - LOG_LEVELS_STR.LEVEL_ERROR - ) - } - return filteredNetworks - } catch (e) { - CONFIG_LOGGER.logMessageWithEmoji( - defaultErrorMsg, - true, - GENERIC_EMOJIS.EMOJI_CROSS_MARK, - LOG_LEVELS_STR.LEVEL_ERROR - ) - return supportedNetworks - } -} -// valid publishers (what we will index) -function getAuthorizedPublishers(isStartup?: boolean): string[] { - if (existsEnvironmentVariable(ENVIRONMENT_VARIABLES.AUTHORIZED_PUBLISHERS, isStartup)) { - return readAddressListFromEnvVariable( - ENVIRONMENT_VARIABLES.AUTHORIZED_PUBLISHERS, - isStartup - ) - } - return [] -} - -function getAuthorizedPublishersList(isStartup?: boolean): AccessListContract | null { - if ( - existsEnvironmentVariable(ENVIRONMENT_VARIABLES.AUTHORIZED_PUBLISHERS_LIST, isStartup) - ) { - try { - const publisherAccessList = JSON.parse( - ENVIRONMENT_VARIABLES.AUTHORIZED_PUBLISHERS_LIST.value - ) as AccessListContract - return publisherAccessList - } catch (err) { - CONFIG_LOGGER.error(err.message) - } - } - return null -} -// valid decrypthers -function getAuthorizedDecrypters(isStartup?: boolean): string[] { - return readAddressListFromEnvVariable( - ENVIRONMENT_VARIABLES.AUTHORIZED_DECRYPTERS, - isStartup - ) -} - -function getAuthorizedDecryptersList(isStartup?: boolean): AccessListContract | null { - if ( - existsEnvironmentVariable(ENVIRONMENT_VARIABLES.AUTHORIZED_DECRYPTERS_LIST, isStartup) - ) { - try { - const decryptersAccessList = JSON.parse( - ENVIRONMENT_VARIABLES.AUTHORIZED_DECRYPTERS_LIST.value - ) as AccessListContract - return decryptersAccessList - } catch (err) { - CONFIG_LOGGER.error(err.message) - } - } - return null -} -// allowed validators -export function getAllowedValidators(isStartup?: boolean): string[] { - return readAddressListFromEnvVariable( - ENVIRONMENT_VARIABLES.ALLOWED_VALIDATORS, - isStartup - ) -} - -function getAllowedValidatorsList(isStartup?: boolean): AccessListContract | null { - if ( - existsEnvironmentVariable(ENVIRONMENT_VARIABLES.ALLOWED_VALIDATORS_LIST, isStartup) - ) { - try { - const publisherAccessList = JSON.parse( - ENVIRONMENT_VARIABLES.ALLOWED_VALIDATORS_LIST.value - ) as AccessListContract - return publisherAccessList - } catch (err) { - CONFIG_LOGGER.error(err.message) - } - } - return null -} -// valid node admins -function getAllowedAdmins(isStartup?: boolean): string[] { - return readAddressListFromEnvVariable(ENVIRONMENT_VARIABLES.ALLOWED_ADMINS, isStartup) -} - -function getAllowedAdminsList(isStartup?: boolean): AccessListContract | null { - if (existsEnvironmentVariable(ENVIRONMENT_VARIABLES.ALLOWED_ADMINS_LIST, isStartup)) { - try { - const adminAccessList = JSON.parse( - ENVIRONMENT_VARIABLES.ALLOWED_ADMINS_LIST.value - ) as AccessListContract - return adminAccessList - } catch (err) { - CONFIG_LOGGER.error(err.message) - } - } - return null -} - -// whenever we want to read an array of strings from an env variable, use this common function -function readListFromEnvVariable( - envVariable: any, - isStartup?: boolean, - defaultValue: string[] = [] -): string[] { - const { name } = envVariable - try { - if (!existsEnvironmentVariable(envVariable, isStartup)) { - return defaultValue - } - const addressesRaw: string[] = JSON.parse(process.env[name]) - if (!Array.isArray(addressesRaw)) { - CONFIG_LOGGER.logMessageWithEmoji( - `Invalid ${name} env variable format`, - true, - GENERIC_EMOJIS.EMOJI_CROSS_MARK, - LOG_LEVELS_STR.LEVEL_ERROR - ) - return defaultValue - } - return 
addressesRaw - } catch (error) { - CONFIG_LOGGER.logMessageWithEmoji( - `Missing or Invalid address(es) in ${name} env variable`, - true, - GENERIC_EMOJIS.EMOJI_CROSS_MARK, - LOG_LEVELS_STR.LEVEL_ERROR - ) - return defaultValue - } -} - -// whenever we want to read an array of addresses from an env variable, use this common function -function readAddressListFromEnvVariable(envVariable: any, isStartup?: boolean): string[] { - const addressesRaw: string[] = readListFromEnvVariable(envVariable, isStartup) - return addressesRaw.map((address) => getAddress(address)) -} -/** - * get default values for provider fee tokens - * @param supportedNetworks chains that we support - * @returns ocean fees token - */ -function getDefaultFeeTokens(supportedNetworks: RPCS): FeeTokens[] { - const nodeFeesTokens: FeeTokens[] = [] - let addressesData: any = getOceanArtifactsAdresses() - if (!addressesData) { - addressesData = OCEAN_ARTIFACTS_ADDRESSES_PER_CHAIN - } - // check if we have configured anything ourselves - const hasSupportedNetworks = - supportedNetworks && Object.keys(supportedNetworks).length > 0 - // check if we have it supported - Object.keys(addressesData).forEach((chain: any) => { - const chainName = chain as string - const { chainId, Ocean } = addressesData[chainName] - - // if we have set the supported chains, we use those chains/tokens - if (hasSupportedNetworks) { - // check if exists the correct one to add - const keyId: string = chainId as string - const chainInfo: any = supportedNetworks[keyId] - if (chainInfo) { - nodeFeesTokens.push({ - chain: keyId, - token: Ocean - }) - } - } else { - // otherwise, we add all we know about - nodeFeesTokens.push({ - chain: chainId as string, - token: Ocean - }) - } - }) - return nodeFeesTokens -} - -// parse fees structure from .env -/** - * - * @param supportedNetworks networks supported - * @param isStartup boolean to avoid logging too much - * @returns Fees structure - */ -function getOceanNodeFees(supportedNetworks: RPCS, isStartup?: boolean): FeeStrategy { - const logError = () => { - CONFIG_LOGGER.logMessageWithEmoji( - 'Error parsing Fee Strategy! Please check "FEE_TOKENS" and "FEE_AMOUNT" env variables. Will use defaults...', - true, - GENERIC_EMOJIS.EMOJI_CROSS_MARK, - LOG_LEVELS_STR.LEVEL_ERROR - ) - } - let nodeFeesAmount: FeeAmount - let nodeFeesTokens: FeeTokens[] = [] - try { - // if not exists, just use defaults - if (!existsEnvironmentVariable(ENVIRONMENT_VARIABLES.FEE_AMOUNT)) { - if (isStartup) { - logMissingVariableWithDefault(ENVIRONMENT_VARIABLES.FEE_AMOUNT) - } - - nodeFeesAmount = { amount: 0, unit: 'MB' } - } else { - nodeFeesAmount = JSON.parse(process.env.FEE_AMOUNT) as FeeAmount - } - if (!existsEnvironmentVariable(ENVIRONMENT_VARIABLES.FEE_TOKENS)) { - // try to get first for artifacts address if available - if (isStartup) { - logMissingVariableWithDefault(ENVIRONMENT_VARIABLES.FEE_TOKENS) - } - - nodeFeesTokens = getDefaultFeeTokens(supportedNetworks) - } else { - const tokens = JSON.parse(ENVIRONMENT_VARIABLES.FEE_TOKENS.value) - Object.keys(tokens).forEach((key: any) => { - nodeFeesTokens.push({ - chain: key as string, - token: tokens[key] - }) - }) - } - - return { - feeTokens: nodeFeesTokens, - feeAmount: nodeFeesAmount - } - } catch (error) { - if (isStartup) { - logError() - } - // make sure we always return something usable - return { - feeTokens: nodeFeesTokens.length - ? 
nodeFeesTokens - : getDefaultFeeTokens(supportedNetworks), - feeAmount: nodeFeesAmount || { amount: 0, unit: 'MB' } - } - } -} - -// get C2D environments -function getC2DClusterEnvironment(isStartup?: boolean): C2DClusterInfo[] { - const clusters: C2DClusterInfo[] = [] - // avoid log too much (too much noise on tests as well), this is not even required - if (existsEnvironmentVariable(ENVIRONMENT_VARIABLES.OPERATOR_SERVICE_URL, isStartup)) { - try { - const clustersURLS: string[] = JSON.parse( - process.env.OPERATOR_SERVICE_URL - ) as string[] - - for (const theURL of clustersURLS) { - clusters.push({ - connection: theURL, - hash: create256Hash(theURL), - type: C2DClusterType.OPF_K8 - }) - } - } catch (error) { - CONFIG_LOGGER.logMessageWithEmoji( - `Invalid or missing "${ENVIRONMENT_VARIABLES.OPERATOR_SERVICE_URL.name}" env variable => ${process.env.OPERATOR_SERVICE_URL}...`, - true, - GENERIC_EMOJIS.EMOJI_CROSS_MARK, - LOG_LEVELS_STR.LEVEL_ERROR - ) - } - } - const dockerC2Ds = getDockerComputeEnvironments(isStartup) - for (const dockerC2d of dockerC2Ds) { - if (dockerC2d.socketPath || dockerC2d.host) { - const hash = create256Hash(JSON.stringify(dockerC2d)) - // get env values - clusters.push({ - connection: dockerC2d, - hash, - type: C2DClusterType.DOCKER, - tempFolder: './c2d_storage/' + hash - }) - } - } - - return clusters -} - -/** - * Reads a partial ComputeEnvironment setting (array of) - * @param isStartup for logging purposes - * @returns - * - * example: - * { - "cpuNumber": 2, - "ramGB": 4, - "diskGB": 10, - "desc": "2Cpu,2gbRam - price 1 OCEAN/minute, max 1 hour", - "maxJobs": 10, - "storageExpiry": 36000, - "maxJobDuration": 3600, - "chainId": 1, - "feeToken": "0x967da4048cD07aB37855c090aAF366e4ce1b9F48", - "priceMin": 1 - }, - */ -function getDockerComputeEnvironments(isStartup?: boolean): C2DDockerConfig[] { - const dockerC2Ds: C2DDockerConfig[] = [] - if ( - existsEnvironmentVariable( - ENVIRONMENT_VARIABLES.DOCKER_COMPUTE_ENVIRONMENTS, - isStartup - ) - ) { - try { - const configs: C2DDockerConfig[] = JSON.parse( - process.env.DOCKER_COMPUTE_ENVIRONMENTS - ) as C2DDockerConfig[] - - for (const config of configs) { - let errors = '' - if (!isDefined(config.fees)) { - errors += ' There is no fees configuration!' - } - - if (config.storageExpiry < config.maxJobDuration) { - errors += ' "storageExpiry" should be greater than "maxJobDuration"! 
' - } - // for docker there is no way of getting storage space - let foundDisk = false - if ('resources' in config) { - for (const resource of config.resources) { - if (resource.id === 'disk' && resource.total) { - foundDisk = true - resource.type = 'disk' - } - } - } - if (!foundDisk) { - errors += ' There is no "disk" resource configured.This is mandatory ' - } - if (errors.length > 1) { - CONFIG_LOGGER.error( - 'Please check your compute env settings: ' + - errors + - 'for env: ' + - JSON.stringify(config) - ) - } else { - dockerC2Ds.push(config) - } - } - return dockerC2Ds - } catch (error) { - CONFIG_LOGGER.logMessageWithEmoji( - `Invalid "${ENVIRONMENT_VARIABLES.DOCKER_COMPUTE_ENVIRONMENTS.name}" env variable => ${process.env.DOCKER_COMPUTE_ENVIRONMENTS}...`, - true, - GENERIC_EMOJIS.EMOJI_CROSS_MARK, - LOG_LEVELS_STR.LEVEL_ERROR - ) - console.log(error) - } - } else if (isStartup) { - CONFIG_LOGGER.warn( - `No options for ${ENVIRONMENT_VARIABLES.DOCKER_COMPUTE_ENVIRONMENTS.name} were specified.` - ) - } - return [] -} - -// connect interfaces (p2p or/and http) -function getNodeInterfaces(isStartup: boolean = false) { - let interfaces: string[] = ['P2P', 'HTTP'] - if (!existsEnvironmentVariable(ENVIRONMENT_VARIABLES.INTERFACES)) { - if (isStartup) { - logMissingVariableWithDefault(ENVIRONMENT_VARIABLES.INTERFACES) - } - } else { - try { - interfaces = JSON.parse(process.env.INTERFACES) as string[] - if (interfaces.length === 0) { - return ['P2P', 'HTTP'] - } - } catch (err) { - CONFIG_LOGGER.logMessageWithEmoji( - `Invalid "${ENVIRONMENT_VARIABLES.INTERFACES.name}" env variable => ${process.env.INTERFACES}. Will use defaults...`, - true, - GENERIC_EMOJIS.EMOJI_CROSS_MARK, - LOG_LEVELS_STR.LEVEL_ERROR - ) - } - } - // make it case insensitive - return interfaces.map((iface: string) => { - return iface.toUpperCase() - }) -} +import { getConfiguration } from './config/builder.js' -/** - * checks if a var is defined on env - * @param envVariable check utils/constants ENVIRONMENT_VARIABLES - * @param hasDefault if true we ignore if not set - * @returns boolean - */ -export function existsEnvironmentVariable(envVariable: any, log = false): boolean { - let { name, value, required } = envVariable - // extra check in case we change environment with tests (get the latest) - if (process.env[name] !== value) { - value = process.env[name] - } - if (!value) { - if (log) { - CONFIG_LOGGER.logMessageWithEmoji( - `Invalid or missing "${name}" env variable...`, - true, - required - ? GENERIC_EMOJIS.EMOJI_CROSS_MARK - : getLoggerLevelEmoji(LOG_LEVELS_STR.LEVEL_WARN), - required ? LOG_LEVELS_STR.LEVEL_ERROR : LOG_LEVELS_STR.LEVEL_WARN - ) - } +export * from './config/index.js' - return false - } - return true -} - -function logMissingVariableWithDefault(envVariable: EnvVariable) { - CONFIG_LOGGER.log( - LOG_LEVELS_STR.LEVEL_WARN, - `Missing "${envVariable.name}" env variable. 
Will use defaults...`, - true - ) -} -// have a rate limit for handler calls (per IP address or peer id) -function getRateLimit(isStartup: boolean = false) { - if (!existsEnvironmentVariable(ENVIRONMENT_VARIABLES.MAX_REQ_PER_MINUTE)) { - if (isStartup) { - logMissingVariableWithDefault(ENVIRONMENT_VARIABLES.MAX_REQ_PER_MINUTE) - } - return DEFAULT_RATE_LIMIT_PER_MINUTE - } else { - try { - return getIntEnvValue(process.env.MAX_REQ_PER_MINUTE, DEFAULT_RATE_LIMIT_PER_MINUTE) - } catch (err) { - CONFIG_LOGGER.error( - `Invalid "${ENVIRONMENT_VARIABLES.MAX_REQ_PER_MINUTE.name}" env variable...` - ) - return DEFAULT_RATE_LIMIT_PER_MINUTE - } - } -} - -// Global requests limit -function getConnectionsLimit(isStartup: boolean = false) { - if (!existsEnvironmentVariable(ENVIRONMENT_VARIABLES.MAX_CONNECTIONS_PER_MINUTE)) { - if (isStartup) { - logMissingVariableWithDefault(ENVIRONMENT_VARIABLES.MAX_CONNECTIONS_PER_MINUTE) - } - return DEFAULT_RATE_LIMIT_PER_MINUTE - } else { - try { - return getIntEnvValue( - process.env.MAX_CONNECTIONS_PER_MINUTE, - DEFAULT_MAX_CONNECTIONS_PER_MINUTE - ) - } catch (err) { - CONFIG_LOGGER.error( - `Invalid "${ENVIRONMENT_VARIABLES.MAX_CONNECTIONS_PER_MINUTE.name}" env variable...` - ) - return DEFAULT_MAX_CONNECTIONS_PER_MINUTE - } - } -} - -// get blocked ips and peer ids -function getDenyList(isStartup: boolean = false): DenyList { - const defaultDenyList: DenyList = { - peers: [], - ips: [] - } - if (!existsEnvironmentVariable(ENVIRONMENT_VARIABLES.RATE_DENY_LIST, isStartup)) { - return defaultDenyList - } else { - try { - const list: DenyList = JSON.parse(process.env.RATE_DENY_LIST) as DenyList - return list - } catch (err) { - CONFIG_LOGGER.error( - `Invalid "${ENVIRONMENT_VARIABLES.RATE_DENY_LIST.name}" env variable...` - ) - return defaultDenyList - } - } -} - -// lazy access ocean node config, when we don't need updated values from process.env -// this only goes through .env processing once (more suitable for a running node instance) -export async function getConfiguration( - forceReload: boolean = false, - isStartup: boolean = false -): Promise { - if (!previousConfiguration || forceReload) { - if (!existsEnvironmentVariable(ENVIRONMENT_VARIABLES.CONFIG_PATH)) { - previousConfiguration = await getEnvConfig(isStartup) - } else { - previousConfiguration = buildMergedConfig() - } - } - if (!previousConfiguration.codeHash) { - const __filename = fileURLToPath(import.meta.url) - const __dirname = path.dirname(__filename.replace('utils/', '')) - previousConfiguration.codeHash = await computeCodebaseHash(__dirname) - } - - return previousConfiguration -} - -export function loadConfigFromEnv(envVar: string = 'CONFIG_PATH'): OceanNodeConfig { - let configPath = process.env[envVar] - if (!configPath) { - if (!fs.existsSync(path.join(process.cwd(), 'config.json'))) { - throw new Error( - `Config file not found. Neither environment variable "${envVar}" is set nor does ${configPath} exist.` - ) - } - configPath = path.join(process.cwd(), 'config.json') - } - // Expand $HOME if present - if (configPath.startsWith('$HOME')) { - const home = process.env.HOME || os.homedir() - if (!home) { - throw new Error( - `"${envVar}" contains $HOME but HOME is not set in the environment.` - ) - } - configPath = path.join(home, configPath.slice('$HOME'.length)) - } - - if (!path.isAbsolute(configPath)) { - throw new Error( - `Environment variable "${envVar}" must be an absolute path. 
Got: ${configPath}` - ) - } - - if (!fs.existsSync(configPath)) { - throw new Error(`Config file not found at path: ${configPath}`) - } - - const privateKey = process.env.PRIVATE_KEY - if (!privateKey || privateKey.length !== 66) { - // invalid private key - CONFIG_LOGGER.logMessageWithEmoji( - 'Invalid PRIVATE_KEY env variable..', - true, - GENERIC_EMOJIS.EMOJI_CROSS_MARK, - LOG_LEVELS_STR.LEVEL_ERROR - ) - return null - } - - const rawData = fs.readFileSync(configPath, 'utf-8') - let config: OceanNodeConfig - - try { - config = JSON.parse(rawData) - } catch (err) { - throw new Error(`Invalid JSON in config file: ${configPath}. Error: ${err.message}`) - } - if (!previousConfiguration) { - previousConfiguration = config - } else if (configChanged(previousConfiguration, config)) { - CONFIG_LOGGER.warn( - 'Detected Ocean Node Configuration change... This might have unintended effects' - ) - } - return config -} - -const parseJsonEnv = (env: string | undefined, fallback: T): T => { - try { - return env ? JSON.parse(env) : fallback - } catch { - return fallback - } -} - -export function buildMergedConfig(): OceanNodeConfig { - const baseConfig = loadConfigFromEnv() - - let dhtFilterOption - switch (parseInt(process.env.P2P_DHT_FILTER, 0)) { - case 1: - dhtFilterOption = dhtFilterMethod.filterPrivate - break - case 2: - dhtFilterOption = dhtFilterMethod.filterPublic - break - default: - dhtFilterOption = dhtFilterMethod.filterNone - } - - const privateKey = process.env.PRIVATE_KEY - if (!privateKey || privateKey.length !== 66) { - // invalid private key - CONFIG_LOGGER.logMessageWithEmoji( - 'Invalid PRIVATE_KEY env variable..', - true, - GENERIC_EMOJIS.EMOJI_CROSS_MARK, - LOG_LEVELS_STR.LEVEL_ERROR - ) - return null - } - - const overrides: Partial = { - ...(process.env.JWT_SECRET && { jwtSecret: process.env.JWT_SECRET }), - ...(process.env.DB_URL && { - dbConfig: { - url: process.env.DB_URL, - username: process.env.DB_USERNAME ?? baseConfig.dbConfig?.username ?? '', - password: process.env.DB_PASSWORD ?? baseConfig.dbConfig?.password ?? '', - dbType: process.env.DB_TYPE ?? baseConfig.dbConfig?.dbType ?? 'elasticsearch' - } - }), - authorizedDecrypters: process.env.AUTHORIZED_DECRYPTERS - ? getAuthorizedDecrypters(true) - : baseConfig.authorizedDecrypters, - - authorizedDecryptersList: process.env.AUTHORIZED_DECRYPTERS_LIST - ? getAuthorizedDecryptersList(true) - : baseConfig.authorizedDecryptersList, - - allowedValidators: process.env.ALLOWED_VALIDATORS - ? getAllowedValidators(true) - : baseConfig.allowedValidators, - - allowedValidatorsList: process.env.ALLOWED_VALIDATORS_LIST - ? getAllowedValidatorsList(true) - : baseConfig.allowedValidatorsList, - - authorizedPublishers: process.env.ALLOWED_ADMINS - ? getAuthorizedPublishers(true) - : baseConfig.authorizedPublishers, - - authorizedPublishersList: process.env.ALLOWED_ADMINS_LIST - ? getAuthorizedPublishersList(true) - : baseConfig.authorizedPublishersList, - - ...(process.env.HTTP_API_PORT && { httpPort: Number(process.env.HTTP_API_PORT) }), - - p2pConfig: { - ...baseConfig.p2pConfig, - - bootstrapNodes: parseJsonEnv( - process.env.P2P_BOOTSTRAP_NODES, - baseConfig.p2pConfig?.bootstrapNodes ?? [] - ), - bootstrapTimeout: process.env.P2P_BOOTSTRAP_TIMEOUT - ? parseInt(process.env.P2P_BOOTSTRAP_TIMEOUT, 10) - : baseConfig.p2pConfig?.bootstrapTimeout, - bootstrapTagName: - process.env.P2P_BOOTSTRAP_TAGNAME ?? baseConfig.p2pConfig?.bootstrapTagName, - bootstrapTagValue: process.env.P2P_BOOTSTRAP_TAGVALUE - ? 
parseInt(process.env.P2P_BOOTSTRAP_TAGVALUE, 10) - : baseConfig.p2pConfig?.bootstrapTagValue, - bootstrapTTL: process.env.P2P_BOOTSTRAP_TTL - ? parseInt(process.env.P2P_BOOTSTRAP_TTL, 10) - : baseConfig.p2pConfig?.bootstrapTTL, - - enableIPV4: process.env.P2P_ENABLE_IPV4 - ? process.env.P2P_ENABLE_IPV4 === 'true' - : baseConfig.p2pConfig?.enableIPV4, - enableIPV6: process.env.P2P_ENABLE_IPV6 - ? process.env.P2P_ENABLE_IPV6 === 'true' - : baseConfig.p2pConfig?.enableIPV6, - - ipV4BindAddress: - process.env.P2P_IP_V4_BIND_ADDRESS ?? baseConfig.p2pConfig?.ipV4BindAddress, - ipV4BindTcpPort: process.env.P2P_IP_V4_BIND_TCP_PORT - ? parseInt(process.env.P2P_IP_V4_BIND_TCP_PORT, 10) - : baseConfig.p2pConfig?.ipV4BindTcpPort, - ipV4BindWsPort: process.env.P2P_IP_V4_BIND_WS_PORT - ? parseInt(process.env.P2P_IP_V4_BIND_WS_PORT, 10) - : baseConfig.p2pConfig?.ipV4BindWsPort, - - ipV6BindAddress: - process.env.P2P_IP_V6_BIND_ADDRESS ?? baseConfig.p2pConfig?.ipV6BindAddress, - ipV6BindTcpPort: process.env.P2P_IP_V6_BIND_TCP_PORT - ? parseInt(process.env.P2P_IP_V6_BIND_TCP_PORT, 10) - : baseConfig.p2pConfig?.ipV6BindTcpPort, - ipV6BindWsPort: process.env.P2P_IP_V6_BIND_WS_PORT - ? parseInt(process.env.P2P_IP_V6_BIND_WS_PORT, 10) - : baseConfig.p2pConfig?.ipV6BindWsPort, - - announceAddresses: parseJsonEnv( - process.env.P2P_ANNOUNCE_ADDRESSES, - baseConfig.p2pConfig?.announceAddresses ?? [] - ), - pubsubPeerDiscoveryInterval: process.env.P2P_PUBSUB_PEER_DISCOVERY_INTERVAL - ? parseInt(process.env.P2P_PUBSUB_PEER_DISCOVERY_INTERVAL, 10) - : baseConfig.p2pConfig?.pubsubPeerDiscoveryInterval, - - dhtMaxInboundStreams: process.env.P2P_DHT_MAX_INBOUND_STREAMS - ? parseInt(process.env.P2P_DHT_MAX_INBOUND_STREAMS, 10) - : baseConfig.p2pConfig?.dhtMaxInboundStreams, - dhtMaxOutboundStreams: process.env.P2P_DHT_MAX_OUTBOUND_STREAMS - ? parseInt(process.env.P2P_DHT_MAX_OUTBOUND_STREAMS, 10) - : baseConfig.p2pConfig?.dhtMaxOutboundStreams, - dhtFilter: dhtFilterOption ?? baseConfig.p2pConfig?.dhtFilter, - - mDNSInterval: process.env.P2P_MDNS_INTERVAL - ? parseInt(process.env.P2P_MDNS_INTERVAL, 10) - : baseConfig.p2pConfig?.mDNSInterval, - - connectionsMaxParallelDials: process.env.P2P_CONNECTIONS_MAX_PARALLEL_DIALS - ? parseInt(process.env.P2P_CONNECTIONS_MAX_PARALLEL_DIALS, 10) - : baseConfig.p2pConfig?.connectionsMaxParallelDials, - connectionsDialTimeout: process.env.P2P_CONNECTIONS_DIAL_TIMEOUT - ? parseInt(process.env.P2P_CONNECTIONS_DIAL_TIMEOUT, 10) - : baseConfig.p2pConfig?.connectionsDialTimeout, - - upnp: process.env.P2P_ENABLE_UPNP - ? process.env.P2P_ENABLE_UPNP === 'true' - : baseConfig.p2pConfig?.upnp, - autoNat: process.env.P2P_ENABLE_AUTONAT - ? process.env.P2P_ENABLE_AUTONAT === 'true' - : baseConfig.p2pConfig?.autoNat, - - enableCircuitRelayServer: process.env.P2P_ENABLE_CIRCUIT_RELAY_SERVER - ? process.env.P2P_ENABLE_CIRCUIT_RELAY_SERVER === 'true' - : baseConfig.p2pConfig?.enableCircuitRelayServer, - enableCircuitRelayClient: process.env.P2P_ENABLE_CIRCUIT_RELAY_CLIENT - ? process.env.P2P_ENABLE_CIRCUIT_RELAY_CLIENT === 'true' - : baseConfig.p2pConfig?.enableCircuitRelayClient, - - circuitRelays: process.env.P2P_CIRCUIT_RELAYS - ? parseInt(process.env.P2P_CIRCUIT_RELAYS, 10) - : baseConfig.p2pConfig?.circuitRelays, - announcePrivateIp: process.env.P2P_ANNOUNCE_PRIVATE - ? 
process.env.P2P_ANNOUNCE_PRIVATE === 'true' - : baseConfig.p2pConfig?.announcePrivateIp, - - filterAnnouncedAddresses: parseJsonEnv( - process.env.P2P_FILTER_ANNOUNCED_ADDRESSES, - baseConfig.p2pConfig?.filterAnnouncedAddresses ?? [] - ), - - minConnections: process.env.P2P_MIN_CONNECTIONS - ? parseInt(process.env.P2P_MIN_CONNECTIONS, 10) - : baseConfig.p2pConfig?.minConnections, - maxConnections: process.env.P2P_MAX_CONNECTIONS - ? parseInt(process.env.P2P_MAX_CONNECTIONS, 10) - : baseConfig.p2pConfig?.maxConnections, - - autoDialPeerRetryThreshold: process.env.P2P_AUTODIAL_PEER_RETRY_THRESHOLD - ? parseInt(process.env.P2P_AUTODIAL_PEER_RETRY_THRESHOLD, 10) - : baseConfig.p2pConfig?.autoDialPeerRetryThreshold, - autoDialConcurrency: process.env.P2P_AUTODIAL_CONCURRENCY - ? parseInt(process.env.P2P_AUTODIAL_CONCURRENCY, 10) - : baseConfig.p2pConfig?.autoDialConcurrency, - maxPeerAddrsToDial: process.env.P2P_MAX_PEER_ADDRS_TO_DIAL - ? parseInt(process.env.P2P_MAX_PEER_ADDRS_TO_DIAL, 10) - : baseConfig.p2pConfig?.maxPeerAddrsToDial, - autoDialInterval: process.env.P2P_AUTODIAL_INTERVAL - ? parseInt(process.env.P2P_AUTODIAL_INTERVAL, 10) - : baseConfig.p2pConfig?.autoDialInterval, - - enableNetworkStats: process.env.P2P_ENABLE_NETWORK_STATS - ? process.env.P2P_ENABLE_NETWORK_STATS === 'true' - : baseConfig.p2pConfig?.enableNetworkStats - }, - - ...(process.env.CONTROL_PANEL && { - hasControlPanel: process.env.CONTROL_PANEL !== 'false' - }), - ...(process.env.RPCS && { - supportedNetworks: parseJsonEnv( - process.env.RPCS, - baseConfig.supportedNetworks ?? {} - ) - }), - ...(process.env.C2D_NODE_URI && { c2dNodeUri: process.env.C2D_NODE_URI }), - ...(process.env.ACCOUNT_PURGATORY_URL && { - accountPurgatoryUrl: process.env.ACCOUNT_PURGATORY_URL - }), - ...(process.env.ASSET_PURGATORY_URL && { - assetPurgatoryUrl: process.env.ASSET_PURGATORY_URL - }), - ...(process.env.UNSAFE_URLS && { - unsafeURLs: parseJsonEnv(process.env.UNSAFE_URLS, baseConfig.unsafeURLs ?? []) - }), - ...(process.env.IS_BOOTSTRAP && { isBootstrap: process.env.IS_BOOTSTRAP === 'true' }), - ...(process.env.ESCROW_CLAIM_TIMEOUT && { - claimDurationTimeout: parseInt(process.env.ESCROW_CLAIM_TIMEOUT, 10) - }), - ...(process.env.VALIDATE_UNSIGNED_DDO && { - validateUnsignedDDO: process.env.VALIDATE_UNSIGNED_DDO === 'true' - }) - } - - const merged = { - ...baseConfig, - ...overrides - } - - return OceanNodeConfigSchema.parse(merged) as OceanNodeConfig -} - -// we can just use the lazy version above "getConfiguration()" and specify if we want to reload from .env variables -async function getEnvConfig(isStartup?: boolean): Promise { - const privateKey = process.env.PRIVATE_KEY - if (!privateKey || privateKey.length !== 66) { - // invalid private key - CONFIG_LOGGER.logMessageWithEmoji( - 'Invalid PRIVATE_KEY env variable..', - true, - GENERIC_EMOJIS.EMOJI_CROSS_MARK, - LOG_LEVELS_STR.LEVEL_ERROR - ) - return null - } - - const supportedNetworks = getSupportedChains() - const indexingNetworks = supportedNetworks - ? 
getIndexingNetworks(supportedNetworks) - : null - // Notes: we need to have this config on the class and use always that, otherwise we're processing - // all this info every time we call getConfig(), and also loggin too much - - const keys = await getPeerIdFromPrivateKey(privateKey) - // do not log this information everytime we call getConfig() - if (isStartup) { - CONFIG_LOGGER.logMessageWithEmoji( - 'Starting node with peerID: ' + keys.peerId, - true, - GENERIC_EMOJIS.EMOJI_CHECK_MARK - ) - } - - // http and/or p2p connections - const interfaces = getNodeInterfaces(isStartup) - let bootstrapTtl = getIntEnvValue(process.env.P2P_BOOTSTRAP_TTL, 120000) - if (bootstrapTtl === 0) bootstrapTtl = Infinity - let dhtFilterOption - switch (getIntEnvValue(process.env.P2P_DHT_FILTER, 0)) { - case 1: - dhtFilterOption = dhtFilterMethod.filterPrivate - break - case 2: - dhtFilterOption = dhtFilterMethod.filterPublic - break - default: - dhtFilterOption = dhtFilterMethod.filterNone - } - - const config: OceanNodeConfig = { - authorizedDecrypters: getAuthorizedDecrypters(isStartup), - authorizedDecryptersList: getAuthorizedDecryptersList(isStartup), - allowedValidators: getAllowedValidators(isStartup), - allowedValidatorsList: getAllowedValidatorsList(isStartup), - authorizedPublishers: getAuthorizedPublishers(isStartup), - authorizedPublishersList: getAuthorizedPublishersList(isStartup), - keys, - // Only enable indexer if we have a DB_URL and supportedNetworks - hasIndexer: !!(!!getEnvValue(process.env.DB_URL, '') && !!indexingNetworks), - hasHttp: interfaces.includes('HTTP'), - hasP2P: interfaces.includes('P2P'), - p2pConfig: { - bootstrapNodes: readListFromEnvVariable( - ENVIRONMENT_VARIABLES.P2P_BOOTSTRAP_NODES, - isStartup, - defaultBootstrapAddresses - ), - bootstrapTimeout: getIntEnvValue(process.env.P2P_BOOTSTRAP_TIMEOUT, 20000), - bootstrapTagName: getEnvValue(process.env.P2P_BOOTSTRAP_TAGNAME, 'bootstrap'), - bootstrapTagValue: getIntEnvValue(process.env.P2P_BOOTSTRAP_TAGVALUE, 50), - bootstrapTTL: bootstrapTtl, - enableIPV4: getBoolEnvValue('P2P_ENABLE_IPV4', true), - enableIPV6: getBoolEnvValue('P2P_ENABLE_IPV6', true), - ipV4BindAddress: getEnvValue(process.env.P2P_ipV4BindAddress, '0.0.0.0'), - ipV4BindTcpPort: getIntEnvValue(process.env.P2P_ipV4BindTcpPort, 0), - ipV4BindWsPort: getIntEnvValue(process.env.P2P_ipV4BindWsPort, 0), - ipV6BindAddress: getEnvValue(process.env.P2P_ipV6BindAddress, '::1'), - ipV6BindTcpPort: getIntEnvValue(process.env.P2P_ipV6BindTcpPort, 0), - ipV6BindWsPort: getIntEnvValue(process.env.P2P_ipV6BindWsPort, 0), - announceAddresses: readListFromEnvVariable( - ENVIRONMENT_VARIABLES.P2P_ANNOUNCE_ADDRESSES, - isStartup - ), - pubsubPeerDiscoveryInterval: getIntEnvValue( - process.env.P2P_pubsubPeerDiscoveryInterval, - 10000 // every 10 seconds - ), - dhtMaxInboundStreams: getIntEnvValue(process.env.P2P_dhtMaxInboundStreams, 500), - dhtMaxOutboundStreams: getIntEnvValue(process.env.P2P_dhtMaxOutboundStreams, 500), - dhtFilter: dhtFilterOption, - mDNSInterval: getIntEnvValue(process.env.P2P_mDNSInterval, 20e3), // 20 seconds - connectionsMaxParallelDials: getIntEnvValue( - process.env.P2P_connectionsMaxParallelDials, - 15 - ), - connectionsDialTimeout: getIntEnvValue( - process.env.P2P_connectionsDialTimeout, - 30e3 - ), // 10 seconds, - upnp: getBoolEnvValue('P2P_ENABLE_UPNP', true), - autoNat: getBoolEnvValue('P2P_ENABLE_AUTONAT', true), - enableCircuitRelayServer: getBoolEnvValue('P2P_ENABLE_CIRCUIT_RELAY_SERVER', false), - enableCircuitRelayClient: 
getBoolEnvValue('P2P_ENABLE_CIRCUIT_RELAY_CLIENT', false), - circuitRelays: getIntEnvValue(process.env.P2P_CIRCUIT_RELAYS, 0), - announcePrivateIp: getBoolEnvValue('P2P_ANNOUNCE_PRIVATE', false), - filterAnnouncedAddresses: readListFromEnvVariable( - ENVIRONMENT_VARIABLES.P2P_FILTER_ANNOUNCED_ADDRESSES, - isStartup, - [ - '127.0.0.0/8', - '10.0.0.0/8', - '172.16.0.0/12', - '192.168.0.0/16', - '100.64.0.0/10', - '169.254.0.0/16', - '192.0.0.0/24', - '192.0.2.0/24', - '198.51.100.0/24', - '203.0.113.0/24', - '224.0.0.0/4', - '240.0.0.0/4' - ] // list of all non-routable IP addresses, not availabe from public internet, private networks or specific reserved use - ), - minConnections: getIntEnvValue(process.env.P2P_MIN_CONNECTIONS, 1), - maxConnections: getIntEnvValue(process.env.P2P_MAX_CONNECTIONS, 300), - autoDialPeerRetryThreshold: getIntEnvValue( - process.env.P2P_AUTODIALPEERRETRYTHRESHOLD, - 1000 * 120 - ), - autoDialConcurrency: getIntEnvValue(process.env.P2P_AUTODIALCONCURRENCY, 5), - maxPeerAddrsToDial: getIntEnvValue(process.env.P2P_MAXPEERADDRSTODIAL, 5), - autoDialInterval: getIntEnvValue(process.env.P2P_AUTODIALINTERVAL, 5000), - enableNetworkStats: getBoolEnvValue('P2P_ENABLE_NETWORK_STATS', false) - }, - // keep this for backwards compatibility for now - hasControlPanel: - process.env.CONTROL_PANEL !== 'false' || process.env.DASHBOARD !== 'false', - httpPort: getIntEnvValue(process.env.HTTP_API_PORT, 8000), - dbConfig: { - url: getEnvValue(process.env.DB_URL, ''), - username: getEnvValue(process.env.DB_USERNAME, ''), - password: getEnvValue(process.env.DB_PASSWORD, ''), - dbType: getEnvValue(process.env.DB_TYPE, null) - }, - supportedNetworks, - indexingNetworks, - feeStrategy: getOceanNodeFees(supportedNetworks, isStartup), - c2dClusters: getC2DClusterEnvironment(isStartup), - c2dNodeUri: getEnvValue(process.env.C2D_NODE_URI, ''), - accountPurgatoryUrl: getEnvValue(process.env.ACCOUNT_PURGATORY_URL, ''), - assetPurgatoryUrl: getEnvValue(process.env.ASSET_PURGATORY_URL, ''), - allowedAdmins: getAllowedAdmins(isStartup), - allowedAdminsList: getAllowedAdminsList(isStartup), - rateLimit: getRateLimit(isStartup), - maxConnections: getConnectionsLimit(isStartup), - denyList: getDenyList(isStartup), - unsafeURLs: readListFromEnvVariable( - ENVIRONMENT_VARIABLES.UNSAFE_URLS, - isStartup, - knownUnsafeURLs - ), - isBootstrap: getBoolEnvValue('IS_BOOTSTRAP', false), - claimDurationTimeout: getIntEnvValue(process.env.ESCROW_CLAIM_TIMEOUT, 600), - validateUnsignedDDO: getBoolEnvValue('VALIDATE_UNSIGNED_DDO', true), - jwtSecret: getEnvValue(process.env.JWT_SECRET, 'ocean-node-secret') - } - - if (!previousConfiguration) { - previousConfiguration = config - } else if (configChanged(previousConfiguration, config)) { - CONFIG_LOGGER.warn( - 'Detected Ocean Node Configuration change... This might have unintended effects' - ) - } - return config -} - -function configChanged(previous: OceanNodeConfig, current: OceanNodeConfig): boolean { - return JSON.stringify(previous) !== JSON.stringify(current) -} - -// useful for debugging purposes -export async function printCurrentConfig() { - const conf = await getConfiguration(true) - conf.keys.privateKey = '[*** HIDDEN CONTENT ***]' // hide private key - console.log(JSON.stringify(conf, null, 4)) -} - -// P2P routes related -export const hasP2PInterface = (await (await getConfiguration())?.hasP2P) || false - -// is there a policy server defined? 
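Note that both the env-only loader being removed here and its replacement in `src/utils/config/builder.ts` memoize the parsed configuration in a module-level `previousConfiguration`, so callers opt into a re-read explicitly. A minimal usage sketch of the cached accessor, assuming the import path re-exported by the new `src/utils/config.ts` (the surrounding script is hypothetical):

```ts
import { getConfiguration } from './config/builder.js'

// First call parses config.json plus env overrides and caches the result.
const config = await getConfiguration()

// Subsequent calls reuse the cache; pass forceReload=true to re-read,
// e.g. after the configuration file has been rewritten.
const fresh = await getConfiguration(true)
console.log(fresh.hasIndexer, fresh.httpPort)
```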
 export function isPolicyServerConfigured(): boolean {
   return isDefined(process.env.POLICY_SERVER_URL)
 }
+
+export const hasP2PInterface = (await (await getConfiguration())?.hasP2P) || false
diff --git a/src/utils/config/builder.ts b/src/utils/config/builder.ts
new file mode 100644
index 000000000..036542960
--- /dev/null
+++ b/src/utils/config/builder.ts
@@ -0,0 +1,301 @@
+import type { OceanNodeConfig, OceanNodeKeys } from '../../@types/OceanNode.js'
+import type { C2DClusterInfo, C2DDockerConfig } from '../../@types/C2D/C2D.js'
+import type { RPCS } from '../../@types/blockchain.js'
+import type { FeeTokens } from '../../@types/Fees.js'
+import { C2DClusterType } from '../../@types/C2D/C2D.js'
+import { keys } from '@libp2p/crypto'
+import { createFromPrivKey } from '@libp2p/peer-id-factory'
+import { Wallet } from 'ethers'
+import fs from 'fs'
+import os from 'os'
+import path from 'path'
+import { hexStringToByteArray, computeCodebaseHash } from '../index.js'
+import {
+  getOceanArtifactsAdresses,
+  OCEAN_ARTIFACTS_ADDRESSES_PER_CHAIN
+} from '../address.js'
+import { create256Hash } from '../crypt.js'
+import { CONFIG_LOGGER } from '../logging/common.js'
+import { LOG_LEVELS_STR, GENERIC_EMOJIS } from '../logging/Logger.js'
+import { OceanNodeConfigSchema } from './schemas.js'
+import { ENV_TO_CONFIG_MAPPING } from './constants.js'
+import { fileURLToPath } from 'url'
+
+let previousConfiguration: OceanNodeConfig = null
+
+function mapEnvToConfig(
+  env: NodeJS.ProcessEnv,
+  mapping: Record<string, string>
+): Record<string, any> {
+  const result: Record<string, any> = {}
+  for (const [envKey, configKey] of Object.entries(mapping)) {
+    const value = env[envKey]
+    if (value !== undefined && value !== 'undefined') {
+      result[configKey] = value
+    }
+  }
+  return result
+}
+
+function preprocessConfigData(data: any): void {
+  if (data.INTERFACES) {
+    try {
+      const interfaces = JSON.parse(data.INTERFACES).map((i: string) => i.toUpperCase())
+      if (interfaces.length > 0) {
+        data.hasHttp = interfaces.includes('HTTP')
+        data.hasP2P = interfaces.includes('P2P')
+      }
+    } catch (error) {
+      CONFIG_LOGGER.warn(`Failed to parse INTERFACES: ${error.message}`)
+    }
+    delete data.INTERFACES
+  }
+
+  // Transform DB_* env vars to dbConfig
+  if (data.DB_URL) {
+    data.dbConfig = {
+      url: data.DB_URL,
+      username: data.DB_USERNAME,
+      password: data.DB_PASSWORD,
+      dbType: data.DB_TYPE || 'elasticsearch'
+    }
+    delete data.DB_URL
+    delete data.DB_USERNAME
+    delete data.DB_PASSWORD
+    delete data.DB_TYPE
+  }
+
+  // Transform FEE_* env vars to feeStrategy
+  if (data.FEE_AMOUNT && data.FEE_TOKENS) {
+    try {
+      const feeAmount = JSON.parse(data.FEE_AMOUNT)
+      const tokens = JSON.parse(data.FEE_TOKENS)
+      const feeTokens = Object.keys(tokens).map((key) => ({
+        chain: key,
+        token: tokens[key]
+      }))
+      data.feeStrategy = { feeAmount, feeTokens }
+    } catch (error) {
+      CONFIG_LOGGER.error(`Failed to parse fee strategy: ${error.message}`)
+    }
+    delete data.FEE_AMOUNT
+    delete data.FEE_TOKENS
+  }
+}
+
+export async function getPeerIdFromPrivateKey(
+  privateKey: string
+): Promise<OceanNodeKeys> {
+  const key = new keys.supportedKeys.secp256k1.Secp256k1PrivateKey(
+    hexStringToByteArray(privateKey.slice(2))
+  )
+
+  return {
+    peerId: await createFromPrivKey(key),
+    publicKey: key.public.bytes,
+    privateKey: (key as any)._key,
+    ethAddress: new Wallet(privateKey.substring(2)).address
+  }
+}
+
+export function getDefaultFeeTokens(supportedNetworks?: RPCS): FeeTokens[] {
+  const nodeFeesTokens: FeeTokens[] = []
+  let addressesData: any = getOceanArtifactsAdresses()
+  if (!addressesData) {
+    addressesData = OCEAN_ARTIFACTS_ADDRESSES_PER_CHAIN
+  }
+
+  const hasSupportedNetworks =
+    supportedNetworks && Object.keys(supportedNetworks).length > 0
+
+  Object.keys(addressesData).forEach((chain: any) => {
+    const chainName = chain as string
+    const { chainId, Ocean } = addressesData[chainName]
+
+    if (hasSupportedNetworks) {
+      const keyId: string = chainId as string
+      const chainInfo: any = supportedNetworks[keyId]
+      if (chainInfo) {
+        nodeFeesTokens.push({
+          chain: keyId,
+          token: Ocean
+        })
+      }
+    } else {
+      nodeFeesTokens.push({
+        chain: chainId as string,
+        token: Ocean
+      })
+    }
+  })
+  return nodeFeesTokens
+}
+
+export function buildC2DClusters(
+  dockerComputeEnvironments: C2DDockerConfig[]
+): C2DClusterInfo[] {
+  const clusters: C2DClusterInfo[] = []
+
+  if (process.env.OPERATOR_SERVICE_URL) {
+    try {
+      const clustersURLS: string[] = JSON.parse(process.env.OPERATOR_SERVICE_URL)
+      for (const theURL of clustersURLS) {
+        clusters.push({
+          connection: theURL,
+          hash: create256Hash(theURL),
+          type: C2DClusterType.OPF_K8
+        })
+      }
+    } catch (error) {
+      CONFIG_LOGGER.error(`Failed to parse OPERATOR_SERVICE_URL: ${error.message}`)
+    }
+  }
+
+  if (dockerComputeEnvironments) {
+    for (const dockerC2d of dockerComputeEnvironments) {
+      if (dockerC2d.socketPath || dockerC2d.host) {
+        const hash = create256Hash(JSON.stringify(dockerC2d))
+        clusters.push({
+          connection: dockerC2d,
+          hash,
+          type: C2DClusterType.DOCKER,
+          tempFolder: './c2d_storage/' + hash
+        })
+      }
+    }
+  }
+
+  return clusters
+}
+
+export function getConfigFilePath(configPath?: string): string {
+  if (!configPath) {
+    configPath = process.env.CONFIG_PATH || path.join(process.cwd(), 'config.json')
+  }
+  return configPath
+}
+
+export function loadConfigFromFile(configPath?: string): OceanNodeConfig {
+  configPath = getConfigFilePath(configPath)
+
+  if (configPath.startsWith('$HOME')) {
+    const home = process.env.HOME || os.homedir()
+    if (!home) {
+      throw new Error(
+        'Config path contains $HOME but HOME is not set in the environment.'
+      )
+    }
+    configPath = path.join(home, configPath.slice('$HOME'.length))
+  }
+
+  if (
+    configPath !== path.join(process.cwd(), 'config.json') &&
+    !path.isAbsolute(configPath)
+  ) {
+    throw new Error(`Config path must be absolute. Got: ${configPath}`)
+  }
+
+  if (!fs.existsSync(configPath)) {
+    throw new Error(`Config file not found at path: ${configPath}`)
+  }
+
+  const rawData = fs.readFileSync(configPath, 'utf-8')
+  let config: OceanNodeConfig
+
+  try {
+    config = JSON.parse(rawData)
+  } catch (err) {
+    throw new Error(`Invalid JSON in config file: ${configPath}. Error: ${err.message}`)
+  }
+
+  return config
+}
+
+export async function buildMergedConfig(): Promise<OceanNodeConfig> {
+  const baseConfig = loadConfigFromFile()
+  const privateKey = process.env.PRIVATE_KEY
+  if (!privateKey || privateKey.length !== 66) {
+    CONFIG_LOGGER.logMessageWithEmoji(
+      'Invalid or missing PRIVATE_KEY env variable. Must be 66 characters (0x + 64 hex chars).',
+      true,
+      GENERIC_EMOJIS.EMOJI_CROSS_MARK,
+      LOG_LEVELS_STR.LEVEL_ERROR
+    )
+    throw new Error('Invalid PRIVATE_KEY')
+  }
+
+  const keys = await getPeerIdFromPrivateKey(privateKey)
+
+  const { env } = process
+  const envOverrides: Record<string, any> = { keys }
+
+  Object.assign(envOverrides, mapEnvToConfig(env, ENV_TO_CONFIG_MAPPING))
+
+  const merged = { ...baseConfig, ...envOverrides }
+
+  preprocessConfigData(merged)
+
+  const parsed = OceanNodeConfigSchema.safeParse(merged)
+
+  if (!parsed.success) {
+    console.error('\n❌ Invalid Ocean Node configuration:')
+    for (const issue of parsed.error.issues) {
+      console.error(`  • ${issue.path.join('.')}: ${issue.message}`)
+    }
+    throw new Error('Configuration validation failed')
+  }
+
+  const config = parsed.data as any
+
+  // Post-processing transformations
+  if (!config.indexingNetworks) {
+    config.indexingNetworks = config.supportedNetworks
+  }
+
+  if (Array.isArray(config.indexingNetworks) && config.supportedNetworks) {
+    const filteredNetworks: RPCS = {}
+    for (const chainId of config.indexingNetworks) {
+      const chainIdStr = String(chainId)
+      if (config.supportedNetworks[chainIdStr]) {
+        filteredNetworks[chainIdStr] = config.supportedNetworks[chainIdStr]
+      }
+    }
+    config.indexingNetworks = filteredNetworks
+  }
+
+  if (!config.feeStrategy) {
+    config.feeStrategy = {
+      feeAmount: { amount: 0, unit: 'MB' },
+      feeTokens: getDefaultFeeTokens(config.supportedNetworks as RPCS)
+    }
+  }
+
+  config.c2dClusters = buildC2DClusters(
+    config.dockerComputeEnvironments as C2DDockerConfig[]
+  )
+
+  return config as OceanNodeConfig
+}
+
+export async function getConfiguration(
+  forceReload: boolean = false,
+  isStartup: boolean = false
+): Promise<OceanNodeConfig> {
+  if (!previousConfiguration || forceReload) {
+    previousConfiguration = await buildMergedConfig()
+  }
+
+  if (!previousConfiguration.codeHash) {
+    const __filename = fileURLToPath(import.meta.url)
+    const __dirname = path.dirname(__filename.replace('utils/config', 'utils'))
+    previousConfiguration.codeHash = await computeCodebaseHash(__dirname)
+  }
+
+  return previousConfiguration
+}
+
+export async function printCurrentConfig() {
+  const conf = await getConfiguration(true)
+  conf.keys.privateKey = '[*** HIDDEN CONTENT ***]'
+  console.log(JSON.stringify(conf, null, 4))
+}
diff --git a/src/utils/config/constants.ts b/src/utils/config/constants.ts
new file mode 100644
index 000000000..beb5d72a9
--- /dev/null
+++ b/src/utils/config/constants.ts
@@ -0,0 +1,127 @@
+export const ENV_TO_CONFIG_MAPPING = {
+  INTERFACES: 'INTERFACES',
+  DB_URL: 'DB_URL',
+  DB_USERNAME: 'DB_USERNAME',
+  DB_PASSWORD: 'DB_PASSWORD',
+  DB_TYPE: 'DB_TYPE',
+  FEE_AMOUNT: 'FEE_AMOUNT',
+  FEE_TOKENS: 'FEE_TOKENS',
+  HTTP_API_PORT: 'httpPort',
+  CONTROL_PANEL: 'hasControlPanel',
+  RPCS: 'supportedNetworks',
+  IPFS_GATEWAY: 'ipfsGateway',
+  ARWEAVE_GATEWAY: 'arweaveGateway',
+  ACCOUNT_PURGATORY_URL: 'accountPurgatoryUrl',
+  ASSET_PURGATORY_URL: 'assetPurgatoryUrl',
+  UNSAFE_URLS: 'unsafeURLs',
+  IS_BOOTSTRAP: 'isBootstrap',
+  ESCROW_CLAIM_TIMEOUT: 'claimDurationTimeout',
+  VALIDATE_UNSIGNED_DDO: 'validateUnsignedDDO',
+  JWT_SECRET: 'jwtSecret',
+  MAX_REQ_PER_MINUTE: 'rateLimit',
+  MAX_CONNECTIONS_PER_MINUTE: 'maxConnections',
+  RATE_DENY_LIST: 'denyList',
+  AUTHORIZED_DECRYPTERS: 'authorizedDecrypters',
+  AUTHORIZED_DECRYPTERS_LIST: 'authorizedDecryptersList',
+  ALLOWED_VALIDATORS: 'allowedValidators',
+  ALLOWED_VALIDATORS_LIST: 'allowedValidatorsList',
+  AUTHORIZED_PUBLISHERS: 'authorizedPublishers',
AUTHORIZED_PUBLISHERS_LIST: 'authorizedPublishersList', + ALLOWED_ADMINS: 'allowedAdmins', + ALLOWED_ADMINS_LIST: 'allowedAdminsList', + DOCKER_COMPUTE_ENVIRONMENTS: 'dockerComputeEnvironments', + P2P_BOOTSTRAP_NODES: 'bootstrapNodes', + P2P_BOOTSTRAP_TIMEOUT: 'bootstrapTimeout', + P2P_BOOTSTRAP_TAGNAME: 'bootstrapTagName', + P2P_BOOTSTRAP_TAGVALUE: 'bootstrapTagValue', + P2P_BOOTSTRAP_TTL: 'bootstrapTTL', + P2P_ENABLE_IPV4: 'enableIPV4', + P2P_ENABLE_IPV6: 'enableIPV6', + P2P_IP_V4_BIND_ADDRESS: 'ipV4BindAddress', + P2P_IP_V4_BIND_TCP_PORT: 'ipV4BindTcpPort', + P2P_IP_V4_BIND_WS_PORT: 'ipV4BindWsPort', + P2P_IP_V6_BIND_ADDRESS: 'ipV6BindAddress', + P2P_IP_V6_BIND_TCP_PORT: 'ipV6BindTcpPort', + P2P_IP_V6_BIND_WS_PORT: 'ipV6BindWsPort', + P2P_ANNOUNCE_ADDRESSES: 'announceAddresses', + P2P_PUBSUB_PEER_DISCOVERY_INTERVAL: 'pubsubPeerDiscoveryInterval', + P2P_DHT_MAX_INBOUND_STREAMS: 'dhtMaxInboundStreams', + P2P_DHT_MAX_OUTBOUND_STREAMS: 'dhtMaxOutboundStreams', + P2P_DHT_FILTER: 'dhtFilter', + P2P_MDNS_INTERVAL: 'mDNSInterval', + P2P_CONNECTIONS_MAX_PARALLEL_DIALS: 'connectionsMaxParallelDials', + P2P_CONNECTIONS_DIAL_TIMEOUT: 'connectionsDialTimeout', + P2P_ENABLE_UPNP: 'upnp', + P2P_ENABLE_AUTONAT: 'autoNat', + P2P_ENABLE_CIRCUIT_RELAY_SERVER: 'enableCircuitRelayServer', + P2P_ENABLE_CIRCUIT_RELAY_CLIENT: 'enableCircuitRelayClient', + P2P_CIRCUIT_RELAYS: 'circuitRelays', + P2P_ANNOUNCE_PRIVATE: 'announcePrivateIp', + P2P_FILTER_ANNOUNCED_ADDRESSES: 'filterAnnouncedAddresses', + P2P_MIN_CONNECTIONS: 'minConnections', + P2P_MAX_CONNECTIONS: 'maxConnections', + P2P_AUTODIAL_PEER_RETRY_THRESHOLD: 'autoDialPeerRetryThreshold', + P2P_AUTODIAL_CONCURRENCY: 'autoDialConcurrency', + P2P_MAX_PEER_ADDRS_TO_DIAL: 'maxPeerAddrsToDial', + P2P_AUTODIAL_INTERVAL: 'autoDialInterval', + P2P_ENABLE_NETWORK_STATS: 'enableNetworkStats' +} as const + +// Configuration defaults +export const DEFAULT_RATE_LIMIT_PER_MINUTE = 30 +export const DEFAULT_MAX_CONNECTIONS_PER_MINUTE = 60 * 2 // 120 requests per minute + +export const DEFAULT_BOOTSTRAP_ADDRESSES = [ + // OPF nodes + // node1 + '/dns4/node1.oceanprotocol.com/tcp/9000/p2p/16Uiu2HAmLhRDqfufZiQnxvQs2XHhd6hwkLSPfjAQg1gH8wgRixiP', + '/dns4/node1.oceanprotocol.com/tcp/9001/ws/p2p/16Uiu2HAmLhRDqfufZiQnxvQs2XHhd6hwkLSPfjAQg1gH8wgRixiP', + '/dns6/node1.oceanprotocol.com/tcp/9002/p2p/16Uiu2HAmLhRDqfufZiQnxvQs2XHhd6hwkLSPfjAQg1gH8wgRixiP', + '/dns6/node1.oceanprotocol.com/tcp/9003/ws/p2p/16Uiu2HAmLhRDqfufZiQnxvQs2XHhd6hwkLSPfjAQg1gH8wgRixiP', + // node 2 + '/dns4/node2.oceanprotocol.com/tcp/9000/p2p/16Uiu2HAmHwzeVw7RpGopjZe6qNBJbzDDBdqtrSk7Gcx1emYsfgL4', + '/dns4/node2.oceanprotocol.com/tcp/9001/ws/p2p/16Uiu2HAmHwzeVw7RpGopjZe6qNBJbzDDBdqtrSk7Gcx1emYsfgL4', + '/dns6/node2.oceanprotocol.com/tcp/9002/p2p/16Uiu2HAmHwzeVw7RpGopjZe6qNBJbzDDBdqtrSk7Gcx1emYsfgL4', + '/dns6/node2.oceanprotocol.com/tcp/9003/ws/p2p/16Uiu2HAmHwzeVw7RpGopjZe6qNBJbzDDBdqtrSk7Gcx1emYsfgL4', + // node 3 + '/dns4/node3.oceanprotocol.com/tcp/9000/p2p/16Uiu2HAmBKSeEP3v4tYEPsZsZv9VELinyMCsrVTJW9BvQeFXx28U', + '/dns4/node3.oceanprotocol.com/tcp/9001/ws/p2p/16Uiu2HAmBKSeEP3v4tYEPsZsZv9VELinyMCsrVTJW9BvQeFXx28U', + '/dns6/node3.oceanprotocol.com/tcp/9002/p2p/16Uiu2HAmBKSeEP3v4tYEPsZsZv9VELinyMCsrVTJW9BvQeFXx28U', + '/dns6/node3.oceanprotocol.com/tcp/9003/ws/p2p/16Uiu2HAmBKSeEP3v4tYEPsZsZv9VELinyMCsrVTJW9BvQeFXx28U', + // node 4 + '/dns4/node4.oceanprotocol.com/tcp/9000/p2p/16Uiu2HAmSTVTArioKm2wVcyeASHYEsnx2ZNq467Z4GMDU4ErEPom', + 
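The mapping table above is consumed by `mapEnvToConfig` in `builder.ts`: matching env values are copied, still as raw strings, onto the config keys named here, and typing is deferred to the zod schemas. A hand-rolled sketch of that pass (the env values are made up for illustration):

```ts
import { ENV_TO_CONFIG_MAPPING } from './src/utils/config/constants.js'

// Example environment; values stay strings at this stage.
const env: Record<string, string | undefined> = {
  HTTP_API_PORT: '8001',
  RPCS: '{"8996":{"rpc":"http://127.0.0.1:8545","chainId":8996}}'
}

const overrides: Record<string, string> = {}
for (const [envKey, configKey] of Object.entries(ENV_TO_CONFIG_MAPPING)) {
  const value = env[envKey]
  if (value !== undefined && value !== 'undefined') {
    overrides[configKey] = value
  }
}
// overrides => { httpPort: '8001', supportedNetworks: '{"8996":{...}}' }
// OceanNodeConfigSchema later applies z.coerce.number() / jsonFromString().
```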
'/dns4/node4.oceanprotocol.com/tcp/9001/ws/p2p/16Uiu2HAmSTVTArioKm2wVcyeASHYEsnx2ZNq467Z4GMDU4ErEPom', + '/dns6/node4.oceanprotocol.com/tcp/9002/p2p/16Uiu2HAmSTVTArioKm2wVcyeASHYEsnx2ZNq467Z4GMDU4ErEPom', + '/dns6/node4.oceanprotocol.com/tcp/9003/ws/p2p/16Uiu2HAmSTVTArioKm2wVcyeASHYEsnx2ZNq467Z4GMDU4ErEPom' +] as const + +export const DEFAULT_UNSAFE_URLS = [ + // AWS and GCP + '^.*(169.254.169.254).*', + // GCP + '^.*(metadata.google.internal).*', + '^.*(http://metadata).*', + // Azure + '^.*(http://169.254.169.254).*', + // Oracle Cloud + '^.*(http://192.0.0.192).*', + // Alibaba Cloud + '^.*(http://100.100.100.200).*', + // k8s ETCD + '^.*(127.0.0.1).*' +] as const + +export const DEFAULT_FILTER_ANNOUNCED_ADDRESSES = [ + '127.0.0.0/8', + '10.0.0.0/8', + '172.16.0.0/12', + '192.168.0.0/16', + '100.64.0.0/10', + '169.254.0.0/16', + '192.0.0.0/24', + '192.0.2.0/24', + '198.51.100.0/24', + '203.0.113.0/24', + '224.0.0.0/4', + '240.0.0.0/4' +] as const diff --git a/src/utils/config/index.ts b/src/utils/config/index.ts new file mode 100644 index 000000000..276c92be9 --- /dev/null +++ b/src/utils/config/index.ts @@ -0,0 +1,6 @@ +export * from './schemas.js' +export * from './transforms.js' +export * from './constants.js' +export * from './builder.js' + +export { getConfiguration, getConfigFilePath, printCurrentConfig } from './builder.js' diff --git a/src/utils/config/schemas.ts b/src/utils/config/schemas.ts new file mode 100644 index 000000000..0fa989627 --- /dev/null +++ b/src/utils/config/schemas.ts @@ -0,0 +1,332 @@ +import { z } from 'zod' +import { getAddress } from 'ethers' +import { dhtFilterMethod } from '../../@types/OceanNode.js' +import { C2DClusterType } from '../../@types/C2D/C2D.js' +import { CONFIG_LOGGER } from '../logging/common.js' +import { booleanFromString, jsonFromString } from './transforms.js' +import { + DEFAULT_BOOTSTRAP_ADDRESSES, + DEFAULT_RATE_LIMIT_PER_MINUTE, + DEFAULT_UNSAFE_URLS, + DEFAULT_FILTER_ANNOUNCED_ADDRESSES +} from './constants.js' + +function isValidUrl(urlString: string): boolean { + try { + // eslint-disable-next-line no-new + new URL(urlString) + return true + } catch { + return false + } +} + +export const SupportedNetworkSchema = z.object({ + chainId: z.number(), + rpc: z.string(), + network: z.string().optional(), + chunkSize: z.number().optional(), + startBlock: z.number().optional(), + fallbackRPCs: z.array(z.string()).optional() +}) + +export const RPCSSchema = z.record(z.string(), SupportedNetworkSchema) + +export const AccessListContractSchema = z + .union([ + z.record(z.string(), z.array(z.string())), + z.array(z.any()).transform(() => null), + z.null() + ]) + .nullable() + +export const OceanNodeKeysSchema = z.object({ + peerId: z.any().optional(), + publicKey: z.any().optional(), + privateKey: z.any().optional(), + ethAddress: z.string().optional() +}) + +export const DenyListSchema = z.object({ + peers: z.array(z.string()).default([]), + ips: z.array(z.string()).default([]) +}) + +export const FeeAmountSchema = z.object({ + amount: z.number(), + unit: z.string() +}) + +export const FeeTokensSchema = z.object({ + chain: z.string(), + token: z.string() +}) + +export const FeeStrategySchema = z.object({ + feeTokens: z.array(FeeTokensSchema).optional(), + feeAmount: FeeAmountSchema.optional() +}) + +export const OceanNodeDBConfigSchema = z.object({ + url: z.string().nullable(), + username: z.string().optional(), + password: z.string().optional(), + dbType: z.string().nullable() +}) + +export const ComputeResourceSchema = z.object({ + id: 
z.string(), + total: z.number().optional(), + description: z.string().optional(), + type: z.string().optional(), + kind: z.string().optional(), + min: z.number().optional(), + max: z.number().optional(), + inUse: z.number().optional(), + init: z.any().optional() +}) + +export const ComputeResourcesPricingInfoSchema = z.object({ + id: z.string(), + price: z.number() +}) + +export const ComputeEnvFeesSchema = z.object({ + feeToken: z.string().optional(), + prices: z.array(ComputeResourcesPricingInfoSchema).optional() +}) + +export const ComputeEnvironmentFreeOptionsSchema = z.object({ + maxJobDuration: z.number().int().optional().default(3600), + maxJobs: z.number().int().optional().default(3), + resources: z.array(ComputeResourceSchema).optional(), + access: z + .object({ + addresses: z.array(z.string()), + accessLists: z.array(z.string()) + }) + .optional() +}) + +export const C2DDockerConfigSchema = z.array( + z + .object({ + socketPath: z.string().optional(), + protocol: z.string().optional(), + host: z.string().optional(), + port: z.number().optional(), + caPath: z.string().optional(), + certPath: z.string().optional(), + keyPath: z.string().optional(), + resources: z.array(ComputeResourceSchema).optional(), + storageExpiry: z.number().int().optional().default(604800), + maxJobDuration: z.number().int().optional().default(3600), + access: z + .object({ + addresses: z.array(z.string()), + accessLists: z.array(z.string()) + }) + .optional(), + fees: z.record(z.string(), z.array(ComputeEnvFeesSchema)), + free: ComputeEnvironmentFreeOptionsSchema.optional() + }) + .refine((data) => data.fees !== undefined && Object.keys(data.fees).length > 0, { + message: 'There is no fees configuration!' + }) + .refine((data) => data.storageExpiry >= data.maxJobDuration, { + message: '"storageExpiry" should be greater than "maxJobDuration"' + }) + .refine( + (data) => { + if (!data.resources) return false + return data.resources.some((r) => r.id === 'disk' && r.total) + }, + { message: 'There is no "disk" resource configured. 
This is mandatory' } + ) + .transform((data) => { + if (data.resources) { + for (const resource of data.resources) { + if (resource.id === 'disk' && resource.total) { + resource.type = 'disk' + } + } + } + return data + }) +) + +export const C2DClusterInfoSchema = z.object({ + type: z.nativeEnum(C2DClusterType), + hash: z.string(), + connection: z.any().optional(), + tempFolder: z.string().optional() +}) + +export const OceanNodeP2PConfigSchema = z.object({ + bootstrapNodes: jsonFromString(z.array(z.string())).default([ + ...DEFAULT_BOOTSTRAP_ADDRESSES + ]), + bootstrapTimeout: z.number().optional().default(2000), + bootstrapTagName: z.string().optional().default('bootstrap'), + bootstrapTagValue: z.number().optional().default(50), + bootstrapTTL: z.number().optional(), + enableIPV4: booleanFromString.optional().default(true), + enableIPV6: booleanFromString.optional().default(true), + ipV4BindAddress: z.string().nullable().optional().default('0.0.0.0'), + ipV4BindTcpPort: z.number().nullable().optional().default(0), + ipV4BindWsPort: z.number().nullable().optional().default(0), + ipV6BindAddress: z.string().nullable().optional().default('::1'), + ipV6BindTcpPort: z.number().nullable().optional().default(0), + ipV6BindWsPort: z.number().nullable().optional().default(0), + pubsubPeerDiscoveryInterval: z.number().optional().default(1000), + dhtMaxInboundStreams: z.number().optional().default(500), + dhtMaxOutboundStreams: z.number().optional().default(500), + dhtFilter: z + .union([z.nativeEnum(dhtFilterMethod), z.string(), z.number(), z.null()]) + .transform((v) => { + if (v === null) { + return dhtFilterMethod.filterNone + } + if (typeof v === 'number' || typeof v === 'string') { + const filterValue = typeof v === 'string' ? parseInt(v, 10) : v + switch (filterValue) { + case 1: + return dhtFilterMethod.filterPrivate + case 2: + return dhtFilterMethod.filterPublic + default: + return dhtFilterMethod.filterNone + } + } + return v + }) + .optional() + .default(dhtFilterMethod.filterNone), + mDNSInterval: z.number().optional().default(20e3), + connectionsMaxParallelDials: z.number().optional().default(15), + connectionsDialTimeout: z.number().optional().default(30e3), + upnp: booleanFromString.optional().default(true), + autoNat: booleanFromString.optional().default(true), + enableCircuitRelayServer: booleanFromString.optional().default(false), + enableCircuitRelayClient: booleanFromString.optional().default(false), + circuitRelays: z.number().optional().default(0), + announcePrivateIp: booleanFromString.optional().default(false), + announceAddresses: jsonFromString(z.array(z.string())).optional().default([]), + filterAnnouncedAddresses: jsonFromString(z.array(z.string())) + .optional() + .default([...DEFAULT_FILTER_ANNOUNCED_ADDRESSES]), + minConnections: z.number().optional().default(1), + maxConnections: z.number().optional().default(300), + autoDialPeerRetryThreshold: z.number().optional().default(120000), + autoDialConcurrency: z.number().optional().default(5), + maxPeerAddrsToDial: z.number().optional().default(5), + autoDialInterval: z.number().optional().default(5000), + enableNetworkStats: booleanFromString.optional().default(false) +}) + +const addressArrayFromString = jsonFromString(z.array(z.string())).transform( + (addresses) => { + if (!Array.isArray(addresses)) return [] + try { + return addresses.map((addr) => getAddress(addr)) + } catch (error) { + CONFIG_LOGGER.error(`Invalid address in list: ${error.message}`) + return [] + } + } +) + +export const OceanNodeConfigSchema = z 
+  .object({
+    dockerComputeEnvironments: jsonFromString(C2DDockerConfigSchema)
+      .optional()
+      .default([]),
+
+    authorizedDecrypters: addressArrayFromString.optional().default([]),
+    authorizedDecryptersList: jsonFromString(AccessListContractSchema).optional(),
+
+    allowedValidators: addressArrayFromString.optional().default([]),
+    allowedValidatorsList: jsonFromString(AccessListContractSchema).optional(),
+
+    authorizedPublishers: addressArrayFromString.optional().default([]),
+    authorizedPublishersList: jsonFromString(AccessListContractSchema).optional(),
+
+    keys: OceanNodeKeysSchema.optional(),
+
+    INTERFACES: z.string().optional(),
+    hasP2P: booleanFromString.optional().default(true),
+    hasHttp: booleanFromString.optional().default(true),
+
+    p2pConfig: OceanNodeP2PConfigSchema.nullable().optional(),
+    hasIndexer: booleanFromString.default(true),
+    hasControlPanel: booleanFromString.default(true),
+
+    DB_URL: z.string().optional(),
+    DB_USERNAME: z.string().optional(),
+    DB_PASSWORD: z.string().optional(),
+    DB_TYPE: z.string().optional(),
+    dbConfig: OceanNodeDBConfigSchema.optional(),
+
+    FEE_AMOUNT: z.string().optional(),
+    FEE_TOKENS: z.string().optional(),
+    feeStrategy: FeeStrategySchema.optional(),
+
+    httpPort: z.coerce.number().optional().default(3000),
+    rateLimit: z.coerce.number().optional().default(DEFAULT_RATE_LIMIT_PER_MINUTE),
+
+    ipfsGateway: z.string().nullable().optional(),
+    arweaveGateway: z.string().nullable().optional(),
+
+    supportedNetworks: jsonFromString(RPCSSchema).optional(),
+
+    claimDurationTimeout: z.coerce.number().default(600),
+    indexingNetworks: z
+      .union([jsonFromString(RPCSSchema), z.array(z.union([z.string(), z.number()]))])
+      .optional(),
+
+    c2dClusters: z.array(C2DClusterInfoSchema).optional(),
+    accountPurgatoryUrl: z
+      .string()
+      .nullable()
+      .refine((url) => !url || isValidUrl(url), {
+        message: 'accountPurgatoryUrl must be a valid URL'
+      }),
+    assetPurgatoryUrl: z
+      .string()
+      .nullable()
+      .refine((url) => !url || isValidUrl(url), {
+        message: 'assetPurgatoryUrl must be a valid URL'
+      }),
+    allowedAdmins: addressArrayFromString.optional(),
+    allowedAdminsList: jsonFromString(AccessListContractSchema).optional(),
+
+    codeHash: z.string().optional(),
+    maxConnections: z.coerce.number().optional(),
+    denyList: jsonFromString(DenyListSchema).optional().default({ peers: [], ips: [] }),
+    unsafeURLs: jsonFromString(z.array(z.string()))
+      .optional()
+      .default([...DEFAULT_UNSAFE_URLS]),
+    isBootstrap: booleanFromString.optional().default(false),
+    validateUnsignedDDO: booleanFromString.optional().default(true),
+    jwtSecret: z.string()
+  })
+  .passthrough()
+  .superRefine((data, ctx) => {
+    if (!data.hasHttp && !data.hasP2P) {
+      ctx.addIssue({
+        code: z.ZodIssueCode.custom,
+        message: 'At least one interface (HTTP or P2P) must be enabled',
+        path: ['hasHttp']
+      })
+    }
+
+    if (data.hasP2P && !data.p2pConfig) {
+      ctx.addIssue({
+        code: z.ZodIssueCode.custom,
+        message: 'P2P configuration is required when hasP2P is true',
+        path: ['p2pConfig']
+      })
+    }
+  })
+
+export type OceanNodeConfigParsed = z.infer<typeof OceanNodeConfigSchema>
diff --git a/src/utils/config/transforms.ts b/src/utils/config/transforms.ts
new file mode 100644
index 000000000..20d0f8f13
--- /dev/null
+++ b/src/utils/config/transforms.ts
@@ -0,0 +1,25 @@
+import { z } from 'zod'
+import { CONFIG_LOGGER } from '../logging/common.js'
+
+export const booleanFromString = z.union([z.boolean(), z.string()]).transform((v) => {
+  if (typeof v === 'string') {
+    return v === 'true' || v === '1' || v.toLowerCase()
=== 'yes' + } + return v +}) + +export const jsonFromString = (schema: z.ZodType) => + z.union([schema, z.string(), z.undefined()]).transform((v) => { + if (v === undefined || v === 'undefined') { + return undefined + } + if (typeof v === 'string') { + try { + return JSON.parse(v) + } catch (error) { + CONFIG_LOGGER.warn(`Failed to parse JSON: ${error.message}`) + return v + } + } + return v + }) diff --git a/src/utils/constants.ts b/src/utils/constants.ts index 0c4eee128..acdb9adf3 100644 --- a/src/utils/constants.ts +++ b/src/utils/constants.ts @@ -35,7 +35,10 @@ export const PROTOCOL_COMMANDS = { GET_P2P_NETWORK_STATS: 'getP2PNetworkStats', FIND_PEER: 'findPeer', CREATE_AUTH_TOKEN: 'createAuthToken', - INVALIDATE_AUTH_TOKEN: 'invalidateAuthToken' + INVALIDATE_AUTH_TOKEN: 'invalidateAuthToken', + FETCH_CONFIG: 'fetchConfig', + PUSH_CONFIG: 'pushConfig', + JOBS: 'jobs' } // more visible, keep then close to make sure we always update both export const SUPPORTED_PROTOCOL_COMMANDS: string[] = [ @@ -71,7 +74,10 @@ export const SUPPORTED_PROTOCOL_COMMANDS: string[] = [ PROTOCOL_COMMANDS.GET_P2P_NETWORK_STATS, PROTOCOL_COMMANDS.FIND_PEER, PROTOCOL_COMMANDS.CREATE_AUTH_TOKEN, - PROTOCOL_COMMANDS.INVALIDATE_AUTH_TOKEN + PROTOCOL_COMMANDS.INVALIDATE_AUTH_TOKEN, + PROTOCOL_COMMANDS.FETCH_CONFIG, + PROTOCOL_COMMANDS.PUSH_CONFIG, + PROTOCOL_COMMANDS.JOBS ] export const MetadataStates = { @@ -456,10 +462,6 @@ export const ENVIRONMENT_VARIABLES: Record = { } } export const CONNECTION_HISTORY_DELETE_THRESHOLD = 300 -// default to 30 requests per minute (configurable), per ip/peer -export const DEFAULT_RATE_LIMIT_PER_MINUTE = 30 -// max connections per minute (configurable), all connections -export const DEFAULT_MAX_CONNECTIONS_PER_MINUTE = 60 * 2 // 120 requests per minute // 1 minute export const CONNECTIONS_RATE_INTERVAL = 60 * 1000 // Typesense's maximum limit to send 250 hits at a time @@ -467,47 +469,3 @@ export const TYPESENSE_HITS_CAP = 250 export const DDO_IDENTIFIER_PREFIX = 'did:op:' // global ocean node API services path export const SERVICES_API_BASE_PATH = '/api/services' - -export const defaultBootstrapAddresses = [ - // Public IPFS bootstraps - // '/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ', - // '/dnsaddr/bootstrap.libp2p.io/ipfs/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN', - // '/dnsaddr/bootstrap.libp2p.io/ipfs/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa', - // OPF nodes - // node1 - '/dns4/node1.oceanprotocol.com/tcp/9000/p2p/16Uiu2HAmLhRDqfufZiQnxvQs2XHhd6hwkLSPfjAQg1gH8wgRixiP', - '/dns4/node1.oceanprotocol.com/tcp/9001/ws/p2p/16Uiu2HAmLhRDqfufZiQnxvQs2XHhd6hwkLSPfjAQg1gH8wgRixiP', - '/dns6/node1.oceanprotocol.com/tcp/9002/p2p/16Uiu2HAmLhRDqfufZiQnxvQs2XHhd6hwkLSPfjAQg1gH8wgRixiP', - '/dns6/node1.oceanprotocol.com/tcp/9003/ws/p2p/16Uiu2HAmLhRDqfufZiQnxvQs2XHhd6hwkLSPfjAQg1gH8wgRixiP', - // node 2 - '/dns4/node2.oceanprotocol.com/tcp/9000/p2p/16Uiu2HAmHwzeVw7RpGopjZe6qNBJbzDDBdqtrSk7Gcx1emYsfgL4', - '/dns4/node2.oceanprotocol.com/tcp/9001/ws/p2p/16Uiu2HAmHwzeVw7RpGopjZe6qNBJbzDDBdqtrSk7Gcx1emYsfgL4', - '/dns6/node2.oceanprotocol.com/tcp/9002/p2p/16Uiu2HAmHwzeVw7RpGopjZe6qNBJbzDDBdqtrSk7Gcx1emYsfgL4', - '/dns6/node2.oceanprotocol.com/tcp/9003/ws/p2p/16Uiu2HAmHwzeVw7RpGopjZe6qNBJbzDDBdqtrSk7Gcx1emYsfgL4', - // node 3 - '/dns4/node3.oceanprotocol.com/tcp/9000/p2p/16Uiu2HAmBKSeEP3v4tYEPsZsZv9VELinyMCsrVTJW9BvQeFXx28U', - '/dns4/node3.oceanprotocol.com/tcp/9001/ws/p2p/16Uiu2HAmBKSeEP3v4tYEPsZsZv9VELinyMCsrVTJW9BvQeFXx28U', - 
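As a reference for the two transforms in `transforms.ts` above: each accepts either an already-typed value or an env-style string. A small behavioral sketch (import paths assumed from the new module layout):

```ts
import { z } from 'zod'
import { booleanFromString, jsonFromString } from './src/utils/config/transforms.js'

booleanFromString.parse('1') // => true ('true', '1', 'yes' are truthy)
booleanFromString.parse('no') // => false
booleanFromString.parse(false) // => false (booleans pass through untouched)

const ports = jsonFromString(z.array(z.number()))
ports.parse('[9000,9001]') // => [9000, 9001] (JSON string is decoded)
ports.parse([9000]) // => [9000] (already-parsed input passes through)
ports.parse(undefined) // => undefined (as do literal 'undefined' strings)
```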
'/dns6/node3.oceanprotocol.com/tcp/9002/p2p/16Uiu2HAmBKSeEP3v4tYEPsZsZv9VELinyMCsrVTJW9BvQeFXx28U', - '/dns6/node3.oceanprotocol.com/tcp/9003/ws/p2p/16Uiu2HAmBKSeEP3v4tYEPsZsZv9VELinyMCsrVTJW9BvQeFXx28U', - // node 4 - '/dns4/node4.oceanprotocol.com/tcp/9000/p2p/16Uiu2HAmSTVTArioKm2wVcyeASHYEsnx2ZNq467Z4GMDU4ErEPom', - '/dns4/node4.oceanprotocol.com/tcp/9001/ws/p2p/16Uiu2HAmSTVTArioKm2wVcyeASHYEsnx2ZNq467Z4GMDU4ErEPom', - '/dns6/node4.oceanprotocol.com/tcp/9002/p2p/16Uiu2HAmSTVTArioKm2wVcyeASHYEsnx2ZNq467Z4GMDU4ErEPom', - '/dns6/node4.oceanprotocol.com/tcp/9003/ws/p2p/16Uiu2HAmSTVTArioKm2wVcyeASHYEsnx2ZNq467Z4GMDU4ErEPom' -] - -export const knownUnsafeURLs: string[] = [ - // AWS and GCP - '^.*(169.254.169.254).*', - // GCP - '^.*(metadata.google.internal).*', - '^.*(http://metadata).*', - // Azure - '^.*(http://169.254.169.254).*', - // Oracle Cloud - '^.*(http://192.0.0.192).*', - // Alibaba Cloud - '^.*(http://100.100.100.200).*', - // k8s ETCD - '^.*(127.0.0.1).*' -] diff --git a/src/utils/file.ts b/src/utils/file.ts index 2309965ef..a131b52fd 100644 --- a/src/utils/file.ts +++ b/src/utils/file.ts @@ -51,6 +51,6 @@ export async function getFile( } catch (error) { const msg = 'Error occured while requesting the files: ' + error.message CORE_LOGGER.error(msg) - throw new Error(msg) + throw new Error('Unable to decrypt files, files not served by the current node!') } } diff --git a/src/utils/index.ts b/src/utils/index.ts index 30eeee2e0..6a9d9e914 100644 --- a/src/utils/index.ts +++ b/src/utils/index.ts @@ -4,3 +4,4 @@ export * from './blockchain.js' export * from './constants.js' export * from './asset.js' export * from './attestation.js' +export { isDefined } from './util.js' diff --git a/src/utils/validators.ts b/src/utils/validators.ts index 2b35ce70b..d99d45d12 100644 --- a/src/utils/validators.ts +++ b/src/utils/validators.ts @@ -3,10 +3,8 @@ import { OceanNodeConfig } from '../@types/OceanNode.js' import { ValidateParams } from '../components/httpRoutes/validateCommands.js' import { RequestLimiter } from '../OceanNode.js' import { CORE_LOGGER } from './logging/common.js' -import { - CONNECTIONS_RATE_INTERVAL, - DEFAULT_MAX_CONNECTIONS_PER_MINUTE -} from './constants.js' +import { CONNECTIONS_RATE_INTERVAL } from './constants.js' +import { DEFAULT_MAX_CONNECTIONS_PER_MINUTE } from './index.js' // TODO we should group common stuff, // we have multiple similar validation interfaces
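Taken together, the new config module validates the merged file-plus-env object in a single schema pass. A minimal sketch of driving `OceanNodeConfigSchema` directly (field values are illustrative; note the schema as written gives `jwtSecret` and the two purgatory URLs no defaults, so they must be supplied):

```ts
import { OceanNodeConfigSchema } from './src/utils/config/schemas.js'

const result = OceanNodeConfigSchema.safeParse({
  jwtSecret: 'ocean-node-secret',
  accountPurgatoryUrl: null,
  assetPurgatoryUrl: null,
  p2pConfig: {}, // required because hasP2P defaults to true; inner defaults apply
  supportedNetworks: '{"8996":{"rpc":"http://127.0.0.1:8545","chainId":8996}}'
})

if (!result.success) {
  for (const issue of result.error.issues) {
    console.error(`${issue.path.join('.')}: ${issue.message}`)
  }
} else {
  console.log(result.data.httpPort) // 3000, filled in by the schema default
}
```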