Skip to content

Commit f475b64

Browse files
feat: merge createDataset and addPieces
1 parent 5d2e601 commit f475b64

File tree

4 files changed

+358
-148
lines changed

4 files changed

+358
-148
lines changed

src/PDPVerifier.sol

Lines changed: 80 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,7 @@ contract PDPVerifier is Initializable, UUPSUpgradeable, OwnableUpgradeable {
5252
bytes32 public constant FIL_USD_PRICE_FEED_ID = 0x150ac9b959aee0051e4091f0ef5216d941f590e1c5e7f91cf7635b5c11628c0e;
5353
uint256 public constant NO_CHALLENGE_SCHEDULED = 0;
5454
uint256 public constant NO_PROVEN_EPOCH = 0;
55+
uint256 public constant NEW_DATA_SET_SENTINEL = type(uint256).max;
5556

5657
// Events
5758
event DataSetCreated(uint256 indexed setId, address indexed storageProvider);
@@ -382,35 +383,6 @@ contract PDPVerifier is Initializable, UUPSUpgradeable, OwnableUpgradeable {
382383
}
383384
}
384385

385-
// Creates a new, empty data set and returns its id for later reference.
// The caller (msg.sender) becomes the data set's storage provider and must
// send at least the sybil fee; any excess is refunded after all state changes.
function createDataSet(address listenerAddr, bytes calldata extraData) public payable returns (uint256) {
    require(extraData.length <= EXTRA_DATA_MAX_SIZE, "Extra data too large");

    // Collect and burn the anti-sybil fee before touching any state.
    uint256 requiredFee = PDPFees.sybilFee();
    require(msg.value >= requiredFee, "sybil fee not met");
    burnFee(requiredFee);

    // Allocate the next id and initialize all per-set bookkeeping.
    uint256 createdSetId = nextDataSetId++;
    dataSetLeafCount[createdSetId] = 0;
    nextChallengeEpoch[createdSetId] = NO_CHALLENGE_SCHEDULED; // Initialized on first call to NextProvingPeriod
    storageProvider[createdSetId] = msg.sender;
    dataSetListener[createdSetId] = listenerAddr;
    dataSetLastProvenEpoch[createdSetId] = NO_PROVEN_EPOCH;

    // Notify the listener, if one was registered.
    if (listenerAddr != address(0)) {
        PDPListener(listenerAddr).dataSetCreated(createdSetId, msg.sender, extraData);
    }
    emit DataSetCreated(createdSetId, msg.sender);

    // Refund any overpayment last, so the external call cannot re-enter
    // before this function's state changes are complete.
    uint256 overpayment = msg.value - requiredFee;
    if (overpayment > 0) {
        (bool success,) = msg.sender.call{value: overpayment}("");
        require(success, "Transfer failed.");
    }
    return createdSetId;
}
413-
414386
// Removes a data set. Must be called by the storage provider.
415387
function deleteDataSet(uint256 setId, bytes calldata extraData) public {
416388
require(extraData.length <= EXTRA_DATA_MAX_SIZE, "Extra data too large");
@@ -432,35 +404,90 @@ contract PDPVerifier is Initializable, UUPSUpgradeable, OwnableUpgradeable {
432404
emit DataSetDeleted(setId, deletedLeafCount);
433405
}
434406

435-
// Creates a data set and/or appends pieces to one.
//
// When setId == NEW_DATA_SET_SENTINEL, a new data set is created with the
// provided (required, non-zero) listenerAddr; extraData must be
// abi.encode(bytes createPayload, bytes addPayload), pieceData may be empty,
// msg.sender must cover the sybil fee, and the new set id is returned.
//
// When adding to an existing set, pass the live setId, listenerAddr ==
// address(0), a non-empty pieceData, and no value; extraData is forwarded to
// the set's listener, and the id of the first piece added is returned.
//
// Newly added pieces won't be challenged until the next proving period is
// started by calling nextProvingPeriod.
function addPieces(uint256 setId, address listenerAddr, Cids.Cid[] calldata pieceData, bytes calldata extraData)
    public
    payable
    returns (uint256)
{
    if (setId == NEW_DATA_SET_SENTINEL) {
        (bytes memory createPayload, bytes memory addPayload) = abi.decode(extraData, (bytes, bytes));

        require(createPayload.length <= EXTRA_DATA_MAX_SIZE, "Extra data too large");
        uint256 sybilFee = PDPFees.sybilFee();
        require(msg.value >= sybilFee, "sybil fee not met");
        burnFee(sybilFee);

        require(listenerAddr != address(0), "listener required for new dataset");
        uint256 newSetId = nextDataSetId++;
        dataSetLeafCount[newSetId] = 0;
        nextChallengeEpoch[newSetId] = NO_CHALLENGE_SCHEDULED; // Initialized on first call to NextProvingPeriod
        storageProvider[newSetId] = msg.sender;
        dataSetListener[newSetId] = listenerAddr;
        dataSetLastProvenEpoch[newSetId] = NO_PROVEN_EPOCH;

        // listenerAddr is guaranteed non-zero by the require above, so the
        // listener is always notified for a newly created set.
        PDPListener(listenerAddr).dataSetCreated(newSetId, msg.sender, createPayload);
        emit DataSetCreated(newSetId, msg.sender);

        // Add the initial pieces (if any) to the newly created data set.
        require(addPayload.length <= EXTRA_DATA_MAX_SIZE, "Extra data too large");
        if (pieceData.length > 0) {
            _appendPieces(newSetId, pieceData, addPayload);
        }

        // Refund any overpayment last to avoid any possible re-entrancy issues.
        if (msg.value > sybilFee) {
            (bool success,) = msg.sender.call{value: msg.value - sybilFee}("");
            require(success, "Transfer failed.");
        }

        return newSetId;
    } else {
        // Adding to an existing set; no fee may be sent and listenerAddr must be zero
        // (the listener registered at creation time is used instead).
        require(listenerAddr == address(0), "listener must be zero for existing dataset");
        require(msg.value == 0, "no fee on add to existing dataset");

        require(extraData.length <= EXTRA_DATA_MAX_SIZE, "Extra data too large");
        require(dataSetLive(setId), "Data set not live");
        require(pieceData.length > 0, "Must add at least one piece");
        require(storageProvider[setId] == msg.sender, "Only the storage provider can add pieces");
        return _appendPieces(setId, pieceData, extraData);
    }
}

// Appends pieceData to the set, emits PiecesAdded, and notifies the set's
// registered listener (if any) with signalData. Callers are responsible for
// all authorization and liveness checks. Returns the first piece id added.
function _appendPieces(uint256 setId, Cids.Cid[] calldata pieceData, bytes memory signalData)
    internal
    returns (uint256)
{
    uint256 nPieces = pieceData.length;
    uint256 firstAdded = nextPieceId[setId];
    uint256[] memory pieceIds = new uint256[](nPieces);
    Cids.Cid[] memory pieceCidsAdded = new Cids.Cid[](nPieces);

    for (uint256 i = 0; i < nPieces; i++) {
        addOnePiece(setId, i, pieceData[i]);
        pieceIds[i] = firstAdded + i;
        pieceCidsAdded[i] = pieceData[i];
    }
    emit PiecesAdded(setId, pieceIds, pieceCidsAdded);

    address listener = dataSetListener[setId];
    if (listener != address(0)) {
        PDPListener(listener).piecesAdded(setId, firstAdded, pieceData, signalData);
    }
    return firstAdded;
}
465492

466493
error IndexedError(uint256 idx, string msg);

0 commit comments

Comments
 (0)